Posted to commits@ozone.apache.org by el...@apache.org on 2020/04/21 08:33:50 UTC

[hadoop-ozone] branch master updated: HDDS-3101. Depend on lightweight ConfigurationSource interface instead of Hadoop Configuration

This is an automated email from the ASF dual-hosted git repository.

elek pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/hadoop-ozone.git


The following commit(s) were added to refs/heads/master by this push:
     new ffb340e  HDDS-3101. Depend on lightweight ConfigurationSource interface instead of Hadoop Configuration
ffb340e is described below

commit ffb340e32460ccaa2eae557f0bb71fb90d7ebc7a
Author: Elek Márton <el...@apache.org>
AuthorDate: Tue Apr 21 10:02:29 2020 +0200

    HDDS-3101. Depend on lightweight ConfigurationSource interface instead of Hadoop Configuration
    
    Closes #834
---
 .../apache/hadoop/hdds/scm/XceiverClientGrpc.java  |  42 +-
 .../hadoop/hdds/scm/XceiverClientManager.java      |  10 +-
 .../apache/hadoop/hdds/scm/XceiverClientRatis.java |  23 +-
 .../hadoop/hdds/scm/client/HddsClientUtils.java    |   7 +-
 .../java/org/apache/hadoop/hdds/HddsUtils.java     |  25 +-
 .../hadoop/hdds/conf/OzoneConfiguration.java       | 193 ++------
 .../java/org/apache/hadoop/hdds/fs/DUFactory.java  |  11 +-
 .../hdds/fs/DedicatedDiskSpaceUsageFactory.java    |  11 +-
 .../hadoop/hdds/fs/SpaceUsageCheckFactory.java     |  26 +-
 .../org/apache/hadoop/hdds/ratis/RatisHelper.java  |  26 +-
 .../hadoop/hdds/scm/ByteStringConversion.java      |   4 +-
 .../hadoop/hdds/scm/net/NetworkTopologyImpl.java   |   4 +-
 .../hadoop/hdds/scm/net/NodeSchemaManager.java     |   4 +-
 .../hadoop/hdds/security/x509/SecurityConfig.java  |  22 +-
 .../apache/hadoop/hdds/tracing/TracingUtil.java    |   7 +-
 .../utils/LegacyHadoopConfigurationSource.java     |  74 +++
 .../org/apache/hadoop/ozone/OzoneSecurityUtil.java |  28 +-
 .../org/apache/hadoop/ozone/lock/LockManager.java  |   6 +-
 .../java/org/apache/hadoop/hdds/TestHddsUtils.java |   7 +-
 .../hadoop/hdds/conf/TestOzoneConfiguration.java   |  18 +-
 .../org/apache/hadoop/hdds/fs/TestDUFactory.java   |   8 +-
 .../fs/TestDedicatedDiskSpaceUsageFactory.java     |   8 +-
 .../hadoop/hdds/fs/TestSpaceUsageFactory.java      |  40 +-
 .../hdds/scm/net/TestNetworkTopologyImpl.java      |  38 +-
 .../hadoop/hdds/scm/net/TestNodeSchemaManager.java |  16 +-
 hadoop-hdds/config/pom.xml                         |   5 +-
 .../hdds/conf/ConfigurationReflectionUtil.java     | 159 +++++++
 .../hadoop/hdds/conf/ConfigurationSource.java      | 291 ++++++++++++
 .../org/apache/hadoop/hdds/conf/StorageSize.java   | 102 ++++
 .../org/apache/hadoop/hdds/conf/StorageUnit.java   | 529 +++++++++++++++++++++
 .../apache/hadoop/hdds/conf/TimeDurationUtil.java  | 154 ++++++
 .../hadoop/ozone/HddsDatanodeHttpServer.java       |   4 +-
 .../apache/hadoop/ozone/HddsDatanodeService.java   |   4 +-
 .../container/common/helpers/ContainerMetrics.java |   4 +-
 .../container/common/impl/ChunkLayOutVersion.java  |  19 +-
 .../container/common/impl/HddsDispatcher.java      |   6 +-
 .../ozone/container/common/interfaces/Handler.java |   8 +-
 .../container/common/report/ReportManager.java     |  21 +-
 .../container/common/report/ReportPublisher.java   |  27 +-
 .../common/report/ReportPublisherFactory.java      |  32 +-
 .../common/statemachine/DatanodeStateMachine.java  |  51 +-
 .../common/statemachine/EndpointStateMachine.java  |   6 +-
 .../common/statemachine/SCMConnectionManager.java  |  63 +--
 .../common/statemachine/StateContext.java          |  62 ++-
 .../CreatePipelineCommandHandler.java              |  29 +-
 .../commandhandler/DeleteBlocksCommandHandler.java |   6 +-
 .../ReplicateContainerCommandHandler.java          |   6 +-
 .../common/states/datanode/InitDatanodeState.java  |  32 +-
 .../states/datanode/RunningDatanodeState.java      |   6 +-
 .../states/endpoint/HeartbeatEndpointTask.java     |  10 +-
 .../states/endpoint/RegisterEndpointTask.java      |  10 +-
 .../states/endpoint/VersionEndpointTask.java       |  23 +-
 .../common/transport/server/XceiverServerGrpc.java |  28 +-
 .../server/ratis/ContainerStateMachine.java        |  87 ++--
 .../transport/server/ratis/XceiverServerRatis.java |  92 ++--
 .../container/common/utils/ContainerCache.java     |  24 +-
 .../ozone/container/common/volume/HddsVolume.java  |  29 +-
 .../container/common/volume/HddsVolumeChecker.java |   4 +-
 .../container/common/volume/MutableVolumeSet.java  |  13 +-
 .../ozone/container/common/volume/VolumeInfo.java  |  17 +-
 .../container/keyvalue/KeyValueContainer.java      |  49 +-
 .../container/keyvalue/KeyValueContainerCheck.java |   6 +-
 .../ozone/container/keyvalue/KeyValueHandler.java  |  17 +-
 .../container/keyvalue/helpers/BlockUtils.java     |  24 +-
 .../keyvalue/helpers/KeyValueContainerUtil.java    |   8 +-
 .../container/keyvalue/impl/BlockManagerImpl.java  |   6 +-
 .../keyvalue/impl/ChunkManagerFactory.java         |   4 +-
 .../background/BlockDeletingService.java           |  81 ++--
 .../ozone/container/ozoneimpl/ContainerReader.java |  30 +-
 .../ozone/container/ozoneimpl/OzoneContainer.java  |   6 +-
 .../replication/SimpleContainerDownloader.java     |   4 +-
 .../ozone/container/common/ContainerTestUtils.java |  21 +-
 .../ozone/container/common/SCMTestUtils.java       |  23 +-
 .../container/common/TestBlockDeletingService.java |  63 +--
 .../container/common/TestDatanodeStateMachine.java |  60 ++-
 .../container/common/interfaces/TestHandler.java   |  13 +-
 .../container/common/report/TestReportManager.java |  11 +-
 .../common/report/TestReportPublisher.java         |   4 +-
 .../common/report/TestReportPublisherFactory.java  |  14 +-
 .../states/endpoint/TestHeartbeatEndpointTask.java |  52 +-
 .../container/common/volume/TestHddsVolume.java    |  28 +-
 .../volume/TestRoundRobinVolumeChoosingPolicy.java |  22 +-
 .../common/volume/TestVolumeSetDiskChecks.java     |  14 +-
 .../keyvalue/TestKeyValueBlockIterator.java        |  43 +-
 .../container/keyvalue/TestKeyValueHandler.java    |   5 +-
 .../testutils/BlockDeletingServiceTestImpl.java    |  16 +-
 .../apache/hadoop/hdds/conf/HddsConfServlet.java   |  30 +-
 .../certificates/utils/CertificateSignRequest.java |  23 +-
 .../certificates/utils/SelfSignedCertificate.java  |  27 +-
 .../hdds/security/x509/keys/HDDSKeyGenerator.java  |  13 +-
 .../org/apache/hadoop/hdds/server/ServerUtils.java |  12 +-
 .../hadoop/hdds/server/http/BaseHttpServer.java    |  27 +-
 .../hadoop/hdds/server/http/FilterInitializer.java |   4 +-
 .../apache/hadoop/hdds/server/http/HttpConfig.java |   4 +-
 .../hadoop/hdds/server/http/HttpServer2.java       |  98 ++--
 .../hdds/server/http/StaticUserWebFilter.java      |   6 +-
 .../apache/hadoop/hdds/utils/HddsServerUtil.java   |  39 +-
 .../hadoop/hdds/utils/MetadataStoreBuilder.java    |  13 +-
 .../hadoop/hdds/utils/db/DBStoreBuilder.java       |   4 +-
 .../apache/hadoop/hdds/server/TestServerUtils.java |  21 +-
 .../hdds/server/http/TestBaseHttpServer.java       |   4 +-
 .../hadoop/hdds/utils/TestMetadataStore.java       |  13 +-
 .../hadoop/hdds/utils/TestRocksDBStoreMBean.java   |  23 +-
 .../hadoop/hdds/scm/SCMCommonPlacementPolicy.java  |  35 +-
 .../hadoop/hdds/scm/block/BlockManagerImpl.java    |  31 +-
 .../hadoop/hdds/scm/block/DeletedBlockLogImpl.java |   4 +-
 .../hdds/scm/block/SCMBlockDeletingService.java    |   4 +-
 .../hdds/scm/container/ContainerStateManager.java  |  17 +-
 .../hdds/scm/container/SCMContainerManager.java    |  52 +-
 .../ContainerPlacementPolicyFactory.java           |  11 +-
 .../algorithms/SCMContainerPlacementCapacity.java  |   4 +-
 .../algorithms/SCMContainerPlacementRackAware.java |   4 +-
 .../algorithms/SCMContainerPlacementRandom.java    |   4 +-
 .../hadoop/hdds/scm/node/NewNodeHandler.java       |  15 +-
 .../hadoop/hdds/scm/node/NodeStateManager.java     |  55 ++-
 .../scm/node/NonHealthyToHealthyNodeHandler.java   |   4 +-
 .../hadoop/hdds/scm/node/StaleNodeHandler.java     |   4 +-
 .../scm/pipeline/BackgroundPipelineCreator.java    |   6 +-
 .../hdds/scm/pipeline/PipelineActionHandler.java   |   4 +-
 .../hadoop/hdds/scm/pipeline/PipelineFactory.java  |   4 +-
 .../hdds/scm/pipeline/PipelinePlacementPolicy.java |   6 +-
 .../hdds/scm/pipeline/PipelineReportHandler.java   |   6 +-
 .../hdds/scm/pipeline/RatisPipelineProvider.java   |  15 +-
 .../hdds/scm/pipeline/RatisPipelineUtils.java      |  11 +-
 .../hdds/scm/pipeline/SCMPipelineManager.java      |  57 +--
 .../hdds/scm/safemode/ContainerSafeModeRule.java   |   4 +-
 .../hdds/scm/safemode/DataNodeSafeModeRule.java    |   4 +-
 .../scm/safemode/HealthyPipelineSafeModeRule.java  |  11 +-
 .../safemode/OneReplicaPipelineSafeModeRule.java   |   4 +-
 .../hdds/scm/safemode/SCMSafeModeManager.java      |  11 +-
 .../hadoop/hdds/scm/safemode/SafeModeHandler.java  |   4 +-
 .../hadoop/hdds/scm/safemode/SafeModePrecheck.java |   4 +-
 .../hdds/scm/server/StorageContainerManager.java   |  76 +--
 .../server/StorageContainerManagerHttpServer.java  |   8 +-
 .../apache/hadoop/hdds/scm/TestHddsServerUtil.java |  18 +-
 .../hadoop/hdds/scm/TestHddsServerUtils.java       |  42 +-
 .../scm/TestStorageContainerManagerHttpServer.java |   6 +-
 .../container/TestCloseContainerEventHandler.java  |  22 +-
 .../scm/container/TestContainerReportHandler.java  |   4 +-
 .../TestIncrementalContainerReportHandler.java     |   4 +-
 .../hdds/scm/container/TestReplicationManager.java |   4 +-
 .../scm/container/TestSCMContainerManager.java     |  49 +-
 .../scm/container/TestUnknownContainerReport.java  |   4 +-
 .../algorithms/TestContainerPlacementFactory.java  |  19 +-
 .../TestSCMContainerPlacementCapacity.java         |   4 +-
 .../TestSCMContainerPlacementRackAware.java        |   4 +-
 .../TestSCMContainerPlacementRandom.java           |   4 +-
 .../hdds/scm/node/TestContainerPlacement.java      |  38 +-
 .../scm/pipeline/MockRatisPipelineProvider.java    |  20 +-
 .../scm/pipeline/TestPipelinePlacementPolicy.java  |  31 +-
 .../hdds/scm/pipeline/TestSCMPipelineManager.java  |  26 +-
 .../hdds/scm/safemode/TestSCMSafeModeManager.java  |  13 +-
 .../ozone/container/common/TestEndPoint.java       |   7 +-
 .../placement/TestContainerPlacement.java          |  32 +-
 .../hdds/scm/cli/ContainerOperationClient.java     |  17 +-
 .../apache/hadoop/ozone/client/ObjectStore.java    |   4 +-
 .../apache/hadoop/ozone/client/OzoneBucket.java    |   8 +-
 .../apache/hadoop/ozone/client/OzoneClient.java    |   4 +-
 .../hadoop/ozone/client/OzoneClientFactory.java    |  20 +-
 .../apache/hadoop/ozone/client/OzoneVolume.java    |  18 +-
 .../hadoop/ozone/client/rpc/OzoneKMSUtil.java      |  32 +-
 .../apache/hadoop/ozone/client/rpc/RpcClient.java  |  77 +--
 .../hadoop/ozone/client/TestHddsClientUtils.java   |  41 +-
 .../main/java/org/apache/hadoop/ozone/OmUtils.java |  33 +-
 .../apache/hadoop/ozone/freon/OzoneGetConf.java    |  24 +-
 .../ozone/om/ha/OMFailoverProxyProvider.java       |  50 +-
 .../hadoop/ozone/om/lock/OzoneManagerLock.java     |   4 +-
 ...OzoneManagerProtocolClientSideTranslatorPB.java |   4 +-
 .../apache/hadoop/ozone/web/utils/OzoneUtils.java  |   7 +-
 .../ozone/contract/ITestOzoneContractCreate.java   |   5 +-
 .../ozone/contract/ITestOzoneContractDelete.java   |   5 +-
 .../ozone/contract/ITestOzoneContractDistCp.java   |   5 +-
 .../contract/ITestOzoneContractGetFileStatus.java  |   5 +-
 .../fs/ozone/contract/ITestOzoneContractMkdir.java |   5 +-
 .../fs/ozone/contract/ITestOzoneContractOpen.java  |   5 +-
 .../ozone/contract/ITestOzoneContractRename.java   |   5 +-
 .../ozone/contract/ITestOzoneContractRootDir.java  |   6 +-
 .../fs/ozone/contract/ITestOzoneContractSeek.java  |   5 +-
 .../org/apache/hadoop/ozone/MiniOzoneCluster.java  |  20 +-
 .../org/apache/hadoop/ozone/RatisTestHelper.java   |   3 +-
 .../apache/hadoop/ozone/TestMiniOzoneCluster.java  |  25 +-
 .../ozone/TestStorageContainerManagerHelper.java   |  10 +-
 .../ozone/client/CertificateClientTestImpl.java    |  24 +-
 .../client/rpc/TestOzoneAtRestEncryption.java      |  23 +-
 .../client/rpc/TestOzoneRpcClientAbstract.java     |   9 +-
 .../ozone/dn/ratis/TestDnRatisLogParser.java       |  16 +-
 .../apache/hadoop/ozone/om/KeyDeletingService.java |   4 +-
 .../java/org/apache/hadoop/ozone/om/OMStorage.java |   4 +-
 .../hadoop/ozone/om/OzoneManagerHttpServer.java    |  10 +-
 .../ozone/om/ratis/OzoneManagerRatisServer.java    |  33 +-
 .../om/snapshot/OzoneManagerSnapshotProvider.java  |   6 +-
 .../org/apache/hadoop/ozone/om/TestOMStorage.java  |   9 +-
 .../ozone/om/TestOzoneManagerHttpServer.java       |  21 +-
 .../ozone/security/TestOzoneTokenIdentifier.java   |  19 +-
 .../java/org/apache/hadoop/fs/ozone/BasicOzFs.java |  12 +-
 .../fs/ozone/BasicOzoneClientAdapterImpl.java      |  17 +-
 .../hadoop/fs/ozone/BasicOzoneFileSystem.java      |  20 +-
 .../org/apache/hadoop/fs/ozone/O3fsDtFetcher.java  |   8 +-
 .../main/java/org/apache/hadoop/fs/ozone/OzFs.java |  12 +-
 .../hadoop/fs/ozone/OzoneClientAdapterImpl.java    |   5 +-
 .../apache/hadoop/fs/ozone/OzoneFileSystem.java    |  10 +-
 .../org/apache/hadoop/fs/ozone/OzoneFsShell.java   |   5 +-
 .../fs/ozone/TestOzoneFileSystemWithMocks.java     |  31 +-
 .../hadoop/fs/ozone/TestReadWriteStatistics.java   |  41 +-
 .../hadoop/ozone/recon/ConfigurationProvider.java  |   5 +-
 .../org/apache/hadoop/ozone/recon/ReconUtils.java  |  27 +-
 .../ozone/recon/scm/ReconContainerManager.java     |  12 +-
 .../hadoop/ozone/recon/scm/ReconNodeManager.java   |  17 +-
 .../ozone/recon/scm/ReconPipelineManager.java      |  15 +-
 .../recon/scm/ReconPipelineReportHandler.java      |   4 +-
 .../hadoop/ozone/s3/S3GatewayHttpServer.java       |   4 +-
 .../org/apache/hadoop/ozone/admin/OzoneAdmin.java  |   4 +-
 .../apache/hadoop/ozone/freon/FreonHttpServer.java |   4 +-
 .../ozone/genesis/BenchMarkDatanodeDispatcher.java |   5 +-
 .../ozone/genesis/BenchmarkChunkManager.java       |  33 +-
 .../apache/hadoop/ozone/genesis/GenesisUtil.java   |  34 +-
 216 files changed, 3303 insertions(+), 2083 deletions(-)
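
The bulk of the diff below is a mechanical signature change: methods that took org.apache.hadoop.conf.Configuration now take the new org.apache.hadoop.hdds.conf.ConfigurationSource. As a minimal sketch of the idea (not the actual 291-line interface added under hadoop-hdds/config), the pattern is a small read-oriented contract whose typed accessors can be default methods layered over a single string lookup; the three abstract methods match the overrides visible in LegacyHadoopConfigurationSource further down, while the default-method form of the typed getters is an assumption:

    import java.util.Collection;

    // Hedged sketch, assuming the typed getters are defaults built on get().
    public interface ConfigurationSource {
      String get(String key);
      Collection<String> getConfigKeys();
      void set(String key, String value);

      default String get(String key, String defaultValue) {
        String value = get(key);
        return value != null ? value : defaultValue;
      }

      default int getInt(String key, int defaultValue) {
        String value = get(key);
        return value != null ? Integer.parseInt(value.trim()) : defaultValue;
      }

      default boolean getBoolean(String key, boolean defaultValue) {
        String value = get(key);
        return value != null ? Boolean.parseBoolean(value.trim()) : defaultValue;
      }
    }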

diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientGrpc.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientGrpc.java
index 668fdaa..0771f4c 100644
--- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientGrpc.java
+++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientGrpc.java
@@ -18,10 +18,23 @@
 
 package org.apache.hadoop.hdds.scm;
 
-import com.google.common.annotations.VisibleForTesting;
-import com.google.common.base.Preconditions;
-import org.apache.hadoop.conf.Configuration;
+import java.io.IOException;
+import java.io.InterruptedIOException;
+import java.security.cert.X509Certificate;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.UUID;
+import java.util.concurrent.CompletableFuture;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.Semaphore;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.TimeoutException;
+
 import org.apache.hadoop.hdds.HddsUtils;
+import org.apache.hadoop.hdds.conf.ConfigurationSource;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerCommandRequestProto;
@@ -41,6 +54,8 @@ import org.apache.hadoop.ozone.OzoneConfigKeys;
 import org.apache.hadoop.ozone.OzoneConsts;
 import org.apache.hadoop.util.Time;
 
+import com.google.common.annotations.VisibleForTesting;
+import com.google.common.base.Preconditions;
 import io.opentracing.Scope;
 import io.opentracing.util.GlobalTracer;
 import org.apache.ratis.thirdparty.io.grpc.ManagedChannel;
@@ -52,28 +67,13 @@ import org.apache.ratis.thirdparty.io.netty.handler.ssl.SslContextBuilder;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import java.io.IOException;
-import java.io.InterruptedIOException;
-import java.security.cert.X509Certificate;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-import java.util.UUID;
-import java.util.concurrent.CompletableFuture;
-import java.util.concurrent.ConcurrentHashMap;
-import java.util.concurrent.ExecutionException;
-import java.util.concurrent.Semaphore;
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.TimeoutException;
-
 /**
  * A Client for the storageContainer protocol for read object data.
  */
 public class XceiverClientGrpc extends XceiverClientSpi {
   static final Logger LOG = LoggerFactory.getLogger(XceiverClientGrpc.class);
   private final Pipeline pipeline;
-  private final Configuration config;
+  private final ConfigurationSource config;
   private Map<UUID, XceiverClientProtocolServiceStub> asyncStubs;
   private XceiverClientMetrics metrics;
   private Map<UUID, ManagedChannel> channels;
@@ -94,7 +94,7 @@ public class XceiverClientGrpc extends XceiverClientSpi {
    * @param config   -- Ozone Config
    * @param caCert   - SCM ca certificate.
    */
-  public XceiverClientGrpc(Pipeline pipeline, Configuration config,
+  public XceiverClientGrpc(Pipeline pipeline, ConfigurationSource config,
       X509Certificate caCert) {
     super();
     Preconditions.checkNotNull(pipeline);
@@ -121,7 +121,7 @@ public class XceiverClientGrpc extends XceiverClientSpi {
    * @param pipeline - Pipeline that defines the machines.
    * @param config   -- Ozone Config
    */
-  public XceiverClientGrpc(Pipeline pipeline, Configuration config) {
+  public XceiverClientGrpc(Pipeline pipeline, ConfigurationSource config) {
     this(pipeline, config, null);
   }
 
diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientManager.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientManager.java
index 4ceff0b..0cfaca7 100644
--- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientManager.java
+++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientManager.java
@@ -24,10 +24,11 @@ import com.google.common.cache.Cache;
 import com.google.common.cache.CacheBuilder;
 import com.google.common.cache.RemovalListener;
 import com.google.common.cache.RemovalNotification;
-import org.apache.hadoop.conf.Configuration;
+
 import org.apache.hadoop.hdds.conf.Config;
 import org.apache.hadoop.hdds.conf.ConfigGroup;
 import org.apache.hadoop.hdds.conf.ConfigType;
+import org.apache.hadoop.hdds.conf.ConfigurationSource;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
@@ -69,7 +70,7 @@ public class XceiverClientManager implements Closeable {
   private static final Logger LOG =
       LoggerFactory.getLogger(XceiverClientManager.class);
   //TODO : change this to SCM configuration class
-  private final Configuration conf;
+  private final ConfigurationSource conf;
   private final Cache<String, XceiverClientSpi> clientCache;
   private X509Certificate caCert;
 
@@ -83,12 +84,13 @@ public class XceiverClientManager implements Closeable {
    *
    * @param conf configuration
    */
-  public XceiverClientManager(Configuration conf) throws IOException {
+  public XceiverClientManager(ConfigurationSource conf) throws IOException {
     this(conf, OzoneConfiguration.of(conf).getObject(ScmClientConfig.class),
         null);
   }
 
-  public XceiverClientManager(Configuration conf, ScmClientConfig clientConf,
+  public XceiverClientManager(ConfigurationSource conf,
+      ScmClientConfig clientConf,
       String caCertPem) throws IOException {
     Preconditions.checkNotNull(clientConf);
     Preconditions.checkNotNull(conf);
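
Since OzoneConfiguration itself implements ConfigurationSource (see the OzoneConfiguration.java hunk below), call sites that already pass an OzoneConfiguration compile unchanged; only code holding a bare Hadoop Configuration needs the wrapper. A hedged usage sketch, imports elided:

    // Sketch: constructing a client manager through the new signature.
    // XceiverClientManager accepts any ConfigurationSource implementation.
    static XceiverClientManager newManager() throws IOException {
      OzoneConfiguration conf = new OzoneConfiguration();
      return new XceiverClientManager(conf);
    }
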
diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientRatis.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientRatis.java
index 0d12355..ae5fe67 100644
--- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientRatis.java
+++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientRatis.java
@@ -34,20 +34,25 @@ import java.util.concurrent.TimeoutException;
 import java.util.concurrent.atomic.AtomicReference;
 import java.util.stream.Collectors;
 
-import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdds.HddsUtils;
+import org.apache.hadoop.hdds.conf.ConfigurationSource;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerCommandRequestProto;
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerCommandResponseProto;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
 import org.apache.hadoop.hdds.ratis.ContainerCommandRequestMessage;
+import org.apache.hadoop.hdds.ratis.RatisHelper;
 import org.apache.hadoop.hdds.scm.client.HddsClientUtils;
 import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
 import org.apache.hadoop.hdds.security.x509.SecurityConfig;
 import org.apache.hadoop.hdds.tracing.TracingUtil;
 import org.apache.hadoop.util.Time;
-import org.apache.hadoop.hdds.ratis.RatisHelper;
+
+import com.google.common.annotations.VisibleForTesting;
+import com.google.common.base.Preconditions;
+import io.opentracing.Scope;
+import io.opentracing.util.GlobalTracer;
 import org.apache.ratis.client.RaftClient;
 import org.apache.ratis.grpc.GrpcTlsConfig;
 import org.apache.ratis.proto.RaftProtos;
@@ -61,12 +66,6 @@ import org.apache.ratis.thirdparty.com.google.protobuf.InvalidProtocolBufferExce
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import com.google.common.annotations.VisibleForTesting;
-import com.google.common.base.Preconditions;
-
-import io.opentracing.Scope;
-import io.opentracing.util.GlobalTracer;
-
 /**
  * An abstract implementation of {@link XceiverClientSpi} using Ratis.
  * The underlying RPC mechanism can be chosen via the constructor.
@@ -77,13 +76,13 @@ public final class XceiverClientRatis extends XceiverClientSpi {
 
   public static XceiverClientRatis newXceiverClientRatis(
       org.apache.hadoop.hdds.scm.pipeline.Pipeline pipeline,
-      Configuration ozoneConf) {
+      ConfigurationSource ozoneConf) {
     return newXceiverClientRatis(pipeline, ozoneConf, null);
   }
 
   public static XceiverClientRatis newXceiverClientRatis(
       org.apache.hadoop.hdds.scm.pipeline.Pipeline pipeline,
-      Configuration ozoneConf, X509Certificate caCert) {
+      ConfigurationSource ozoneConf, X509Certificate caCert) {
     final String rpcType = ozoneConf
         .get(ScmConfigKeys.DFS_CONTAINER_RATIS_RPC_TYPE_KEY,
             ScmConfigKeys.DFS_CONTAINER_RATIS_RPC_TYPE_DEFAULT);
@@ -100,7 +99,7 @@ public final class XceiverClientRatis extends XceiverClientSpi {
   private final AtomicReference<RaftClient> client = new AtomicReference<>();
   private final RetryPolicy retryPolicy;
   private final GrpcTlsConfig tlsConfig;
-  private final Configuration ozoneConfiguration;
+  private final ConfigurationSource ozoneConfiguration;
 
   // Map to track commit index at every server
   private final ConcurrentHashMap<UUID, Long> commitInfoMap;
@@ -112,7 +111,7 @@ public final class XceiverClientRatis extends XceiverClientSpi {
    */
   private XceiverClientRatis(Pipeline pipeline, RpcType rpcType,
       RetryPolicy retryPolicy, GrpcTlsConfig tlsConfig,
-      Configuration configuration) {
+      ConfigurationSource configuration) {
     super();
     this.pipeline = pipeline;
     this.rpcType = rpcType;
diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/client/HddsClientUtils.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/client/HddsClientUtils.java
index 6789969..8a6518d 100644
--- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/client/HddsClientUtils.java
+++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/client/HddsClientUtils.java
@@ -29,9 +29,9 @@ import java.util.Map;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.TimeoutException;
 
-import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdds.annotation.InterfaceAudience;
 import org.apache.hadoop.hdds.annotation.InterfaceStability;
+import org.apache.hadoop.hdds.conf.ConfigurationSource;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.conf.RatisClientConfig;
 import org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException;
@@ -213,17 +213,16 @@ public final class HddsClientUtils {
    * @param conf Configuration object
    * @return list cache size
    */
-  public static int getListCacheSize(Configuration conf) {
+  public static int getListCacheSize(ConfigurationSource conf) {
     return conf.getInt(OzoneConfigKeys.OZONE_CLIENT_LIST_CACHE_SIZE,
         OzoneConfigKeys.OZONE_CLIENT_LIST_CACHE_SIZE_DEFAULT);
   }
 
-
   /**
    * Returns the maximum no of outstanding async requests to be handled by
    * Standalone and Ratis client.
    */
-  public static int getMaxOutstandingRequests(Configuration config) {
+  public static int getMaxOutstandingRequests(ConfigurationSource config) {
     return OzoneConfiguration.of(config)
         .getObject(RatisClientConfig.class)
         .getMaxOutstandingRequests();
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsUtils.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsUtils.java
index acbd456..d1c2bc3 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsUtils.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsUtils.java
@@ -33,11 +33,11 @@ import java.util.Optional;
 import java.util.OptionalInt;
 import java.util.TimeZone;
 
+import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
 import org.apache.hadoop.hdds.annotation.InterfaceAudience;
 import org.apache.hadoop.hdds.annotation.InterfaceStability;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
 import org.apache.hadoop.hdds.client.BlockID;
+import org.apache.hadoop.hdds.conf.ConfigurationSource;
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerCommandRequestProto;
 import org.apache.hadoop.hdds.scm.ScmConfigKeys;
@@ -89,7 +89,8 @@ public final class HddsUtils {
    *
    * @return Target {@code InetSocketAddress} for the SCM client endpoint.
    */
-  public static InetSocketAddress getScmAddressForClients(Configuration conf) {
+  public static InetSocketAddress getScmAddressForClients(
+      ConfigurationSource conf) {
     Optional<String> host = getHostNameFromConfigKeys(conf,
         ScmConfigKeys.OZONE_SCM_CLIENT_ADDRESS_KEY);
 
@@ -116,7 +117,7 @@ public final class HddsUtils {
    * @throws IllegalArgumentException if configuration is not defined.
    */
   public static InetSocketAddress getScmAddressForBlockClients(
-      Configuration conf) {
+      ConfigurationSource conf) {
     Optional<String> host = getHostNameFromConfigKeys(conf,
         ScmConfigKeys.OZONE_SCM_BLOCK_CLIENT_ADDRESS_KEY,
         ScmConfigKeys.OZONE_SCM_CLIENT_ADDRESS_KEY);
@@ -147,7 +148,8 @@ public final class HddsUtils {
    * @throws IllegalArgumentException if any values are not in the 'host'
    *             or host:port format.
    */
-  public static Optional<String> getHostNameFromConfigKeys(Configuration conf,
+  public static Optional<String> getHostNameFromConfigKeys(
+      ConfigurationSource conf,
       String... keys) {
     for (final String key : keys) {
       final String value = conf.getTrimmed(key);
@@ -206,7 +208,7 @@ public final class HddsUtils {
    *             or host:port format.
    */
   public static OptionalInt getPortNumberFromConfigKeys(
-      Configuration conf, String... keys) {
+      ConfigurationSource conf, String... keys) {
     for (final String key : keys) {
       final String value = conf.getTrimmed(key);
       final OptionalInt hostPort = getHostPort(value);
@@ -224,7 +226,7 @@ public final class HddsUtils {
    * @throws IllegalArgumentException If the configuration is invalid
    */
   public static Collection<InetSocketAddress> getSCMAddresses(
-      Configuration conf) {
+      ConfigurationSource conf) {
     Collection<String> names =
         conf.getTrimmedStringCollection(ScmConfigKeys.OZONE_SCM_NAMES);
     if (names.isEmpty()) {
@@ -255,7 +257,7 @@ public final class HddsUtils {
    * @throws IllegalArgumentException If the configuration is invalid
    */
   public static InetSocketAddress getReconAddresses(
-      Configuration conf) {
+      ConfigurationSource conf) {
     String name = conf.get(OZONE_RECON_ADDRESS_KEY);
     if (StringUtils.isEmpty(name)) {
       return null;
@@ -277,7 +279,8 @@ public final class HddsUtils {
    * @throws IllegalArgumentException if {@code conf} has more than one SCM
    *         address or it has none
    */
-  public static InetSocketAddress getSingleSCMAddress(Configuration conf) {
+  public static InetSocketAddress getSingleSCMAddress(
+      ConfigurationSource conf) {
     Collection<InetSocketAddress> singleton = getSCMAddresses(conf);
     Preconditions.checkArgument(singleton.size() == 1,
         MULTIPLE_SCM_NOT_YET_SUPPORTED);
@@ -295,7 +298,7 @@ public final class HddsUtils {
    * @throws UnknownHostException if the dfs.datanode.dns.interface
    *    option is used and the hostname can not be determined
    */
-  public static String getHostName(Configuration conf)
+  public static String getHostName(ConfigurationSource conf)
       throws UnknownHostException {
     String name = conf.get(DFS_DATANODE_HOST_NAME_KEY);
     if (name == null) {
@@ -498,7 +501,7 @@ public final class HddsUtils {
    * @param alias name of the credential to retreive
    * @return String credential value or null
    */
-  static String getPassword(Configuration conf, String alias) {
+  static String getPassword(ConfigurationSource conf, String alias) {
     String password = null;
     try {
       char[] passchars = conf.getPassword(alias);
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/conf/OzoneConfiguration.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/conf/OzoneConfiguration.java
index fda3c86..c15f06a 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/conf/OzoneConfiguration.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/conf/OzoneConfiguration.java
@@ -6,9 +6,9 @@
  * to you under the Apache License, Version 2.0 (the
  * "License"); you may not use this file except in compliance
  * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
  * Unless required by applicable law or agreed to in writing, software
  * distributed under the License is distributed on an "AS IS" BASIS,
  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
@@ -18,7 +18,6 @@
 
 package org.apache.hadoop.hdds.conf;
 
-import javax.annotation.PostConstruct;
 import javax.xml.bind.JAXBContext;
 import javax.xml.bind.JAXBException;
 import javax.xml.bind.Unmarshaller;
@@ -27,30 +26,44 @@ import javax.xml.bind.annotation.XmlAccessorType;
 import javax.xml.bind.annotation.XmlElement;
 import javax.xml.bind.annotation.XmlRootElement;
 import java.io.IOException;
-import java.lang.reflect.Field;
-import java.lang.reflect.InvocationTargetException;
-import java.lang.reflect.Method;
 import java.net.URL;
 import java.util.ArrayList;
+import java.util.Collection;
 import java.util.Enumeration;
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
 import java.util.Properties;
+import java.util.stream.Collectors;
 
-import com.google.common.base.Preconditions;
-import org.apache.hadoop.hdds.annotation.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdds.annotation.InterfaceAudience;
+import org.apache.hadoop.hdds.utils.LegacyHadoopConfigurationSource;
+
+import com.google.common.base.Preconditions;
 
 /**
  * Configuration for ozone.
  */
 @InterfaceAudience.Private
-public class OzoneConfiguration extends Configuration {
+public class OzoneConfiguration extends Configuration
+    implements ConfigurationSource {
   static {
     activate();
   }
 
+  public static OzoneConfiguration of(ConfigurationSource source) {
+    if (source instanceof LegacyHadoopConfigurationSource) {
+      return new OzoneConfiguration(((LegacyHadoopConfigurationSource) source)
+          .getOriginalHadoopConfiguration());
+    }
+    return (OzoneConfiguration) source;
+  }
+
+  public static OzoneConfiguration of(OzoneConfiguration source) {
+    return source;
+  }
+
   public static OzoneConfiguration of(Configuration conf) {
     Preconditions.checkNotNull(conf);
 
@@ -99,157 +112,6 @@ public class OzoneConfiguration extends Configuration {
   }
 
   /**
-   * Create a Configuration object and inject the required configuration values.
-   *
-   * @param configurationClass The class where the fields are annotated with
-   *                           the configuration.
-   * @return Initiated java object where the config fields are injected.
-   */
-  public <T> T getObject(Class<T> configurationClass) {
-
-    T configObject;
-
-    try {
-      configObject = configurationClass.newInstance();
-    } catch (InstantiationException | IllegalAccessException e) {
-      throw new ConfigurationException(
-          "Configuration class can't be created: " + configurationClass, e);
-    }
-    ConfigGroup configGroup =
-        configurationClass.getAnnotation(ConfigGroup.class);
-    
-    String prefix = configGroup.prefix();
-
-    injectConfiguration(configurationClass, configObject, prefix);
-
-    callPostConstruct(configurationClass, configObject);
-
-    return configObject;
-
-  }
-
-  private <T> void injectConfiguration(Class<T> configurationClass,
-      T configObject, String prefix) {
-    injectConfigurationToObject(configurationClass, configObject, prefix);
-    Class<? super T> superClass = configurationClass.getSuperclass();
-    while (superClass != null) {
-      injectConfigurationToObject(superClass, configObject, prefix);
-      superClass = superClass.getSuperclass();
-    }
-  }
-
-  private <T> void callPostConstruct(Class<T> configurationClass,
-      T configObject) {
-    for (Method method : configurationClass.getMethods()) {
-      if (method.isAnnotationPresent(PostConstruct.class)) {
-        try {
-          method.invoke(configObject);
-        } catch (IllegalAccessException ex) {
-          throw new IllegalArgumentException(
-              "@PostConstruct method in " + configurationClass
-                  + " is not accessible");
-        } catch (InvocationTargetException e) {
-          if (e.getCause() instanceof RuntimeException) {
-            throw (RuntimeException) e.getCause();
-          } else {
-            throw new IllegalArgumentException(
-                "@PostConstruct can't be executed on " + configurationClass
-                    + " after configObject "
-                    + "injection", e);
-          }
-        }
-      }
-    }
-  }
-
-  private <T> void injectConfigurationToObject(Class<T> configurationClass,
-      T configuration, String prefix) {
-    for (Field field : configurationClass.getDeclaredFields()) {
-      if (field.isAnnotationPresent(Config.class)) {
-
-        String fieldLocation =
-            configurationClass + "." + field.getName();
-
-        Config configAnnotation = field.getAnnotation(Config.class);
-
-        String key = prefix + "." + configAnnotation.key();
-
-        ConfigType type = configAnnotation.type();
-
-        if (type == ConfigType.AUTO) {
-          type = detectConfigType(field.getType(), fieldLocation);
-        }
-
-        //Note: default value is handled by ozone-default.xml. Here we can
-        //use any default.
-        try {
-          switch (type) {
-          case STRING:
-            forcedFieldSet(field, configuration, get(key));
-            break;
-          case INT:
-            forcedFieldSet(field, configuration, getInt(key, 0));
-            break;
-          case BOOLEAN:
-            forcedFieldSet(field, configuration, getBoolean(key, false));
-            break;
-          case LONG:
-            forcedFieldSet(field, configuration, getLong(key, 0));
-            break;
-          case TIME:
-            forcedFieldSet(field, configuration,
-                getTimeDuration(key, 0, configAnnotation.timeUnit()));
-            break;
-          default:
-            throw new ConfigurationException(
-                "Unsupported ConfigType " + type + " on " + fieldLocation);
-          }
-        } catch (IllegalAccessException e) {
-          throw new ConfigurationException(
-              "Can't inject configuration to " + fieldLocation, e);
-        }
-
-      }
-    }
-  }
-
-  /**
-   * Set the value of one field even if it's private.
-   */
-  private <T> void forcedFieldSet(Field field, T object, Object value)
-      throws IllegalAccessException {
-    boolean accessChanged = false;
-    if (!field.isAccessible()) {
-      field.setAccessible(true);
-      accessChanged = true;
-    }
-    field.set(object, value);
-    if (accessChanged) {
-      field.setAccessible(false);
-    }
-  }
-
-  private ConfigType detectConfigType(Class<?> parameterType,
-      String methodLocation) {
-    ConfigType type;
-    if (parameterType == String.class) {
-      type = ConfigType.STRING;
-    } else if (parameterType == Integer.class || parameterType == int.class) {
-      type = ConfigType.INT;
-    } else if (parameterType == Long.class || parameterType == long.class) {
-      type = ConfigType.LONG;
-    } else if (parameterType == Boolean.class
-        || parameterType == boolean.class) {
-      type = ConfigType.BOOLEAN;
-    } else {
-      throw new ConfigurationException(
-          "Unsupported configuration type " + parameterType + " in "
-              + methodLocation);
-    }
-    return type;
-  }
-
-  /**
    * Class to marshall/un-marshall configuration from xml files.
    */
   @XmlAccessorType(XmlAccessType.FIELD)
@@ -380,6 +242,14 @@ public class OzoneConfiguration extends Configuration {
   }
 
   @Override
+  public Collection<String> getConfigKeys() {
+    return getProps().keySet()
+        .stream()
+        .map(Object::toString)
+        .collect(Collectors.toList());
+  }
+
+  @Override
   public Map<String, String> getPropsWithPrefix(String confPrefix) {
     Properties props = getProps();
     Map<String, String> configMap = new HashMap<>();
@@ -392,4 +262,5 @@ public class OzoneConfiguration extends Configuration {
     }
     return configMap;
   }
+  
 }
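
The new of(ConfigurationSource) factory is the bridge in the other direction: when shared code needs the full OzoneConfiguration API (for example getObject(), whose injection logic appears to move into the new ConfigurationReflectionUtil in hadoop-hdds/config), it can normalize whatever source it was handed. A hedged sketch of the round trip for code still holding a Hadoop Configuration:

    // Sketch: wrap a legacy Hadoop Configuration to call the new APIs,
    // then recover a full OzoneConfiguration where one is required.
    static OzoneConfiguration bridge(Configuration hadoopConf) {
      ConfigurationSource source =
          new LegacyHadoopConfigurationSource(hadoopConf);
      // of() recognizes the wrapper and rebuilds an OzoneConfiguration
      // from the original Hadoop object, so existing settings carry over.
      return OzoneConfiguration.of(source);
    }
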
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/fs/DUFactory.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/fs/DUFactory.java
index 118da1f..5b57b34 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/fs/DUFactory.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/fs/DUFactory.java
@@ -17,16 +17,16 @@
  */
 package org.apache.hadoop.hdds.fs;
 
-import org.apache.hadoop.conf.Configuration;
+import java.io.File;
+import java.time.Duration;
+
 import org.apache.hadoop.hdds.conf.Config;
 import org.apache.hadoop.hdds.conf.ConfigGroup;
 import org.apache.hadoop.hdds.conf.ConfigTag;
 import org.apache.hadoop.hdds.conf.ConfigType;
+import org.apache.hadoop.hdds.conf.ConfigurationSource;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 
-import java.io.File;
-import java.time.Duration;
-
 /**
  * Uses DU for all volumes.  Saves used value in cache file.
  */
@@ -40,7 +40,8 @@ public class DUFactory implements SpaceUsageCheckFactory {
   private Conf conf;
 
   @Override
-  public SpaceUsageCheckFactory setConfiguration(Configuration configuration) {
+  public SpaceUsageCheckFactory setConfiguration(
+      ConfigurationSource configuration) {
     conf = OzoneConfiguration.of(configuration).getObject(Conf.class);
     return this;
   }
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/fs/DedicatedDiskSpaceUsageFactory.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/fs/DedicatedDiskSpaceUsageFactory.java
index 37953d9..3ed74c9 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/fs/DedicatedDiskSpaceUsageFactory.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/fs/DedicatedDiskSpaceUsageFactory.java
@@ -17,16 +17,16 @@
  */
 package org.apache.hadoop.hdds.fs;
 
-import org.apache.hadoop.conf.Configuration;
+import java.io.File;
+import java.time.Duration;
+
 import org.apache.hadoop.hdds.conf.Config;
 import org.apache.hadoop.hdds.conf.ConfigGroup;
 import org.apache.hadoop.hdds.conf.ConfigTag;
 import org.apache.hadoop.hdds.conf.ConfigType;
+import org.apache.hadoop.hdds.conf.ConfigurationSource;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 
-import java.io.File;
-import java.time.Duration;
-
 /**
  * Uses DedicatedDiskSpaceUsage for all volumes.  Does not save results since
  * the information is relatively cheap to obtain.
@@ -38,7 +38,8 @@ public class DedicatedDiskSpaceUsageFactory implements SpaceUsageCheckFactory {
   private Conf conf;
 
   @Override
-  public SpaceUsageCheckFactory setConfiguration(Configuration configuration) {
+  public SpaceUsageCheckFactory setConfiguration(
+      ConfigurationSource configuration) {
     conf = OzoneConfiguration.of(configuration).getObject(Conf.class);
     return this;
   }
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/fs/SpaceUsageCheckFactory.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/fs/SpaceUsageCheckFactory.java
index 3a3960b..0205de5 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/fs/SpaceUsageCheckFactory.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/fs/SpaceUsageCheckFactory.java
@@ -17,21 +17,22 @@
  */
 package org.apache.hadoop.hdds.fs;
 
+import java.io.File;
+import java.io.UncheckedIOException;
+import java.lang.reflect.Constructor;
+import java.lang.reflect.InvocationTargetException;
+
 import org.apache.hadoop.hdds.annotation.InterfaceAudience;
 import org.apache.hadoop.hdds.annotation.InterfaceStability;
-import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdds.conf.Config;
 import org.apache.hadoop.hdds.conf.ConfigGroup;
 import org.apache.hadoop.hdds.conf.ConfigTag;
+import org.apache.hadoop.hdds.conf.ConfigurationSource;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import java.io.File;
-import java.io.UncheckedIOException;
-import java.lang.reflect.Constructor;
-import java.lang.reflect.InvocationTargetException;
-
 /**
  * Configures disk space checks (du, df, etc.) for HDDS volumes, allowing
  * different implementations and parameters for different volumes.
@@ -57,7 +58,7 @@ public interface SpaceUsageCheckFactory {
    * Updates the factory with global configuration.
    * @return factory configured with {@code conf}
    */
-  default SpaceUsageCheckFactory setConfiguration(Configuration conf) {
+  default SpaceUsageCheckFactory setConfiguration(ConfigurationSource conf) {
     // override if configurable
     return this;
   }
@@ -68,14 +69,16 @@ public interface SpaceUsageCheckFactory {
    * Defaults to {@link DUFactory} if no class is configured or it cannot be
    * instantiated.
    */
-  static SpaceUsageCheckFactory create(Configuration config) {
+  static SpaceUsageCheckFactory create(ConfigurationSource config) {
     Conf conf = OzoneConfiguration.of(config).getObject(Conf.class);
     Class<? extends SpaceUsageCheckFactory> aClass = null;
     String className = conf.getClassName();
     if (className != null && !className.isEmpty()) {
       try {
-        aClass = config.getClassByName(className)
-            .asSubclass(SpaceUsageCheckFactory.class);
+        aClass =
+            SpaceUsageCheckFactory.class
+                .getClassLoader().loadClass(className)
+                .asSubclass(SpaceUsageCheckFactory.class);
       } catch (ClassNotFoundException | RuntimeException e) {
         Logger log = LoggerFactory.getLogger(SpaceUsageCheckFactory.class);
         log.warn("Error trying to create SpaceUsageCheckFactory: '{}'",
@@ -91,7 +94,8 @@ public interface SpaceUsageCheckFactory {
             aClass.getConstructor();
         instance = constructor.newInstance();
       } catch (IllegalAccessException | InstantiationException |
-          InvocationTargetException | NoSuchMethodException e) {
+          InvocationTargetException | NoSuchMethodException |
+          ClassCastException e) {
 
         Logger log = LoggerFactory.getLogger(SpaceUsageCheckFactory.class);
         log.warn("Error trying to create {}", aClass, e);
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/ratis/RatisHelper.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/ratis/RatisHelper.java
index 8fab633..08ead55 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/ratis/RatisHelper.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/ratis/RatisHelper.java
@@ -28,7 +28,7 @@ import java.util.UUID;
 import java.util.concurrent.TimeUnit;
 import java.util.stream.Collectors;
 
-import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdds.conf.ConfigurationSource;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 import org.apache.hadoop.hdds.scm.ScmConfigKeys;
 import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
@@ -146,27 +146,27 @@ public interface RatisHelper {
 
   static RaftClient newRaftClient(RpcType rpcType, Pipeline pipeline,
       RetryPolicy retryPolicy, GrpcTlsConfig tlsConfig,
-      Configuration ozoneConfiguration) throws IOException {
+      ConfigurationSource ozoneConfiguration) throws IOException {
     return newRaftClient(rpcType,
         toRaftPeerId(pipeline.getLeaderNode()),
         newRaftGroup(RaftGroupId.valueOf(pipeline.getId().getId()),
             pipeline.getNodes()), retryPolicy, tlsConfig, ozoneConfiguration);
   }
 
-  static RpcType getRpcType(Configuration conf) {
+  static RpcType getRpcType(ConfigurationSource conf) {
     return SupportedRpcType.valueOfIgnoreCase(conf.get(
         ScmConfigKeys.DFS_CONTAINER_RATIS_RPC_TYPE_KEY,
         ScmConfigKeys.DFS_CONTAINER_RATIS_RPC_TYPE_DEFAULT));
   }
 
-  static RaftClient newRaftClient(RaftPeer leader, Configuration conf) {
+  static RaftClient newRaftClient(RaftPeer leader, ConfigurationSource conf) {
     return newRaftClient(getRpcType(conf), leader,
         RatisHelper.createRetryPolicy(conf), conf);
   }
 
   static RaftClient newRaftClient(RpcType rpcType, RaftPeer leader,
       RetryPolicy retryPolicy, GrpcTlsConfig tlsConfig,
-      Configuration configuration) {
+      ConfigurationSource configuration) {
     return newRaftClient(rpcType, leader.getId(),
         newRaftGroup(Collections.singletonList(leader)), retryPolicy,
         tlsConfig, configuration);
@@ -174,7 +174,7 @@ public interface RatisHelper {
 
   static RaftClient newRaftClient(RpcType rpcType, RaftPeer leader,
       RetryPolicy retryPolicy,
-      Configuration ozoneConfiguration) {
+      ConfigurationSource ozoneConfiguration) {
     return newRaftClient(rpcType, leader.getId(),
         newRaftGroup(Collections.singletonList(leader)), retryPolicy, null,
         ozoneConfiguration);
@@ -183,7 +183,7 @@ public interface RatisHelper {
   @SuppressWarnings("checkstyle:ParameterNumber")
   static RaftClient newRaftClient(RpcType rpcType, RaftPeerId leader,
       RaftGroup group, RetryPolicy retryPolicy,
-      GrpcTlsConfig tlsConfig, Configuration ozoneConfiguration) {
+      GrpcTlsConfig tlsConfig, ConfigurationSource ozoneConfiguration) {
     if (LOG.isTraceEnabled()) {
       LOG.trace("newRaftClient: {}, leader={}, group={}",
           rpcType, leader, group);
@@ -215,7 +215,7 @@ public interface RatisHelper {
    * @param ozoneConf
    * @param raftProperties
    */
-  static void createRaftClientProperties(Configuration ozoneConf,
+  static void createRaftClientProperties(ConfigurationSource ozoneConf,
       RaftProperties raftProperties) {
 
     // As for client we do not require server and grpc server/tls. exclude them.
@@ -238,7 +238,7 @@ public interface RatisHelper {
    * @param ozoneConf
    * @param raftProperties
    */
-  static void createRaftServerProperties(Configuration ozoneConf,
+  static void createRaftServerProperties(ConfigurationSource ozoneConf,
        RaftProperties raftProperties) {
 
     Map<String, String> ratisServerConf =
@@ -253,7 +253,7 @@ public interface RatisHelper {
 
 
   static Map<String, String> getDatanodeRatisPrefixProps(
-      Configuration configuration) {
+      ConfigurationSource configuration) {
     return configuration.getPropsWithPrefix(HDDS_DATANODE_RATIS_PREFIX_KEY);
   }
 
@@ -269,11 +269,7 @@ public interface RatisHelper {
     return tlsConfig;
   }
 
-
-
-
-
-  static RetryPolicy createRetryPolicy(Configuration conf) {
+  static RetryPolicy createRetryPolicy(ConfigurationSource conf) {
     int maxRetryCount =
         conf.getInt(OzoneConfigKeys.DFS_RATIS_CLIENT_REQUEST_MAX_RETRIES_KEY,
             OzoneConfigKeys.
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ByteStringConversion.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ByteStringConversion.java
index c51d66a..dc44392 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ByteStringConversion.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ByteStringConversion.java
@@ -17,7 +17,7 @@
  */
 package org.apache.hadoop.hdds.scm;
 
-import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdds.conf.ConfigurationSource;
 import org.apache.hadoop.ozone.OzoneConfigKeys;
 import org.apache.ratis.thirdparty.com.google.protobuf.ByteString;
 import org.apache.ratis.thirdparty.com.google.protobuf.UnsafeByteOperations;
@@ -44,7 +44,7 @@ public final class ByteStringConversion {
    * @see ByteBuffer
    */
   public static Function<ByteBuffer, ByteString> createByteBufferConversion(
-      Configuration config){
+      ConfigurationSource config){
     boolean unsafeEnabled =
         config!=null && config.getBoolean(
             OzoneConfigKeys.OZONE_UNSAFEBYTEOPERATIONS_ENABLED,
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/NetworkTopologyImpl.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/NetworkTopologyImpl.java
index 58394f1..1db46c4 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/NetworkTopologyImpl.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/NetworkTopologyImpl.java
@@ -34,7 +34,7 @@ import java.util.concurrent.locks.ReadWriteLock;
 import java.util.concurrent.locks.ReentrantReadWriteLock;
 import java.util.stream.Collectors;
 
-import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdds.conf.ConfigurationSource;
 import static org.apache.hadoop.hdds.scm.net.NetConstants.ROOT;
 import static org.apache.hadoop.hdds.scm.net.NetConstants.SCOPE_REVERSE_STR;
 import static org.apache.hadoop.hdds.scm.net.NetConstants.ANCESTOR_GENERATION_DEFAULT;
@@ -60,7 +60,7 @@ public class NetworkTopologyImpl implements NetworkTopology{
   /** Lock to coordinate cluster tree access. */
   private ReadWriteLock netlock = new ReentrantReadWriteLock(true);
 
-  public NetworkTopologyImpl(Configuration conf) {
+  public NetworkTopologyImpl(ConfigurationSource conf) {
     schemaManager = NodeSchemaManager.getInstance();
     schemaManager.init(conf);
     maxLevel = schemaManager.getMaxLevel();
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/NodeSchemaManager.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/NodeSchemaManager.java
index c60c2c8..698b9da 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/NodeSchemaManager.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/net/NodeSchemaManager.java
@@ -24,7 +24,7 @@ import org.apache.hadoop.hdds.scm.net.NodeSchemaLoader.NodeSchemaLoadResult;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdds.conf.ConfigurationSource;
 
 import java.util.ArrayList;
 import java.util.Arrays;
@@ -55,7 +55,7 @@ public final class NodeSchemaManager {
     return instance;
   }
 
-  public void init(Configuration conf) {
+  public void init(ConfigurationSource conf) {
     /**
      * Load schemas from network topology schema configuration file
      */
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/SecurityConfig.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/SecurityConfig.java
index 9d077f6..394a0c3 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/SecurityConfig.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/SecurityConfig.java
@@ -19,14 +19,6 @@
 
 package org.apache.hadoop.hdds.security.x509;
 
-import com.google.common.base.Preconditions;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.ozone.OzoneConfigKeys;
-import org.apache.ratis.thirdparty.io.netty.handler.ssl.SslProvider;
-import org.bouncycastle.jce.provider.BouncyCastleProvider;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
 import java.nio.file.Path;
 import java.nio.file.Paths;
 import java.security.Provider;
@@ -34,6 +26,10 @@ import java.security.Security;
 import java.time.Duration;
 import java.util.concurrent.TimeUnit;
 
+import org.apache.hadoop.hdds.conf.ConfigurationSource;
+import org.apache.hadoop.ozone.OzoneConfigKeys;
+
+import com.google.common.base.Preconditions;
 import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_BLOCK_TOKEN_ENABLED;
 import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_BLOCK_TOKEN_ENABLED_DEFAULT;
 import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_DEFAULT_KEY_ALGORITHM;
@@ -71,6 +67,10 @@ import static org.apache.hadoop.hdds.HddsConfigKeys.OZONE_METADATA_DIRS;
 import static org.apache.hadoop.hdds.scm.ScmConfigKeys.HDDS_DATANODE_DIR_KEY;
 import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_SECURITY_ENABLED_DEFAULT;
 import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_SECURITY_ENABLED_KEY;
+import org.apache.ratis.thirdparty.io.netty.handler.ssl.SslProvider;
+import org.bouncycastle.jce.provider.BouncyCastleProvider;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * A class that deals with all Security related configs in HDDS.
@@ -82,7 +82,7 @@ public class SecurityConfig {
   private static final Logger LOG =
       LoggerFactory.getLogger(SecurityConfig.class);
   private static volatile Provider provider;
-  private final Configuration configuration;
+  private final ConfigurationSource configuration;
   private final int size;
   private final String keyAlgo;
   private final String providerString;
@@ -106,7 +106,7 @@ public class SecurityConfig {
    *
    * @param configuration - HDDS Configuration
    */
-  public SecurityConfig(Configuration configuration) {
+  public SecurityConfig(ConfigurationSource configuration) {
     Preconditions.checkNotNull(configuration, "Configuration cannot be null");
     this.configuration = configuration;
     this.size = this.configuration.getInt(HDDS_KEY_LEN, HDDS_DEFAULT_KEY_LEN);
@@ -305,7 +305,7 @@ public class SecurityConfig {
    *
    * @return Configuration
    */
-  public Configuration getConfiguration() {
+  public ConfigurationSource getConfiguration() {
     return configuration;
   }
 
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/tracing/TracingUtil.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/tracing/TracingUtil.java
index fcfa613..a9a2ab8 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/tracing/TracingUtil.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/tracing/TracingUtil.java
@@ -27,6 +27,7 @@ import io.opentracing.SpanContext;
 import io.opentracing.Tracer;
 import io.opentracing.util.GlobalTracer;
 
+import org.apache.hadoop.hdds.conf.ConfigurationSource;
 import org.apache.hadoop.hdds.scm.ScmConfigKeys;
 
 /**
@@ -43,7 +44,7 @@ public final class TracingUtil {
    * Initialize the tracing with the given service name.
    */
   public static void initTracing(
-      String serviceName, org.apache.hadoop.conf.Configuration conf) {
+      String serviceName, ConfigurationSource conf) {
     if (!GlobalTracer.isRegistered() && isTracingEnabled(conf)) {
       Configuration config = Configuration.fromEnv(serviceName);
       JaegerTracer tracer = config.getTracerBuilder()
@@ -116,7 +117,7 @@ public final class TracingUtil {
    * calls to the delegate and also enables tracing.
    */
   public static <T> T createProxy(
-      T delegate, Class<T> itf, org.apache.hadoop.conf.Configuration conf) {
+      T delegate, Class<T> itf, ConfigurationSource conf) {
     if (!isTracingEnabled(conf)) {
       return delegate;
     }
@@ -127,7 +128,7 @@ public final class TracingUtil {
   }
 
   private static boolean isTracingEnabled(
-      org.apache.hadoop.conf.Configuration conf) {
+      ConfigurationSource conf) {
     return conf.getBoolean(
           ScmConfigKeys.HDDS_TRACING_ENABLED,
           ScmConfigKeys.HDDS_TRACING_ENABLED_DEFAULT);
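
Callers only see the parameter type change. A sketch; the service name and
protocol interface below are illustrative, not part of this patch:

    ConfigurationSource conf = new OzoneConfiguration();
    // registers a Jaeger-backed GlobalTracer once, if tracing is enabled
    TracingUtil.initTracing("ExampleService", conf);
    // wraps the delegate so each interface call runs in its own span
    ExampleProtocol traced =
        TracingUtil.createProxy(delegate, ExampleProtocol.class, conf);
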
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/LegacyHadoopConfigurationSource.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/LegacyHadoopConfigurationSource.java
new file mode 100644
index 0000000..badc916
--- /dev/null
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/LegacyHadoopConfigurationSource.java
@@ -0,0 +1,74 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdds.utils;
+
+import java.util.Collection;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdds.conf.ConfigurationSource;
+
+/**
+ * Configuration source wrapping a Hadoop Configuration object.
+ */
+public class LegacyHadoopConfigurationSource implements ConfigurationSource {
+
+  private Configuration configuration;
+
+  public LegacyHadoopConfigurationSource(
+      Configuration configuration) {
+    this.configuration = configuration;
+  }
+
+  @Override
+  public String get(String key) {
+    return configuration.getRaw(key);
+  }
+
+  @Override
+  public Collection<String> getConfigKeys() {
+    return configuration.getPropsWithPrefix("").keySet();
+  }
+
+  @Override
+  public void set(String key, String value) {
+    configuration.set(key, value);
+  }
+
+  /**
+   * Helper method to get the original Hadoop configuration for legacy
+   * Hadoop libraries.
+   * <p>
+   * This works on the server side, but not on the client side, where a
+   * different configuration implementation may be in use.
+   */
+  public static Configuration asHadoopConfiguration(
+      ConfigurationSource config) {
+    if (config instanceof Configuration) {
+      return (Configuration) config;
+    } else if (config instanceof LegacyHadoopConfigurationSource) {
+      return ((LegacyHadoopConfigurationSource) config).configuration;
+    } else {
+      throw new IllegalArgumentException(
+          "Core Hadoop code requires real Hadoop configuration");
+    }
+  }
+
+  public Configuration getOriginalHadoopConfiguration() {
+    return configuration;
+  }
+}
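
A sketch of the intended round trip at the Hadoop boundary, based only on
the methods above:

    // wrap: lets ConfigurationSource consumers read a Hadoop Configuration
    Configuration hadoopConf = new Configuration();
    ConfigurationSource source =
        new LegacyHadoopConfigurationSource(hadoopConf);

    // unwrap: legacy Hadoop libraries get the original object back; other
    // ConfigurationSource implementations are rejected with
    // IllegalArgumentException
    Configuration unwrapped =
        LegacyHadoopConfigurationSource.asHadoopConfiguration(source);
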
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneSecurityUtil.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneSecurityUtil.java
index 0bdddaf..726862c 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneSecurityUtil.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneSecurityUtil.java
@@ -18,18 +18,6 @@
 
 package org.apache.hadoop.ozone;
 
-import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_HTTP_SECURITY_ENABLED_DEFAULT;
-import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_HTTP_SECURITY_ENABLED_KEY;
-import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_SECURITY_ENABLED_DEFAULT;
-import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_SECURITY_ENABLED_KEY;
-
-import org.apache.commons.validator.routines.InetAddressValidator;
-import org.apache.hadoop.hdds.annotation.InterfaceAudience;
-import org.apache.hadoop.hdds.annotation.InterfaceStability;
-import org.apache.hadoop.conf.Configuration;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
 import java.io.File;
 import java.io.IOException;
 import java.net.InetAddress;
@@ -42,6 +30,18 @@ import java.util.HashSet;
 import java.util.List;
 import java.util.Set;
 
+import org.apache.hadoop.hdds.annotation.InterfaceAudience;
+import org.apache.hadoop.hdds.annotation.InterfaceStability;
+import org.apache.hadoop.hdds.conf.ConfigurationSource;
+
+import org.apache.commons.validator.routines.InetAddressValidator;
+import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_HTTP_SECURITY_ENABLED_DEFAULT;
+import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_HTTP_SECURITY_ENABLED_KEY;
+import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_SECURITY_ENABLED_DEFAULT;
+import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_SECURITY_ENABLED_KEY;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
 /**
  * Ozone security Util class.
  */
@@ -58,12 +58,12 @@ public final class OzoneSecurityUtil {
   private OzoneSecurityUtil() {
   }
 
-  public static boolean isSecurityEnabled(Configuration conf) {
+  public static boolean isSecurityEnabled(ConfigurationSource conf) {
     return conf.getBoolean(OZONE_SECURITY_ENABLED_KEY,
         OZONE_SECURITY_ENABLED_DEFAULT);
   }
 
-  public static boolean isHttpSecurityEnabled(Configuration conf) {
+  public static boolean isHttpSecurityEnabled(ConfigurationSource conf) {
     return isSecurityEnabled(conf) &&
         conf.getBoolean(OZONE_HTTP_SECURITY_ENABLED_KEY,
         OZONE_HTTP_SECURITY_ENABLED_DEFAULT);
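
Call sites only swap the parameter type, e.g. (branch body is illustrative):

    ConfigurationSource conf = new OzoneConfiguration();
    if (OzoneSecurityUtil.isHttpSecurityEnabled(conf)) {
      // reached only when general security is also enabled, see above
    }
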
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/lock/LockManager.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/lock/LockManager.java
index 84aca6d..ac6e9a1 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/lock/LockManager.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/lock/LockManager.java
@@ -18,7 +18,7 @@
 package org.apache.hadoop.ozone.lock;
 
 import org.apache.commons.pool2.impl.GenericObjectPool;
-import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdds.conf.ConfigurationSource;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -44,7 +44,7 @@ public class LockManager<R> {
    *
    * @param conf Configuration object
    */
-  public LockManager(final Configuration conf) {
+  public LockManager(final ConfigurationSource conf) {
     this(conf, false);
   }
 
@@ -55,7 +55,7 @@ public class LockManager<R> {
    * @param conf Configuration object
    * @param fair - true to use fair lock ordering, else non-fair lock ordering.
    */
-  public LockManager(final Configuration conf, boolean fair) {
+  public LockManager(final ConfigurationSource conf, boolean fair) {
     lockPool =
         new GenericObjectPool<>(new PooledLockFactory(fair));
     lockPool.setMaxTotal(-1);
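
Construction is unchanged apart from the configuration type; both variants
from the hunk above:

    ConfigurationSource conf = new OzoneConfiguration();
    LockManager<String> locks = new LockManager<>(conf);           // non-fair
    LockManager<String> fairLocks = new LockManager<>(conf, true); // fair
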
diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/TestHddsUtils.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/TestHddsUtils.java
index ddb1f2b..7530bd0 100644
--- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/TestHddsUtils.java
+++ b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/TestHddsUtils.java
@@ -25,18 +25,17 @@ import java.util.Iterator;
 import java.util.Map;
 import java.util.Optional;
 
-import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.scm.ScmConfigKeys;
 import org.apache.hadoop.test.LambdaTestUtils;
-import org.junit.Assert;
-import org.junit.Test;
 
 import static org.apache.hadoop.hdds.HddsUtils.getSCMAddresses;
 import static org.hamcrest.core.Is.is;
+import org.junit.Assert;
 import static org.junit.Assert.assertThat;
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
+import org.junit.Test;
 
 /**
  * Testing HddsUtils.
@@ -76,7 +75,7 @@ public class TestHddsUtils {
 
   @Test
   public void testGetSCMAddresses() {
-    final Configuration conf = new OzoneConfiguration();
+    final OzoneConfiguration conf = new OzoneConfiguration();
     Collection<InetSocketAddress> addresses;
     InetSocketAddress addr;
     Iterator<InetSocketAddress> it;
diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/conf/TestOzoneConfiguration.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/conf/TestOzoneConfiguration.java
index ee724e2..5ab16ab 100644
--- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/conf/TestOzoneConfiguration.java
+++ b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/conf/TestOzoneConfiguration.java
@@ -17,26 +17,26 @@
  */
 package org.apache.hadoop.hdds.conf;
 
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.Path;
-import org.junit.Rule;
-import org.junit.Before;
-import org.junit.Test;
-import org.junit.Assert;
-import org.junit.rules.TemporaryFolder;
-
 import java.io.BufferedWriter;
 import java.io.File;
 import java.io.FileWriter;
 import java.io.IOException;
 import java.util.concurrent.TimeUnit;
 
+import org.apache.hadoop.fs.Path;
+
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.TemporaryFolder;
+
 /**
  * Test class for OzoneConfiguration.
  */
 public class TestOzoneConfiguration {
 
-  private Configuration conf;
+  private OzoneConfiguration conf;
 
   @Rule
   public TemporaryFolder tempConfigs = new TemporaryFolder();
diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/fs/TestDUFactory.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/fs/TestDUFactory.java
index e651174..7a8701e 100644
--- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/fs/TestDUFactory.java
+++ b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/fs/TestDUFactory.java
@@ -17,14 +17,14 @@
  */
 package org.apache.hadoop.hdds.fs;
 
-import org.apache.hadoop.conf.Configuration;
-import org.junit.Test;
-
 import java.io.File;
 import java.time.Duration;
 
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+
 import static org.apache.hadoop.hdds.fs.DUFactory.Conf.configKeyForRefreshPeriod;
 import static org.apache.hadoop.test.GenericTestUtils.getTestDir;
+import org.junit.Test;
 import static org.junit.jupiter.api.Assertions.assertEquals;
 import static org.junit.jupiter.api.Assertions.assertSame;
 
@@ -40,7 +40,7 @@ public class TestDUFactory {
 
   @Test
   public void testParams() {
-    Configuration conf = new Configuration();
+    OzoneConfiguration conf = new OzoneConfiguration();
     conf.set(configKeyForRefreshPeriod(), "1h");
     File dir = getTestDir(getClass().getSimpleName());
 
diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/fs/TestDedicatedDiskSpaceUsageFactory.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/fs/TestDedicatedDiskSpaceUsageFactory.java
index e3015b5..d0dfe60 100644
--- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/fs/TestDedicatedDiskSpaceUsageFactory.java
+++ b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/fs/TestDedicatedDiskSpaceUsageFactory.java
@@ -17,14 +17,14 @@
  */
 package org.apache.hadoop.hdds.fs;
 
-import org.apache.hadoop.conf.Configuration;
-import org.junit.Test;
-
 import java.io.File;
 import java.time.Duration;
 
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+
 import static org.apache.hadoop.hdds.fs.DedicatedDiskSpaceUsageFactory.Conf.configKeyForRefreshPeriod;
 import static org.apache.hadoop.test.GenericTestUtils.getTestDir;
+import org.junit.Test;
 import static org.junit.jupiter.api.Assertions.assertEquals;
 import static org.junit.jupiter.api.Assertions.assertSame;
 
@@ -41,7 +41,7 @@ public class TestDedicatedDiskSpaceUsageFactory {
 
   @Test
   public void testParams() {
-    Configuration conf = new Configuration();
+    OzoneConfiguration conf = new OzoneConfiguration();
     conf.set(configKeyForRefreshPeriod(), "2m");
     File dir = getTestDir(getClass().getSimpleName());
 
diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/fs/TestSpaceUsageFactory.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/fs/TestSpaceUsageFactory.java
index 09b8cc2..4f53b26 100644
--- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/fs/TestSpaceUsageFactory.java
+++ b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/fs/TestSpaceUsageFactory.java
@@ -17,19 +17,20 @@
  */
 package org.apache.hadoop.hdds.fs;
 
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.test.GenericTestUtils.LogCapturer;
-import org.junit.Before;
-import org.junit.Test;
-import org.slf4j.LoggerFactory;
-
 import java.io.File;
 
+import org.apache.hadoop.hdds.conf.ConfigurationSource;
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.test.GenericTestUtils.LogCapturer;
+
 import static org.apache.hadoop.hdds.fs.SpaceUsageCheckFactory.Conf.configKeyForClassName;
+import org.junit.Before;
+import org.junit.Test;
 import static org.junit.jupiter.api.Assertions.assertEquals;
 import static org.junit.jupiter.api.Assertions.assertNotNull;
 import static org.junit.jupiter.api.Assertions.assertSame;
 import static org.junit.jupiter.api.Assertions.assertTrue;
+import org.slf4j.LoggerFactory;
 
 /**
  * Tests for {@link SpaceUsageCheckFactory}.
@@ -39,7 +40,8 @@ public class TestSpaceUsageFactory {
   private LogCapturer capturer;
 
   /**
-   * Verifies that {@link SpaceUsageCheckFactory#create(Configuration)} creates
+   * Verifies that {@link SpaceUsageCheckFactory#create(ConfigurationSource)}
+   * creates
    * the correct implementation if configured.  This should be called from each
    * specific implementation's test class.
    * @return the instance created, so that further checks can done, if needed
@@ -47,7 +49,7 @@ public class TestSpaceUsageFactory {
   protected static <T extends SpaceUsageCheckFactory> T testCreateViaConfig(
       Class<T> factoryClass) {
 
-    Configuration conf = configFor(factoryClass);
+    OzoneConfiguration conf = configFor(factoryClass);
 
     SpaceUsageCheckFactory factory = SpaceUsageCheckFactory.create(conf);
 
@@ -104,10 +106,10 @@ public class TestSpaceUsageFactory {
         "in log output, but only got: " + output);
   }
 
-  private static <T extends SpaceUsageCheckFactory> Configuration configFor(
-      Class<T> factoryClass) {
+  private static <T extends SpaceUsageCheckFactory> OzoneConfiguration
+      configFor(Class<T> factoryClass) {
 
-    Configuration conf = new Configuration();
+    OzoneConfiguration conf = new OzoneConfiguration();
     conf.setClass(configKeyForClassName(),
         factoryClass, SpaceUsageCheckFactory.class);
 
@@ -116,12 +118,12 @@ public class TestSpaceUsageFactory {
 
   private static void testDefaultFactoryForBrokenImplementation(
       Class<? extends SpaceUsageCheckFactory> brokenImplementationClass) {
-    Configuration conf = configFor(brokenImplementationClass);
+    OzoneConfiguration conf = configFor(brokenImplementationClass);
     assertCreatesDefaultImplementation(conf);
   }
 
   private void testDefaultFactoryForWrongConfig(String value) {
-    Configuration conf = new Configuration();
+    OzoneConfiguration conf = new OzoneConfiguration();
     conf.set(configKeyForClassName(), value);
 
     assertCreatesDefaultImplementation(conf);
@@ -133,7 +135,8 @@ public class TestSpaceUsageFactory {
     }
   }
 
-  private static void assertCreatesDefaultImplementation(Configuration conf) {
+  private static void assertCreatesDefaultImplementation(
+      OzoneConfiguration conf) {
     // given
     // conf
 
@@ -171,15 +174,16 @@ public class TestSpaceUsageFactory {
   }
 
   /**
-   * Spy factory to verify {@link SpaceUsageCheckFactory#create(Configuration)}
+   * Spy factory to verify
+   * {@link SpaceUsageCheckFactory#create(ConfigurationSource)}
    * properly configures it.
    */
   public static final class SpyFactory implements SpaceUsageCheckFactory {
 
-    private Configuration conf;
+    private ConfigurationSource conf;
 
     @Override
-    public SpaceUsageCheckFactory setConfiguration(Configuration config) {
+    public SpaceUsageCheckFactory setConfiguration(ConfigurationSource config) {
       this.conf = config;
       return this;
     }
@@ -189,7 +193,7 @@ public class TestSpaceUsageFactory {
       throw new UnsupportedOperationException();
     }
 
-    public Configuration getConf() {
+    public ConfigurationSource getConf() {
       return conf;
     }
   }
diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/scm/net/TestNetworkTopologyImpl.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/scm/net/TestNetworkTopologyImpl.java
index 6044666..c8dfd2c 100644
--- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/scm/net/TestNetworkTopologyImpl.java
+++ b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/scm/net/TestNetworkTopologyImpl.java
@@ -17,23 +17,6 @@
  */
 package org.apache.hadoop.hdds.scm.net;
 
-import org.apache.hadoop.conf.Configuration;
-import static org.apache.hadoop.hdds.scm.net.NetConstants.ROOT;
-import static org.apache.hadoop.hdds.scm.net.NetConstants.PATH_SEPARATOR_STR;
-import static org.apache.hadoop.hdds.scm.net.NetConstants.ROOT_SCHEMA;
-import static org.apache.hadoop.hdds.scm.net.NetConstants.REGION_SCHEMA;
-import static org.apache.hadoop.hdds.scm.net.NetConstants.DATACENTER_SCHEMA;
-import static org.apache.hadoop.hdds.scm.net.NetConstants.RACK_SCHEMA;
-import static org.apache.hadoop.hdds.scm.net.NetConstants.NODEGROUP_SCHEMA;
-import static org.apache.hadoop.hdds.scm.net.NetConstants.LEAF_SCHEMA;
-
-import org.apache.hadoop.hdds.scm.ScmConfigKeys;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.rules.Timeout;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.Collection;
@@ -42,6 +25,18 @@ import java.util.List;
 import java.util.Map;
 import java.util.Random;
 import java.util.stream.Collectors;
+
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.scm.ScmConfigKeys;
+
+import static org.apache.hadoop.hdds.scm.net.NetConstants.DATACENTER_SCHEMA;
+import static org.apache.hadoop.hdds.scm.net.NetConstants.LEAF_SCHEMA;
+import static org.apache.hadoop.hdds.scm.net.NetConstants.NODEGROUP_SCHEMA;
+import static org.apache.hadoop.hdds.scm.net.NetConstants.PATH_SEPARATOR_STR;
+import static org.apache.hadoop.hdds.scm.net.NetConstants.RACK_SCHEMA;
+import static org.apache.hadoop.hdds.scm.net.NetConstants.REGION_SCHEMA;
+import static org.apache.hadoop.hdds.scm.net.NetConstants.ROOT;
+import static org.apache.hadoop.hdds.scm.net.NetConstants.ROOT_SCHEMA;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertNotNull;
@@ -49,9 +44,14 @@ import static org.junit.Assert.assertNull;
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
 import static org.junit.Assume.assumeTrue;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.Timeout;
+import org.junit.runner.RunWith;
 import org.junit.runners.Parameterized;
 import org.junit.runners.Parameterized.Parameters;
-import org.junit.runner.RunWith;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /** Test the network topology functions. */
 @RunWith(Parameterized.class)
@@ -221,7 +221,7 @@ public class TestNetworkTopologyImpl {
   @Test
   public void testInitWithConfigFile() {
     ClassLoader classLoader = Thread.currentThread().getContextClassLoader();
-    Configuration conf = new Configuration();
+    OzoneConfiguration conf = new OzoneConfiguration();
     try {
       String filePath = classLoader.getResource(
           "./networkTopologyTestFiles/good.xml").getPath();
diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/scm/net/TestNodeSchemaManager.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/scm/net/TestNodeSchemaManager.java
index 6698043..ae97155 100644
--- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/scm/net/TestNodeSchemaManager.java
+++ b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/scm/net/TestNodeSchemaManager.java
@@ -17,19 +17,19 @@
  */
 package org.apache.hadoop.hdds.scm.net;
 
-import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.scm.ScmConfigKeys;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.rules.Timeout;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
 
 import static org.apache.hadoop.hdds.scm.net.NetConstants.DEFAULT_NODEGROUP;
 import static org.apache.hadoop.hdds.scm.net.NetConstants.DEFAULT_RACK;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.Timeout;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /** Test the node schema loader. */
 public class TestNodeSchemaManager {
@@ -38,10 +38,10 @@ public class TestNodeSchemaManager {
   private ClassLoader classLoader =
       Thread.currentThread().getContextClassLoader();
   private NodeSchemaManager manager;
-  private Configuration conf;
+  private OzoneConfiguration conf;
 
   public TestNodeSchemaManager() {
-    conf = new Configuration();
+    conf = new OzoneConfiguration();
     String filePath = classLoader.getResource(
         "./networkTopologyTestFiles/good.xml").getPath();
     conf.set(ScmConfigKeys.OZONE_SCM_NETWORK_TOPOLOGY_SCHEMA_FILE, filePath);
diff --git a/hadoop-hdds/config/pom.xml b/hadoop-hdds/config/pom.xml
index 98690c4..105e8ac 100644
--- a/hadoop-hdds/config/pom.xml
+++ b/hadoop-hdds/config/pom.xml
@@ -33,7 +33,10 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd">
   </properties>
 
   <dependencies>
-
+    <dependency>
+      <groupId>org.slf4j</groupId>
+      <artifactId>slf4j-api</artifactId>
+    </dependency>
     <dependency>
       <groupId>org.apache.hadoop</groupId>
       <artifactId>hadoop-hdds-test-utils</artifactId>
diff --git a/hadoop-hdds/config/src/main/java/org/apache/hadoop/hdds/conf/ConfigurationReflectionUtil.java b/hadoop-hdds/config/src/main/java/org/apache/hadoop/hdds/conf/ConfigurationReflectionUtil.java
new file mode 100644
index 0000000..48f06ea
--- /dev/null
+++ b/hadoop-hdds/config/src/main/java/org/apache/hadoop/hdds/conf/ConfigurationReflectionUtil.java
@@ -0,0 +1,159 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdds.conf;
+
+import javax.annotation.PostConstruct;
+import java.lang.reflect.Field;
+import java.lang.reflect.InvocationTargetException;
+import java.lang.reflect.Method;
+
+/**
+ * Reflection utilities for configuration injection.
+ */
+public final class ConfigurationReflectionUtil {
+
+  private ConfigurationReflectionUtil() {
+  }
+
+  public static <T> void injectConfiguration(
+      ConfigurationSource configuration,
+      Class<T> configurationClass,
+      T configObject, String prefix) {
+    injectConfigurationToObject(configuration, configurationClass, configObject,
+        prefix);
+    Class<? super T> superClass = configurationClass.getSuperclass();
+    while (superClass != null) {
+      injectConfigurationToObject(configuration, superClass, configObject,
+          prefix);
+      superClass = superClass.getSuperclass();
+    }
+  }
+
+  public static <T> void injectConfigurationToObject(ConfigurationSource from,
+      Class<T> configurationClass,
+      T configuration,
+      String prefix) {
+    for (Field field : configurationClass.getDeclaredFields()) {
+      if (field.isAnnotationPresent(Config.class)) {
+
+        String fieldLocation =
+            configurationClass + "." + field.getName();
+
+        Config configAnnotation = field.getAnnotation(Config.class);
+
+        String key = prefix + "." + configAnnotation.key();
+
+        ConfigType type = configAnnotation.type();
+
+        if (type == ConfigType.AUTO) {
+          type = detectConfigType(field.getType(), fieldLocation);
+        }
+
+        //Note: default value is handled by ozone-default.xml. Here we can
+        //use any default.
+        try {
+          switch (type) {
+          case STRING:
+            forcedFieldSet(field, configuration, from.get(key));
+            break;
+          case INT:
+            forcedFieldSet(field, configuration, from.getInt(key, 0));
+            break;
+          case BOOLEAN:
+            forcedFieldSet(field, configuration, from.getBoolean(key, false));
+            break;
+          case LONG:
+            forcedFieldSet(field, configuration, from.getLong(key, 0));
+            break;
+          case TIME:
+            forcedFieldSet(field, configuration,
+                from.getTimeDuration(key, "0s", configAnnotation.timeUnit()));
+            break;
+          default:
+            throw new ConfigurationException(
+                "Unsupported ConfigType " + type + " on " + fieldLocation);
+          }
+        } catch (IllegalAccessException e) {
+          throw new ConfigurationException(
+              "Can't inject configuration to " + fieldLocation, e);
+        }
+
+      }
+    }
+  }
+
+  /**
+   * Set the value of one field even if it's private.
+   */
+  private static <T> void forcedFieldSet(Field field, T object, Object value)
+      throws IllegalAccessException {
+    boolean accessChanged = false;
+    if (!field.isAccessible()) {
+      field.setAccessible(true);
+      accessChanged = true;
+    }
+    field.set(object, value);
+    if (accessChanged) {
+      field.setAccessible(false);
+    }
+  }
+
+  private static ConfigType detectConfigType(Class<?> parameterType,
+      String methodLocation) {
+    ConfigType type;
+    if (parameterType == String.class) {
+      type = ConfigType.STRING;
+    } else if (parameterType == Integer.class || parameterType == int.class) {
+      type = ConfigType.INT;
+    } else if (parameterType == Long.class || parameterType == long.class) {
+      type = ConfigType.LONG;
+    } else if (parameterType == Boolean.class
+        || parameterType == boolean.class) {
+      type = ConfigType.BOOLEAN;
+    } else {
+      throw new ConfigurationException(
+          "Unsupported configuration type " + parameterType + " in "
+              + methodLocation);
+    }
+    return type;
+  }
+
+  public static <T> void callPostConstruct(Class<T> configurationClass,
+      T configObject) {
+    for (Method method : configurationClass.getMethods()) {
+      if (method.isAnnotationPresent(PostConstruct.class)) {
+        try {
+          method.invoke(configObject);
+        } catch (IllegalAccessException ex) {
+          throw new IllegalArgumentException(
+              "@PostConstruct method in " + configurationClass
+                  + " is not accessible");
+        } catch (InvocationTargetException e) {
+          if (e.getCause() instanceof RuntimeException) {
+            throw (RuntimeException) e.getCause();
+          } else {
+            throw new IllegalArgumentException(
+                "@PostConstruct can't be executed on " + configurationClass
+                    + " after configObject "
+                    + "injection", e);
+          }
+        }
+      }
+    }
+  }
+}
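
The utility targets @Config-annotated fields inside a @ConfigGroup class. A
minimal sketch; the class, the keys, and the annotation attributes shown
(key, type) are assumptions for illustration only:

    @ConfigGroup(prefix = "ozone.example")
    public class ExampleConf {

      // injected from the hypothetical key "ozone.example.enabled"
      @Config(key = "enabled", type = ConfigType.BOOLEAN)
      private boolean enabled;

      // invoked after injection; a natural place for validation
      @PostConstruct
      public void validate() {
        // no-op in this sketch
      }
    }

    // with any ConfigurationSource conf:
    ExampleConf example = conf.getObject(ExampleConf.class);
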
diff --git a/hadoop-hdds/config/src/main/java/org/apache/hadoop/hdds/conf/ConfigurationSource.java b/hadoop-hdds/config/src/main/java/org/apache/hadoop/hdds/conf/ConfigurationSource.java
new file mode 100644
index 0000000..bc20f68
--- /dev/null
+++ b/hadoop-hdds/config/src/main/java/org/apache/hadoop/hdds/conf/ConfigurationSource.java
@@ -0,0 +1,291 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdds.conf;
+
+import java.io.IOException;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.concurrent.TimeUnit;
+
+/**
+ * Lightweight interface defining the contract of configuration objects.
+ */
+public interface ConfigurationSource {
+
+  String[] EMPTY_STRING_ARRAY = {};
+
+  String get(String key);
+
+  Collection<String> getConfigKeys();
+
+  @Deprecated
+    //TODO: use read-only configs and don't use the configuration to store
+    // actual port numbers.
+  void set(String key, String value);
+
+  default String get(String key, String defaultValue) {
+    String value = get(key);
+    return value != null ? value : defaultValue;
+  }
+
+  default int getInt(String key, int defaultValue) {
+    String value = get(key);
+    return value != null ? Integer.parseInt(value) : defaultValue;
+  }
+
+  /**
+   * Get the value of the <code>name</code> property as a set of comma-delimited
+   * <code>int</code> values.
+   * <p>
+   * If no such property exists, an empty array is returned.
+   *
+   * @param name property name
+   * @return property value interpreted as an array of comma-delimited
+   * <code>int</code> values
+   */
+  default int[] getInts(String name) {
+    String[] strings = getTrimmedStrings(name);
+    int[] ints = new int[strings.length];
+    for (int i = 0; i < strings.length; i++) {
+      ints[i] = Integer.parseInt(strings[i]);
+    }
+    return ints;
+  }
+
+  default long getLong(String key, long defaultValue) {
+    String value = get(key);
+    return value != null ? Long.parseLong(value) : defaultValue;
+  }
+
+  default boolean getBoolean(String key, boolean defaultValue) {
+    String value = get(key);
+    return value != null ? Boolean.parseBoolean(value) : defaultValue;
+  }
+
+  default float getFloat(String key, float defaultValue) {
+    String value = get(key);
+    return value != null ? Float.parseFloat(value) : defaultValue;
+  }
+
+  default double getDouble(String key, double defaultValue) {
+    String value = get(key);
+    return value != null ? Double.parseDouble(value) : defaultValue;
+  }
+
+  default String getTrimmed(String key) {
+    String value = get(key);
+    return value != null ? value.trim() : null;
+  }
+
+  default String getTrimmed(String key, String defaultValue) {
+    String value = getTrimmed(key);
+    return value != null ? value : defaultValue;
+  }
+
+  default String[] getTrimmedStrings(String name) {
+    String valueString = get(name);
+    if (null == valueString || valueString.trim().isEmpty()) {
+      return EMPTY_STRING_ARRAY;
+    }
+
+    return valueString.trim().split("\\s*[,\n]\\s*");
+  }
+
+  default char[] getPassword(String key) throws IOException {
+    return get(key).toCharArray();
+  }
+
+  default Map<String, String> getPropsWithPrefix(String confPrefix) {
+    Map<String, String> configMap = new HashMap<>();
+    for (String name : getConfigKeys()) {
+      if (name.startsWith(confPrefix)) {
+        String value = get(name);
+        String keyName = name.substring(confPrefix.length());
+        configMap.put(keyName, value);
+      }
+    }
+    return configMap;
+  }
+
+  /**
+   * Create a Configuration object and inject the required configuration values.
+   *
+   * @param configurationClass The class where the fields are annotated with
+   *                           the configuration.
+   * @return Instantiated Java object with the config fields injected.
+   */
+  default <T> T getObject(Class<T> configurationClass) {
+
+    T configObject;
+
+    try {
+      configObject = configurationClass.newInstance();
+    } catch (InstantiationException | IllegalAccessException e) {
+      throw new ConfigurationException(
+          "Configuration class can't be created: " + configurationClass, e);
+    }
+    ConfigGroup configGroup =
+        configurationClass.getAnnotation(ConfigGroup.class);
+
+    String prefix = configGroup.prefix();
+
+    ConfigurationReflectionUtil
+        .injectConfiguration(this, configurationClass, configObject,
+            prefix);
+
+    ConfigurationReflectionUtil
+        .callPostConstruct(configurationClass, configObject);
+
+    return configObject;
+
+  }
+
+  /**
+   * Get the value of the <code>name</code> property as a <code>Class</code>
+   * implementing the interface specified by <code>xface</code>.
+   * <p>
+   * If no such property is specified, then <code>defaultValue</code> is
+   * returned.
+   * <p>
+   * An exception is thrown if the returned class does not implement the named
+   * interface.
+   *
+   * @param name         the class name.
+   * @param defaultValue default value.
+   * @param xface        the interface implemented by the named class.
+   * @return property value as a <code>Class</code>,
+   * or <code>defaultValue</code>.
+   */
+  default <U> Class<? extends U> getClass(String name,
+      Class<? extends U> defaultValue,
+      Class<U> xface) {
+    try {
+      Class<?> theClass = getClass(name, defaultValue);
+      if (theClass != null && !xface.isAssignableFrom(theClass)) {
+        throw new RuntimeException(theClass + " not " + xface.getName());
+      } else if (theClass != null) {
+        return theClass.asSubclass(xface);
+      } else {
+        return null;
+      }
+    } catch (Exception e) {
+      throw new RuntimeException(e);
+    }
+  }
+
+  /**
+   * Get the value of the <code>name</code> property as a <code>Class</code>.
+   * If no such property is specified, then <code>defaultValue</code> is
+   * returned.
+   *
+   * @param name         the class name.
+   * @param defaultValue default value.
+   * @return property value as a <code>Class</code>,
+   * or <code>defaultValue</code>.
+   */
+  default Class<?> getClass(String name, Class<?> defaultValue) {
+    String valueString = getTrimmed(name);
+    if (valueString == null) {
+      return defaultValue;
+    }
+    try {
+      return Class.forName(valueString);
+    } catch (ClassNotFoundException e) {
+      throw new RuntimeException(e);
+    }
+  }
+
+  default Class<?>[] getClasses(String name, Class<?>... defaultValue) {
+    String valueString = get(name);
+    if (null == valueString) {
+      return defaultValue;
+    }
+    String[] classnames = getTrimmedStrings(name);
+    try {
+      Class<?>[] classes = new Class<?>[classnames.length];
+      for (int i = 0; i < classnames.length; i++) {
+        classes[i] = Class.forName(classnames[i]);
+      }
+      return classes;
+    } catch (ClassNotFoundException e) {
+      throw new RuntimeException(e);
+    }
+  }
+
+  default long getTimeDuration(String name, long defaultValue,
+      TimeUnit unit) {
+    String vStr = get(name);
+    if (null == vStr) {
+      return defaultValue;
+    } else {
+      return TimeDurationUtil.getTimeDurationHelper(name, vStr, unit);
+    }
+  }
+
+  default long getTimeDuration(String name, String defaultValue,
+      TimeUnit unit) {
+    String vStr = get(name);
+    if (null == vStr) {
+      return TimeDurationUtil.getTimeDurationHelper(name, defaultValue, unit);
+    } else {
+      return TimeDurationUtil.getTimeDurationHelper(name, vStr, unit);
+    }
+  }
+
+  default double getStorageSize(String name, String defaultValue,
+      StorageUnit targetUnit) {
+    String vString = get(name);
+    if (vString == null) {
+      vString = defaultValue;
+    }
+
+    // Please note: There is a bit of subtlety here. If the user specifies
+    // the default unit as "1GB", but the requested unit is MB, we will return
+    // the value in MB even though the default string is specified in GB.
+
+    // Converts a string like "1GB" to the unit specified in targetUnit.
+
+    StorageSize measure = StorageSize.parse(vString);
+
+    double byteValue = measure.getUnit().toBytes(measure.getValue());
+    return targetUnit.fromBytes(byteValue);
+  }
+
+  default Collection<String> getTrimmedStringCollection(String key) {
+    return Arrays.asList(getTrimmedStrings(key));
+  }
+
+  /**
+   * Return value matching this enumerated type.
+   * Note that the returned value is trimmed by this method.
+   *
+   * @param name         Property name
+   * @param defaultValue Value returned if no mapping exists
+   * @throws IllegalArgumentException If mapping is illegal for the type
+   *                                  provided
+   */
+  default <T extends Enum<T>> T getEnum(String name, T defaultValue) {
+    final String val = getTrimmed(name);
+    return null == val
+        ? defaultValue
+        : Enum.valueOf(defaultValue.getDeclaringClass(), val);
+  }
+
+}
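
Only get, set and getConfigKeys are abstract, so a test double stays small.
A minimal in-memory sketch (the class name is illustrative):

    import java.util.Collection;
    import java.util.HashMap;
    import java.util.Map;
    import java.util.concurrent.TimeUnit;

    public class InMemorySource implements ConfigurationSource {
      private final Map<String, String> props = new HashMap<>();

      @Override
      public String get(String key) {
        return props.get(key);
      }

      @Override
      public Collection<String> getConfigKeys() {
        return props.keySet();
      }

      @Override
      public void set(String key, String value) {
        props.put(key, value);
      }
    }

    // the default methods then work as-is:
    InMemorySource src = new InMemorySource();
    src.set("example.wait", "10s");
    long millis = src.getTimeDuration("example.wait", "1s",
        TimeUnit.MILLISECONDS);   // 10000
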
diff --git a/hadoop-hdds/config/src/main/java/org/apache/hadoop/hdds/conf/StorageSize.java b/hadoop-hdds/config/src/main/java/org/apache/hadoop/hdds/conf/StorageSize.java
new file mode 100644
index 0000000..15016be
--- /dev/null
+++ b/hadoop-hdds/config/src/main/java/org/apache/hadoop/hdds/conf/StorageSize.java
@@ -0,0 +1,102 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdds.conf;
+
+import java.util.Locale;
+
+/**
+ * A class that contains the numeric value and the unit of measure.
+ */
+public class StorageSize {
+  private final StorageUnit unit;
+  private final double value;
+
+  /**
+   * Constructs a Storage Measure, which contains the value and the unit of
+   * measure.
+   *
+   * @param unit  - Unit of Measure
+   * @param value - Numeric value.
+   */
+  public StorageSize(StorageUnit unit, double value) {
+    this.unit = unit;
+    this.value = value;
+  }
+
+  private static void checkState(boolean state, String errorString) {
+    if (!state) {
+      throw new IllegalStateException(errorString);
+    }
+  }
+
+  public static StorageSize parse(String value) {
+    checkState(value != null && value.length() > 0, "value cannot be blank");
+    String sanitizedValue = value.trim().toLowerCase(Locale.ENGLISH);
+    StorageUnit parsedUnit = null;
+    for (StorageUnit unit : StorageUnit.values()) {
+      if (sanitizedValue.endsWith(unit.getShortName()) ||
+          sanitizedValue.endsWith(unit.getLongName()) ||
+          sanitizedValue.endsWith(unit.getSuffixChar())) {
+        parsedUnit = unit;
+        break;
+      }
+    }
+
+    if (parsedUnit == null) {
+      throw new IllegalArgumentException(value + " is not in expected format. "
+          + "Expected format is <number><unit>, e.g. 1000MB");
+    }
+
+    String suffix = "";
+    boolean found = false;
+
+    // We are trying to get the longest match first, so the order of
+    // matching is getLongName, getShortName and then getSuffixChar.
+    if (!found && sanitizedValue.endsWith(parsedUnit.getLongName())) {
+      found = true;
+      suffix = parsedUnit.getLongName();
+    }
+
+    if (!found && sanitizedValue.endsWith(parsedUnit.getShortName())) {
+      found = true;
+      suffix = parsedUnit.getShortName();
+    }
+
+    if (!found && sanitizedValue.endsWith(parsedUnit.getSuffixChar())) {
+      found = true;
+      suffix = parsedUnit.getSuffixChar();
+    }
+
+    checkState(found, "Something is wrong, we have to find a " +
+        "match. Internal error.");
+
+    String valString = sanitizedValue
+        .substring(0, sanitizedValue.length() - suffix.length());
+    return new StorageSize(parsedUnit, Double.parseDouble(valString));
+
+  }
+
+  public StorageUnit getUnit() {
+    return unit;
+  }
+
+  public double getValue() {
+    return value;
+  }
+
+}
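
Parsing is case-insensitive and accepts long names, short names, or
single-character suffixes:

    StorageSize size = StorageSize.parse("1GB");
    // size.getUnit() == StorageUnit.GB, size.getValue() == 1.0
    double bytes = size.getUnit().toBytes(size.getValue()); // 1073741824.0
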
diff --git a/hadoop-hdds/config/src/main/java/org/apache/hadoop/hdds/conf/StorageUnit.java b/hadoop-hdds/config/src/main/java/org/apache/hadoop/hdds/conf/StorageUnit.java
new file mode 100644
index 0000000..6678aa4
--- /dev/null
+++ b/hadoop-hdds/config/src/main/java/org/apache/hadoop/hdds/conf/StorageUnit.java
@@ -0,0 +1,529 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdds.conf;
+
+import java.math.BigDecimal;
+import java.math.RoundingMode;
+
+/**
+ * Enum to represent storage unit.
+ */
+public enum StorageUnit {
+  /*
+    We rely on BYTES being declared last so that longer short names are
+    matched first; the short name of BYTES is "b", which is also the tail
+    of every longer short name.
+
+    If this order changes, Configuration#parseStorageUnit needs to change
+    too, since values() returns the constants in declaration order.
+   */
+
+  EB {
+    @Override
+    public double toBytes(double value) {
+      return multiply(value, EXABYTES);
+    }
+
+    @Override
+    public double toKBs(double value) {
+      return multiply(value, EXABYTES / KILOBYTES);
+    }
+
+    @Override
+    public double toMBs(double value) {
+      return multiply(value, EXABYTES / MEGABYTES);
+    }
+
+    @Override
+    public double toGBs(double value) {
+      return multiply(value, EXABYTES / GIGABYTES);
+    }
+
+    @Override
+    public double toTBs(double value) {
+      return multiply(value, EXABYTES / TERABYTES);
+    }
+
+    @Override
+    public double toPBs(double value) {
+      return multiply(value, EXABYTES / PETABYTES);
+    }
+
+    @Override
+    public double toEBs(double value) {
+      return value;
+    }
+
+    @Override
+    public String getLongName() {
+      return "exabytes";
+    }
+
+    @Override
+    public String getShortName() {
+      return "eb";
+    }
+
+    @Override
+    public String getSuffixChar() {
+      return "e";
+    }
+
+    @Override
+    public double getDefault(double value) {
+      return toEBs(value);
+    }
+
+    @Override
+    public double fromBytes(double value) {
+      return divide(value, EXABYTES);
+    }
+  },
+  PB {
+    @Override
+    public double toBytes(double value) {
+      return multiply(value, PETABYTES);
+    }
+
+    @Override
+    public double toKBs(double value) {
+      return multiply(value, PETABYTES / KILOBYTES);
+    }
+
+    @Override
+    public double toMBs(double value) {
+      return multiply(value, PETABYTES / MEGABYTES);
+    }
+
+    @Override
+    public double toGBs(double value) {
+      return multiply(value, PETABYTES / GIGABYTES);
+    }
+
+    @Override
+    public double toTBs(double value) {
+      return multiply(value, PETABYTES / TERABYTES);
+    }
+
+    @Override
+    public double toPBs(double value) {
+      return value;
+    }
+
+    @Override
+    public double toEBs(double value) {
+      return divide(value, EXABYTES / PETABYTES);
+    }
+
+    @Override
+    public String getLongName() {
+      return "petabytes";
+    }
+
+    @Override
+    public String getShortName() {
+      return "pb";
+    }
+
+    @Override
+    public String getSuffixChar() {
+      return "p";
+    }
+
+    @Override
+    public double getDefault(double value) {
+      return toPBs(value);
+    }
+
+    @Override
+    public double fromBytes(double value) {
+      return divide(value, PETABYTES);
+    }
+  },
+  TB {
+    @Override
+    public double toBytes(double value) {
+      return multiply(value, TERABYTES);
+    }
+
+    @Override
+    public double toKBs(double value) {
+      return multiply(value, TERABYTES / KILOBYTES);
+    }
+
+    @Override
+    public double toMBs(double value) {
+      return multiply(value, TERABYTES / MEGABYTES);
+    }
+
+    @Override
+    public double toGBs(double value) {
+      return multiply(value, TERABYTES / GIGABYTES);
+    }
+
+    @Override
+    public double toTBs(double value) {
+      return value;
+    }
+
+    @Override
+    public double toPBs(double value) {
+      return divide(value, PETABYTES / TERABYTES);
+    }
+
+    @Override
+    public double toEBs(double value) {
+      return divide(value, EXABYTES / TERABYTES);
+    }
+
+    @Override
+    public String getLongName() {
+      return "terabytes";
+    }
+
+    @Override
+    public String getShortName() {
+      return "tb";
+    }
+
+    @Override
+    public String getSuffixChar() {
+      return "t";
+    }
+
+    @Override
+    public double getDefault(double value) {
+      return toTBs(value);
+    }
+
+    @Override
+    public double fromBytes(double value) {
+      return divide(value, TERABYTES);
+    }
+  },
+  GB {
+    @Override
+    public double toBytes(double value) {
+      return multiply(value, GIGABYTES);
+    }
+
+    @Override
+    public double toKBs(double value) {
+      return multiply(value, GIGABYTES / KILOBYTES);
+    }
+
+    @Override
+    public double toMBs(double value) {
+      return multiply(value, GIGABYTES / MEGABYTES);
+    }
+
+    @Override
+    public double toGBs(double value) {
+      return value;
+    }
+
+    @Override
+    public double toTBs(double value) {
+      return divide(value, TERABYTES / GIGABYTES);
+    }
+
+    @Override
+    public double toPBs(double value) {
+      return divide(value, PETABYTES / GIGABYTES);
+    }
+
+    @Override
+    public double toEBs(double value) {
+      return divide(value, EXABYTES / GIGABYTES);
+    }
+
+    @Override
+    public String getLongName() {
+      return "gigabytes";
+    }
+
+    @Override
+    public String getShortName() {
+      return "gb";
+    }
+
+    @Override
+    public String getSuffixChar() {
+      return "g";
+    }
+
+    @Override
+    public double getDefault(double value) {
+      return toGBs(value);
+    }
+
+    @Override
+    public double fromBytes(double value) {
+      return divide(value, GIGABYTES);
+    }
+  },
+  MB {
+    @Override
+    public double toBytes(double value) {
+      return multiply(value, MEGABYTES);
+    }
+
+    @Override
+    public double toKBs(double value) {
+      return multiply(value, MEGABYTES / KILOBYTES);
+    }
+
+    @Override
+    public double toMBs(double value) {
+      return value;
+    }
+
+    @Override
+    public double toGBs(double value) {
+      return divide(value, GIGABYTES / MEGABYTES);
+    }
+
+    @Override
+    public double toTBs(double value) {
+      return divide(value, TERABYTES / MEGABYTES);
+    }
+
+    @Override
+    public double toPBs(double value) {
+      return divide(value, PETABYTES / MEGABYTES);
+    }
+
+    @Override
+    public double toEBs(double value) {
+      return divide(value, EXABYTES / MEGABYTES);
+    }
+
+    @Override
+    public String getLongName() {
+      return "megabytes";
+    }
+
+    @Override
+    public String getShortName() {
+      return "mb";
+    }
+
+    @Override
+    public String getSuffixChar() {
+      return "m";
+    }
+
+    @Override
+    public double fromBytes(double value) {
+      return divide(value, MEGABYTES);
+    }
+
+    @Override
+    public double getDefault(double value) {
+      return toMBs(value);
+    }
+  },
+  KB {
+    @Override
+    public double toBytes(double value) {
+      return multiply(value, KILOBYTES);
+    }
+
+    @Override
+    public double toKBs(double value) {
+      return value;
+    }
+
+    @Override
+    public double toMBs(double value) {
+      return divide(value, MEGABYTES / KILOBYTES);
+    }
+
+    @Override
+    public double toGBs(double value) {
+      return divide(value, GIGABYTES / KILOBYTES);
+    }
+
+    @Override
+    public double toTBs(double value) {
+      return divide(value, TERABYTES / KILOBYTES);
+    }
+
+    @Override
+    public double toPBs(double value) {
+      return divide(value, PETABYTES / KILOBYTES);
+    }
+
+    @Override
+    public double toEBs(double value) {
+      return divide(value, EXABYTES / KILOBYTES);
+    }
+
+    @Override
+    public String getLongName() {
+      return "kilobytes";
+    }
+
+    @Override
+    public String getShortName() {
+      return "kb";
+    }
+
+    @Override
+    public String getSuffixChar() {
+      return "k";
+    }
+
+    @Override
+    public double getDefault(double value) {
+      return toKBs(value);
+    }
+
+    @Override
+    public double fromBytes(double value) {
+      return divide(value, KILOBYTES);
+    }
+  },
+  BYTES {
+    @Override
+    public double toBytes(double value) {
+      return value;
+    }
+
+    @Override
+    public double toKBs(double value) {
+      return divide(value, KILOBYTES);
+    }
+
+    @Override
+    public double toMBs(double value) {
+      return divide(value, MEGABYTES);
+    }
+
+    @Override
+    public double toGBs(double value) {
+      return divide(value, GIGABYTES);
+    }
+
+    @Override
+    public double toTBs(double value) {
+      return divide(value, TERABYTES);
+    }
+
+    @Override
+    public double toPBs(double value) {
+      return divide(value, PETABYTES);
+    }
+
+    @Override
+    public double toEBs(double value) {
+      return divide(value, EXABYTES);
+    }
+
+    @Override
+    public String getLongName() {
+      return "bytes";
+    }
+
+    @Override
+    public String getShortName() {
+      return "b";
+    }
+
+    @Override
+    public String getSuffixChar() {
+      return "b";
+    }
+
+    @Override
+    public double getDefault(double value) {
+      return toBytes(value);
+    }
+
+    @Override
+    public double fromBytes(double value) {
+      return value;
+    }
+  };
+
+  private static final double BYTE = 1L;
+  private static final double KILOBYTES = BYTE * 1024L;
+  private static final double MEGABYTES = KILOBYTES * 1024L;
+  private static final double GIGABYTES = MEGABYTES * 1024L;
+  private static final double TERABYTES = GIGABYTES * 1024L;
+  private static final double PETABYTES = TERABYTES * 1024L;
+  private static final double EXABYTES = PETABYTES * 1024L;
+  private static final int PRECISION = 4;
+
+  /**
+   * Using BigDecimal to avoid issues with overflow and underflow.
+   *
+   * @param value - value to divide.
+   * @param divisor - divisor.
+   * @return the quotient as a double, rounded to PRECISION decimal places.
+   */
+  private static double divide(double value, double divisor) {
+    BigDecimal val = new BigDecimal(value);
+    BigDecimal bDivisor = new BigDecimal(divisor);
+    return val.divide(bDivisor).setScale(PRECISION, RoundingMode.HALF_UP)
+        .doubleValue();
+  }
+
+  /**
+   * Using BigDecimal to avoid precision loss when multiplying large values.
+   *
+   * @param first - first factor.
+   * @param second - second factor.
+   * @return the product as a double, rounded to PRECISION decimal places.
+   */
+  private static double multiply(double first, double second) {
+    BigDecimal firstVal = new BigDecimal(first);
+    BigDecimal secondVal = new BigDecimal(second);
+    return firstVal.multiply(secondVal)
+        .setScale(PRECISION, RoundingMode.HALF_UP).doubleValue();
+  }
+
+  public abstract double toBytes(double value);
+
+  public abstract double toKBs(double value);
+
+  public abstract double toMBs(double value);
+
+  public abstract double toGBs(double value);
+
+  public abstract double toTBs(double value);
+
+  public abstract double toPBs(double value);
+
+  public abstract double toEBs(double value);
+
+  public abstract String getLongName();
+
+  public abstract String getShortName();
+
+  public abstract String getSuffixChar();
+
+  public abstract double getDefault(double value);
+
+  public abstract double fromBytes(double value);
+
+  public String toString() {
+    return getLongName();
+  }
+
+}
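
Each constant converts with a precomputed factor and rounds the result to
PRECISION (four) decimal places, e.g.:

    double mbs = StorageUnit.GB.toMBs(1);            // 1024.0
    double gbs = StorageUnit.BYTES.toGBs(1L << 30);  // 1.0
    double mb = StorageUnit.MB.fromBytes(1048576);   // 1.0
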
diff --git a/hadoop-hdds/config/src/main/java/org/apache/hadoop/hdds/conf/TimeDurationUtil.java b/hadoop-hdds/config/src/main/java/org/apache/hadoop/hdds/conf/TimeDurationUtil.java
new file mode 100644
index 0000000..2bbdecf
--- /dev/null
+++ b/hadoop-hdds/config/src/main/java/org/apache/hadoop/hdds/conf/TimeDurationUtil.java
@@ -0,0 +1,154 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdds.conf;
+
+import java.util.concurrent.TimeUnit;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Utility to handle time duration.
+ */
+public final class TimeDurationUtil {
+
+  public static final Logger LOG =
+      LoggerFactory.getLogger(TimeDurationUtil.class);
+
+  private TimeDurationUtil() {
+  }
+
+  /**
+   * Return time duration in the given time unit. Valid units are encoded in
+   * properties as suffixes: nanoseconds (ns), microseconds (us), milliseconds
+   * (ms), seconds (s), minutes (m), hours (h), and days (d).
+   *
+   * @param name property name, used only in log messages.
+   * @param vStr the string value, with an optional time unit suffix.
+   * @param unit the unit to convert the value to.
+   * @return the duration converted to {@code unit}.
+   */
+  public static long getTimeDurationHelper(String name, String vStr,
+      TimeUnit unit) {
+    vStr = vStr.trim();
+    vStr = vStr.toLowerCase();
+    ParsedTimeDuration vUnit = ParsedTimeDuration.unitFor(vStr);
+    if (null == vUnit) {
+      LOG.warn("No unit for " + name + "(" + vStr + ") assuming " + unit);
+      vUnit = ParsedTimeDuration.unitFor(unit);
+    } else {
+      vStr = vStr.substring(0, vStr.lastIndexOf(vUnit.suffix()));
+    }
+
+    long raw = Long.parseLong(vStr);
+    long converted = unit.convert(raw, vUnit.unit());
+    if (vUnit.unit().convert(converted, unit) < raw) {
+      LOG.warn("Possible loss of precision converting " + vStr
+          + vUnit.suffix() + " to " + unit + " for " + name);
+    }
+    return converted;
+  }
+
+  enum ParsedTimeDuration {
+    NS {
+      TimeUnit unit() {
+        return TimeUnit.NANOSECONDS;
+      }
+
+      String suffix() {
+        return "ns";
+      }
+    },
+    US {
+      TimeUnit unit() {
+        return TimeUnit.MICROSECONDS;
+      }
+
+      String suffix() {
+        return "us";
+      }
+    },
+    MS {
+      TimeUnit unit() {
+        return TimeUnit.MILLISECONDS;
+      }
+
+      String suffix() {
+        return "ms";
+      }
+    },
+    S {
+      TimeUnit unit() {
+        return TimeUnit.SECONDS;
+      }
+
+      String suffix() {
+        return "s";
+      }
+    },
+    M {
+      TimeUnit unit() {
+        return TimeUnit.MINUTES;
+      }
+
+      String suffix() {
+        return "m";
+      }
+    },
+    H {
+      TimeUnit unit() {
+        return TimeUnit.HOURS;
+      }
+
+      String suffix() {
+        return "h";
+      }
+    },
+    D {
+      TimeUnit unit() {
+        return TimeUnit.DAYS;
+      }
+
+      String suffix() {
+        return "d";
+      }
+    };
+
+    abstract TimeUnit unit();
+
+    abstract String suffix();
+
+    static ParsedTimeDuration unitFor(String s) {
+      for (ParsedTimeDuration ptd : values()) {
+        // Declaration order matters: "ns"/"us"/"ms" match before bare "s".
+        if (s.endsWith(ptd.suffix())) {
+          return ptd;
+        }
+      }
+      return null;
+    }
+
+    static ParsedTimeDuration unitFor(TimeUnit unit) {
+      for (ParsedTimeDuration ptd : values()) {
+        if (ptd.unit() == unit) {
+          return ptd;
+        }
+      }
+      return null;
+    }
+  }
+}
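
A quick usage sketch for the new helper (not part of this patch; the property name is made up). A recognized suffix is stripped and converted; a bare number is assumed to already be in the target unit, with a warning logged:

    import java.util.concurrent.TimeUnit;

    import org.apache.hadoop.hdds.conf.TimeDurationUtil;

    public class TimeDurationDemo {
      public static void main(String[] args) {
        // "10s": suffix "s" is recognized, value converted to milliseconds.
        System.out.println(TimeDurationUtil.getTimeDurationHelper(
            "hdds.example.interval", "10s", TimeUnit.MILLISECONDS)); // 10000

        // No suffix: value assumed to be in the target unit already (warns).
        System.out.println(TimeDurationUtil.getTimeDurationHelper(
            "hdds.example.interval", "10", TimeUnit.MILLISECONDS)); // 10
      }
    }
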
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/HddsDatanodeHttpServer.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/HddsDatanodeHttpServer.java
index fe2d065..74155c2 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/HddsDatanodeHttpServer.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/HddsDatanodeHttpServer.java
@@ -19,8 +19,8 @@ package org.apache.hadoop.ozone;
 
 import java.io.IOException;
 
-import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdds.HddsConfigKeys;
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.server.http.BaseHttpServer;
 
 /**
@@ -30,7 +30,7 @@ import org.apache.hadoop.hdds.server.http.BaseHttpServer;
  */
 public class HddsDatanodeHttpServer extends BaseHttpServer {
 
-  public HddsDatanodeHttpServer(Configuration conf) throws IOException {
+  public HddsDatanodeHttpServer(OzoneConfiguration conf) throws IOException {
     super(conf, "hddsDatanode");
   }
 
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/HddsDatanodeService.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/HddsDatanodeService.java
index 6a6d718..e811a88 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/HddsDatanodeService.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/HddsDatanodeService.java
@@ -29,11 +29,11 @@ import java.util.UUID;
 import java.util.concurrent.atomic.AtomicBoolean;
 
 import org.apache.hadoop.conf.Configurable;
-import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdds.DFSConfigKeysLegacy;
 import org.apache.hadoop.hdds.HddsUtils;
 import org.apache.hadoop.hdds.cli.GenericCli;
 import org.apache.hadoop.hdds.cli.HddsVersionProvider;
+import org.apache.hadoop.hdds.conf.ConfigurationSource;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 import org.apache.hadoop.hdds.protocol.proto.SCMSecurityProtocolProtos.SCMGetCertResponseProto;
@@ -357,7 +357,7 @@ public class HddsDatanodeService extends GenericCli implements ServicePlugin {
    * @param config
    * */
   @VisibleForTesting
-  public PKCS10CertificationRequest getCSR(Configuration config)
+  public PKCS10CertificationRequest getCSR(ConfigurationSource config)
       throws IOException {
     CertificateSignRequest.Builder builder = dnCertClient.getCSRBuilder();
     KeyPair keyPair = new KeyPair(dnCertClient.getPublicKey(),
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerMetrics.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerMetrics.java
index 3de9579..e07f626 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerMetrics.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerMetrics.java
@@ -19,7 +19,7 @@
 package org.apache.hadoop.ozone.container.common.helpers;
 
 import org.apache.hadoop.hdds.annotation.InterfaceAudience;
-import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdds.conf.ConfigurationSource;
 import org.apache.hadoop.hdds.DFSConfigKeysLegacy;
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
 import org.apache.hadoop.metrics2.MetricsSystem;
@@ -86,7 +86,7 @@ public class ContainerMetrics {
     }
   }
 
-  public static ContainerMetrics create(Configuration conf) {
+  public static ContainerMetrics create(ConfigurationSource conf) {
     MetricsSystem ms = DefaultMetricsSystem.instance();
     // Percentile measurement is off by default, by watching no intervals
     int[] intervals =
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ChunkLayOutVersion.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ChunkLayOutVersion.java
index a5bcc22..055f448 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ChunkLayOutVersion.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ChunkLayOutVersion.java
@@ -18,20 +18,20 @@
 package org.apache.hadoop.ozone.container.common.impl;
 
 
-import com.google.common.base.Preconditions;
-import com.google.common.collect.ImmutableList;
-import org.apache.hadoop.conf.Configuration;
+import java.io.File;
+import java.util.List;
+
 import org.apache.hadoop.hdds.client.BlockID;
+import org.apache.hadoop.hdds.conf.ConfigurationSource;
 import org.apache.hadoop.hdds.scm.ScmConfigKeys;
 import org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException;
 import org.apache.hadoop.ozone.container.common.helpers.ChunkInfo;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.io.File;
-import java.util.List;
 
+import com.google.common.base.Preconditions;
+import com.google.common.collect.ImmutableList;
 import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Result.UNABLE_TO_FIND_DATA_DIR;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * Defines layout versions for the Chunks.
@@ -94,7 +94,8 @@ public enum ChunkLayOutVersion {
   /**
    * @return the latest version.
    */
-  public static ChunkLayOutVersion getConfiguredVersion(Configuration conf) {
+  public static ChunkLayOutVersion getConfiguredVersion(
+      ConfigurationSource conf) {
     try {
       return conf.getEnum(ScmConfigKeys.OZONE_SCM_CHUNK_LAYOUT_KEY,
           DEFAULT_LAYOUT);
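
Call sites like this one only need their parameter type widened: the patch's call sites imply that OzoneConfiguration implements ConfigurationSource, so it can be passed directly. A sketch (not part of this patch):

    import org.apache.hadoop.hdds.conf.ConfigurationSource;
    import org.apache.hadoop.hdds.conf.OzoneConfiguration;
    import org.apache.hadoop.ozone.container.common.impl.ChunkLayOutVersion;

    public class LayoutVersionDemo {
      public static void main(String[] args) {
        // No wrapping or copying is needed at the call site.
        ConfigurationSource conf = new OzoneConfiguration();
        System.out.println(ChunkLayOutVersion.getConfiguredVersion(conf));
      }
    }
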
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/HddsDispatcher.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/HddsDispatcher.java
index 0ea8189..3f07c95 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/HddsDispatcher.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/HddsDispatcher.java
@@ -23,7 +23,7 @@ import java.util.Map;
 import java.util.Optional;
 import java.util.Set;
 
-import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdds.conf.ConfigurationSource;
 import org.apache.hadoop.hdds.HddsConfigKeys;
 import org.apache.hadoop.hdds.HddsUtils;
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
@@ -77,7 +77,7 @@ public class HddsDispatcher implements ContainerDispatcher, Auditor {
   private static final AuditLogger AUDIT =
       new AuditLogger(AuditLoggerType.DNLOGGER);
   private final Map<ContainerType, Handler> handlers;
-  private final Configuration conf;
+  private final ConfigurationSource conf;
   private final ContainerSet containerSet;
   private final VolumeSet volumeSet;
   private final StateContext context;
@@ -92,7 +92,7 @@ public class HddsDispatcher implements ContainerDispatcher, Auditor {
    * Constructs an OzoneContainer that receives calls from
    * XceiverServerHandler.
    */
-  public HddsDispatcher(Configuration config, ContainerSet contSet,
+  public HddsDispatcher(ConfigurationSource config, ContainerSet contSet,
       VolumeSet volumes, Map<ContainerType, Handler> handlers,
       StateContext context, ContainerMetrics metrics,
       TokenVerifier tokenVerifier) {
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/Handler.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/Handler.java
index 9f5b9f7..4ba7572 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/Handler.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/Handler.java
@@ -23,7 +23,7 @@ import java.io.InputStream;
 import java.io.OutputStream;
 import java.util.function.Consumer;
 
-import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdds.conf.ConfigurationSource;
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerCommandRequestProto;
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerCommandResponseProto;
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerType;
@@ -45,7 +45,7 @@ import org.apache.hadoop.ozone.container.keyvalue.TarContainerPacker;
 @SuppressWarnings("visibilitymodifier")
 public abstract class Handler {
 
-  protected final Configuration conf;
+  protected final ConfigurationSource conf;
   protected final ContainerSet containerSet;
   protected final VolumeSet volumeSet;
   protected String scmID;
@@ -53,7 +53,7 @@ public abstract class Handler {
   protected String datanodeId;
   private Consumer<ContainerReplicaProto> icrSender;
 
-  protected Handler(Configuration config, String datanodeId,
+  protected Handler(ConfigurationSource config, String datanodeId,
       ContainerSet contSet, VolumeSet volumeSet,
       ContainerMetrics containerMetrics,
       Consumer<ContainerReplicaProto> icrSender) {
@@ -66,7 +66,7 @@ public abstract class Handler {
   }
 
   public static Handler getHandlerForContainerType(
-      final ContainerType containerType, final Configuration config,
+      final ContainerType containerType, final ConfigurationSource config,
       final String datanodeId, final ContainerSet contSet,
       final VolumeSet volumeSet, final ContainerMetrics metrics,
       Consumer<ContainerReplicaProto> icrSender) {
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/report/ReportManager.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/report/ReportManager.java
index 536d4cc..bb43d0f 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/report/ReportManager.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/report/ReportManager.java
@@ -17,20 +17,21 @@
 
 package org.apache.hadoop.ozone.container.common.report;
 
+import java.util.ArrayList;
+import java.util.List;
+import java.util.concurrent.ScheduledExecutorService;
+import java.util.concurrent.TimeUnit;
+
+import org.apache.hadoop.hdds.conf.ConfigurationSource;
+import org.apache.hadoop.ozone.container.common.statemachine.StateContext;
+import org.apache.hadoop.util.concurrent.HadoopExecutors;
+
 import com.google.common.base.Preconditions;
 import com.google.common.util.concurrent.ThreadFactoryBuilder;
 import com.google.protobuf.GeneratedMessage;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.ozone.container.common.statemachine.StateContext;
-import org.apache.hadoop.util.concurrent.HadoopExecutors;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import java.util.ArrayList;
-import java.util.List;
-import java.util.concurrent.ScheduledExecutorService;
-import java.util.concurrent.TimeUnit;
-
 /**
  * ReportManager is responsible for managing all the {@link ReportPublisher}
  * and also provides {@link ScheduledExecutorService} to ReportPublisher
@@ -89,7 +90,7 @@ public final class ReportManager {
    * @param conf  - Conf
    * @return builder - Builder.
    */
-  public static Builder newBuilder(Configuration conf) {
+  public static Builder newBuilder(ConfigurationSource conf) {
     return new Builder(conf);
   }
 
@@ -103,7 +104,7 @@ public final class ReportManager {
     private ReportPublisherFactory publisherFactory;
 
 
-    private Builder(Configuration conf) {
+    private Builder(ConfigurationSource conf) {
       this.reportPublishers = new ArrayList<>();
       this.publisherFactory = new ReportPublisherFactory(conf);
     }
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/report/ReportPublisher.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/report/ReportPublisher.java
index a5e04aa..685a1d9 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/report/ReportPublisher.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/report/ReportPublisher.java
@@ -17,30 +17,29 @@
 
 package org.apache.hadoop.ozone.container.common.report;
 
-import com.google.protobuf.GeneratedMessage;
-import org.apache.hadoop.conf.Configurable;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.ozone.container.common.statemachine
-    .DatanodeStateMachine.DatanodeStates;
-import org.apache.hadoop.ozone.container.common.statemachine.StateContext;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
 import java.io.IOException;
 import java.util.concurrent.ScheduledExecutorService;
 import java.util.concurrent.TimeUnit;
 
+import org.apache.hadoop.hdds.conf.ConfigurationSource;
+import org.apache.hadoop.ozone.container.common.statemachine.DatanodeStateMachine.DatanodeStates;
+import org.apache.hadoop.ozone.container.common.statemachine.StateContext;
+
+import com.google.protobuf.GeneratedMessage;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
 /**
  * Abstract class responsible for scheduling the reports based on the
  * configured interval. All the ReportPublishers should extend this class.
  */
 public abstract class ReportPublisher<T extends GeneratedMessage>
-    implements Configurable, Runnable {
+    implements Runnable {
 
   private static final Logger LOG = LoggerFactory.getLogger(
       ReportPublisher.class);
 
-  private Configuration config;
+  private ConfigurationSource config;
   private StateContext context;
   private ScheduledExecutorService executor;
 
@@ -58,13 +57,11 @@ public abstract class ReportPublisher<T extends GeneratedMessage>
         getReportFrequency(), TimeUnit.MILLISECONDS);
   }
 
-  @Override
-  public void setConf(Configuration conf) {
+  public void setConf(ConfigurationSource conf) {
     config = conf;
   }
 
-  @Override
-  public Configuration getConf() {
+  public ConfigurationSource getConf() {
     return config;
   }
 
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/report/ReportPublisherFactory.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/report/ReportPublisherFactory.java
index 1c456a0..4533691 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/report/ReportPublisherFactory.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/report/ReportPublisherFactory.java
@@ -17,27 +17,23 @@
 
 package org.apache.hadoop.ozone.container.common.report;
 
-import com.google.protobuf.GeneratedMessage;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdds.protocol.proto.
-        StorageContainerDatanodeProtocolProtos.PipelineReportsProto;
-import org.apache.hadoop.hdds.protocol.proto.
-    StorageContainerDatanodeProtocolProtos.CommandStatusReportsProto;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.ContainerReportsProto;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.NodeReportProto;
-import org.apache.hadoop.util.ReflectionUtils;
-
 import java.util.HashMap;
 import java.util.Map;
 
+import org.apache.hadoop.hdds.conf.ConfigurationSource;
+import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.CommandStatusReportsProto;
+import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ContainerReportsProto;
+import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.NodeReportProto;
+import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.PipelineReportsProto;
+
+import com.google.protobuf.GeneratedMessage;
+
 /**
  * Factory class to construct {@link ReportPublisher} for a report.
  */
 public class ReportPublisherFactory {
 
-  private final Configuration conf;
+  private final ConfigurationSource conf;
   private final Map<Class<? extends GeneratedMessage>,
       Class<? extends ReportPublisher>> report2publisher;
 
@@ -46,7 +42,7 @@ public class ReportPublisherFactory {
    *
    * @param conf Configuration to be passed to the {@link ReportPublisher}
    */
-  public ReportPublisherFactory(Configuration conf) {
+  public ReportPublisherFactory(ConfigurationSource conf) {
     this.conf = conf;
     this.report2publisher = new HashMap<>();
 
@@ -73,7 +69,13 @@ public class ReportPublisherFactory {
     if (publisherClass == null) {
       throw new RuntimeException("No publisher found for report " + report);
     }
-    return ReflectionUtils.newInstance(publisherClass, conf);
+    try {
+      ReportPublisher reportPublisher =
+          publisherClass.getDeclaredConstructor().newInstance();
+      reportPublisher.setConf(conf);
+      return reportPublisher;
+    } catch (ReflectiveOperationException e) {
+      throw new RuntimeException(e);
+    }
   }
 
 }
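
Since ReportPublisher no longer implements Hadoop's Configurable, the factory now instantiates the publisher reflectively and injects the ConfigurationSource by hand. A usage sketch (not part of this patch), assuming the factory's public lookup method getPublisherFor(Class), whose body is modified above:

    import org.apache.hadoop.hdds.conf.ConfigurationSource;
    import org.apache.hadoop.hdds.conf.OzoneConfiguration;
    import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ContainerReportsProto;
    import org.apache.hadoop.ozone.container.common.report.ReportPublisher;
    import org.apache.hadoop.ozone.container.common.report.ReportPublisherFactory;

    public class PublisherDemo {
      public static void main(String[] args) {
        ConfigurationSource conf = new OzoneConfiguration();
        ReportPublisherFactory factory = new ReportPublisherFactory(conf);
        // The publisher arrives with conf already injected via setConf().
        ReportPublisher publisher =
            factory.getPublisherFor(ContainerReportsProto.class);
      }
    }
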
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/DatanodeStateMachine.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/DatanodeStateMachine.java
index dcde6fe..e41a537 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/DatanodeStateMachine.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/DatanodeStateMachine.java
@@ -24,34 +24,23 @@ import java.util.concurrent.atomic.AtomicLong;
 import java.util.concurrent.locks.ReadWriteLock;
 import java.util.concurrent.locks.ReentrantReadWriteLock;
 
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.conf.ConfigurationSource;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.PipelineReportsProto;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.CommandStatusReportsProto;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.ContainerReportsProto;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.NodeReportProto;
+import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.CommandStatusReportsProto;
+import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ContainerReportsProto;
+import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.NodeReportProto;
+import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.PipelineReportsProto;
 import org.apache.hadoop.hdds.security.x509.certificate.client.CertificateClient;
+import org.apache.hadoop.hdds.utils.LegacyHadoopConfigurationSource;
 import org.apache.hadoop.ozone.HddsDatanodeStopService;
 import org.apache.hadoop.ozone.container.common.report.ReportManager;
-import org.apache.hadoop.ozone.container.common.statemachine.commandhandler
-    .CloseContainerCommandHandler;
-import org.apache.hadoop.ozone.container.common.statemachine.commandhandler
-    .ClosePipelineCommandHandler;
-import org.apache.hadoop.ozone.container.common.statemachine.commandhandler
-    .CommandDispatcher;
-import org.apache.hadoop.ozone.container.common.statemachine.commandhandler
-    .CreatePipelineCommandHandler;
-import org.apache.hadoop.ozone.container.common.statemachine.commandhandler
-    .DeleteBlocksCommandHandler;
-import org.apache.hadoop.ozone.container.common.statemachine.commandhandler
-    .DeleteContainerCommandHandler;
-import org.apache.hadoop.ozone.container.common.statemachine.commandhandler
-    .ReplicateContainerCommandHandler;
+import org.apache.hadoop.ozone.container.common.statemachine.commandhandler.CloseContainerCommandHandler;
+import org.apache.hadoop.ozone.container.common.statemachine.commandhandler.ClosePipelineCommandHandler;
+import org.apache.hadoop.ozone.container.common.statemachine.commandhandler.CommandDispatcher;
+import org.apache.hadoop.ozone.container.common.statemachine.commandhandler.CreatePipelineCommandHandler;
+import org.apache.hadoop.ozone.container.common.statemachine.commandhandler.DeleteBlocksCommandHandler;
+import org.apache.hadoop.ozone.container.common.statemachine.commandhandler.DeleteContainerCommandHandler;
+import org.apache.hadoop.ozone.container.common.statemachine.commandhandler.ReplicateContainerCommandHandler;
 import org.apache.hadoop.ozone.container.keyvalue.TarContainerPacker;
 import org.apache.hadoop.ozone.container.ozoneimpl.OzoneContainer;
 import org.apache.hadoop.ozone.container.replication.ContainerReplicator;
@@ -76,7 +65,7 @@ public class DatanodeStateMachine implements Closeable {
   static final Logger LOG =
       LoggerFactory.getLogger(DatanodeStateMachine.class);
   private final ExecutorService executorService;
-  private final Configuration conf;
+  private final ConfigurationSource conf;
   private final SCMConnectionManager connectionManager;
   private StateContext context;
   private final OzoneContainer container;
@@ -106,11 +95,10 @@ public class DatanodeStateMachine implements Closeable {
    *                     enabled
    */
   public DatanodeStateMachine(DatanodeDetails datanodeDetails,
-      Configuration conf, CertificateClient certClient,
+      ConfigurationSource conf, CertificateClient certClient,
       HddsDatanodeStopService hddsDatanodeStopService) throws IOException {
-    OzoneConfiguration ozoneConf = new OzoneConfiguration(conf);
     DatanodeConfiguration dnConf =
-        ozoneConf.getObject(DatanodeConfiguration.class);
+        conf.getObject(DatanodeConfiguration.class);
 
     this.hddsDatanodeStopService = hddsDatanodeStopService;
     this.conf = conf;
@@ -126,7 +114,7 @@ public class DatanodeStateMachine implements Closeable {
     constructionLock.writeLock().lock();
     try {
       container = new OzoneContainer(this.datanodeDetails,
-          ozoneConf, context, certClient);
+          conf, context, certClient);
     } finally {
       constructionLock.writeLock().unlock();
     }
@@ -208,7 +196,8 @@ public class DatanodeStateMachine implements Closeable {
 
     // Start jvm monitor
     jvmPauseMonitor = new JvmPauseMonitor();
-    jvmPauseMonitor.init(conf);
+    jvmPauseMonitor
+        .init(LegacyHadoopConfigurationSource.asHadoopConfiguration(conf));
     jvmPauseMonitor.start();
 
     while (context.getState() != DatanodeStates.SHUTDOWN) {
@@ -456,7 +445,7 @@ public class DatanodeStateMachine implements Closeable {
    *
    * @param config
    */
-  private void initCommandHandlerThread(Configuration config) {
+  private void initCommandHandlerThread(ConfigurationSource config) {
 
     /**
      * Task that periodically checks if we have any outstanding commands.
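
The same bridge pattern recurs below in SCMConnectionManager: ConfigurationSource stays the internal type, and a Hadoop Configuration is materialized only at the boundary where legacy APIs (JvmPauseMonitor, RPC, NetUtils) still require one. A minimal sketch (not part of this patch; the key is illustrative):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdds.conf.ConfigurationSource;
    import org.apache.hadoop.hdds.conf.OzoneConfiguration;
    import org.apache.hadoop.hdds.utils.LegacyHadoopConfigurationSource;

    public class ConfigBridgeDemo {
      public static void main(String[] args) {
        ConfigurationSource conf = new OzoneConfiguration();
        // Convert only at the legacy boundary; internal code keeps the
        // lightweight interface.
        Configuration hadoopConf =
            LegacyHadoopConfigurationSource.asHadoopConfiguration(conf);
        System.out.println(hadoopConf.get("ozone.scm.names"));
      }
    }
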
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/EndpointStateMachine.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/EndpointStateMachine.java
index a500d4a..cd1a376 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/EndpointStateMachine.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/EndpointStateMachine.java
@@ -16,7 +16,7 @@
  */
 package org.apache.hadoop.ozone.container.common.statemachine;
 
-import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdds.conf.ConfigurationSource;
 import org.apache.hadoop.ozone.protocol.VersionResponse;
 import org.apache.hadoop.ozone.protocolPB
     .StorageContainerDatanodeProtocolClientSideTranslatorPB;
@@ -46,7 +46,7 @@ public class EndpointStateMachine
   private final AtomicLong missedCount;
   private final InetSocketAddress address;
   private final Lock lock;
-  private final Configuration conf;
+  private final ConfigurationSource conf;
   private EndPointStates state;
   private VersionResponse version;
   private ZonedDateTime lastSuccessfulHeartbeat;
@@ -59,7 +59,7 @@ public class EndpointStateMachine
    */
   public EndpointStateMachine(InetSocketAddress address,
       StorageContainerDatanodeProtocolClientSideTranslatorPB endPoint,
-      Configuration conf) {
+      ConfigurationSource conf) {
     this.endPoint = endPoint;
     this.missedCount = new AtomicLong(0);
     this.address = address;
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/SCMConnectionManager.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/SCMConnectionManager.java
index 814eeb4..ebc53c9 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/SCMConnectionManager.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/SCMConnectionManager.java
@@ -16,22 +16,6 @@
  */
 package org.apache.hadoop.ozone.container.common.statemachine;
 
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.io.IOUtils;
-import org.apache.hadoop.io.retry.RetryPolicies;
-import org.apache.hadoop.io.retry.RetryPolicy;
-import org.apache.hadoop.ipc.ProtobufRpcEngine;
-import org.apache.hadoop.ipc.RPC;
-import org.apache.hadoop.metrics2.util.MBeans;
-import org.apache.hadoop.net.NetUtils;
-import org.apache.hadoop.ozone.protocolPB.ReconDatanodeProtocolPB;
-import org.apache.hadoop.ozone.protocolPB
-    .StorageContainerDatanodeProtocolClientSideTranslatorPB;
-import org.apache.hadoop.ozone.protocolPB.StorageContainerDatanodeProtocolPB;
-import org.apache.hadoop.security.UserGroupInformation;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
 import javax.management.ObjectName;
 import java.io.Closeable;
 import java.io.IOException;
@@ -45,9 +29,25 @@ import java.util.concurrent.TimeUnit;
 import java.util.concurrent.locks.ReadWriteLock;
 import java.util.concurrent.locks.ReentrantReadWriteLock;
 
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdds.conf.ConfigurationSource;
+import org.apache.hadoop.hdds.utils.LegacyHadoopConfigurationSource;
+import org.apache.hadoop.io.IOUtils;
+import org.apache.hadoop.io.retry.RetryPolicies;
+import org.apache.hadoop.io.retry.RetryPolicy;
+import org.apache.hadoop.ipc.ProtobufRpcEngine;
+import org.apache.hadoop.ipc.RPC;
+import org.apache.hadoop.metrics2.util.MBeans;
+import org.apache.hadoop.net.NetUtils;
+import org.apache.hadoop.ozone.protocolPB.ReconDatanodeProtocolPB;
+import org.apache.hadoop.ozone.protocolPB.StorageContainerDatanodeProtocolClientSideTranslatorPB;
+import org.apache.hadoop.ozone.protocolPB.StorageContainerDatanodeProtocolPB;
+import org.apache.hadoop.security.UserGroupInformation;
+
 import static java.util.Collections.unmodifiableList;
-import static org.apache.hadoop.hdds.utils.HddsServerUtil
-    .getScmRpcTimeOutInMilliseconds;
+import static org.apache.hadoop.hdds.utils.HddsServerUtil.getScmRpcTimeOutInMilliseconds;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * SCMConnectionManager - Acts as a class that manages the membership
@@ -62,10 +62,10 @@ public class SCMConnectionManager
   private final Map<InetSocketAddress, EndpointStateMachine> scmMachines;
 
   private final int rpcTimeout;
-  private final Configuration conf;
+  private final ConfigurationSource conf;
   private ObjectName jmxBean;
 
-  public SCMConnectionManager(Configuration conf) {
+  public SCMConnectionManager(ConfigurationSource conf) {
     this.mapLock = new ReentrantReadWriteLock();
     Long timeOut = getScmRpcTimeOutInMilliseconds(conf);
     this.rpcTimeout = timeOut.intValue();
@@ -82,7 +82,7 @@ public class SCMConnectionManager
    *
    * @return ozoneConfig.
    */
-  public Configuration getConf() {
+  public ConfigurationSource getConf() {
     return conf;
   }
 
@@ -139,7 +139,11 @@ public class SCMConnectionManager
         return;
       }
 
-      RPC.setProtocolEngine(conf, StorageContainerDatanodeProtocolPB.class,
+      Configuration hadoopConfig =
+          LegacyHadoopConfigurationSource.asHadoopConfiguration(this.conf);
+      RPC.setProtocolEngine(
+          hadoopConfig,
+          StorageContainerDatanodeProtocolPB.class,
           ProtobufRpcEngine.class);
       long version =
           RPC.getProtocolVersion(StorageContainerDatanodeProtocolPB.class);
@@ -150,8 +154,8 @@ public class SCMConnectionManager
 
       StorageContainerDatanodeProtocolPB rpcProxy = RPC.getProtocolProxy(
           StorageContainerDatanodeProtocolPB.class, version,
-          address, UserGroupInformation.getCurrentUser(), conf,
-          NetUtils.getDefaultSocketFactory(conf), getRpcTimeout(),
+          address, UserGroupInformation.getCurrentUser(), hadoopConfig,
+          NetUtils.getDefaultSocketFactory(hadoopConfig), getRpcTimeout(),
           retryPolicy).getProxy();
 
       StorageContainerDatanodeProtocolClientSideTranslatorPB rpcClient =
@@ -159,7 +163,7 @@ public class SCMConnectionManager
           rpcProxy);
 
       EndpointStateMachine endPoint =
-          new EndpointStateMachine(address, rpcClient, conf);
+          new EndpointStateMachine(address, rpcClient, this.conf);
       endPoint.setPassive(false);
       scmMachines.put(address, endPoint);
     } finally {
@@ -181,8 +185,9 @@ public class SCMConnectionManager
             "Ignoring the request.");
         return;
       }
-
-      RPC.setProtocolEngine(conf, ReconDatanodeProtocolPB.class,
+      Configuration hadoopConfig =
+          LegacyHadoopConfigurationSource.asHadoopConfiguration(this.conf);
+      RPC.setProtocolEngine(hadoopConfig, ReconDatanodeProtocolPB.class,
           ProtobufRpcEngine.class);
       long version =
           RPC.getProtocolVersion(ReconDatanodeProtocolPB.class);
@@ -192,8 +197,8 @@ public class SCMConnectionManager
               60000, TimeUnit.MILLISECONDS);
       ReconDatanodeProtocolPB rpcProxy = RPC.getProtocolProxy(
           ReconDatanodeProtocolPB.class, version,
-          address, UserGroupInformation.getCurrentUser(), conf,
-          NetUtils.getDefaultSocketFactory(conf), getRpcTimeout(),
+          address, UserGroupInformation.getCurrentUser(), hadoopConfig,
+          NetUtils.getDefaultSocketFactory(hadoopConfig), getRpcTimeout(),
           retryPolicy).getProxy();
 
       StorageContainerDatanodeProtocolClientSideTranslatorPB rpcClient =
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/StateContext.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/StateContext.java
index 04502b6..73bea25 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/StateContext.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/StateContext.java
@@ -16,54 +16,45 @@
  */
 package org.apache.hadoop.ozone.container.common.statemachine;
 
-import com.google.common.base.Preconditions;
-import com.google.protobuf.GeneratedMessage;
-
 import java.net.InetSocketAddress;
+import java.util.ArrayList;
 import java.util.HashMap;
 import java.util.HashSet;
+import java.util.LinkedList;
+import java.util.List;
 import java.util.Map;
+import java.util.Queue;
 import java.util.Set;
 import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.TimeoutException;
+import java.util.concurrent.atomic.AtomicLong;
+import java.util.concurrent.locks.Lock;
+import java.util.concurrent.locks.ReentrantLock;
+import java.util.function.Consumer;
 
-import org.apache.commons.collections.CollectionUtils;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.SCMCommandProto;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.PipelineAction;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.ContainerAction;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.CommandStatus.Status;
+import org.apache.hadoop.hdds.conf.ConfigurationSource;
+import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.CommandStatus.Status;
+import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ContainerAction;
+import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.PipelineAction;
+import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMCommandProto;
 import org.apache.hadoop.ozone.container.common.states.DatanodeState;
-import org.apache.hadoop.ozone.container.common.states.datanode
-    .InitDatanodeState;
-import org.apache.hadoop.ozone.container.common.states.datanode
-    .RunningDatanodeState;
+import org.apache.hadoop.ozone.container.common.states.datanode.InitDatanodeState;
+import org.apache.hadoop.ozone.container.common.states.datanode.RunningDatanodeState;
 import org.apache.hadoop.ozone.protocol.commands.CommandStatus;
-import org.apache.hadoop.ozone.protocol.commands
-    .DeleteBlockCommandStatus.DeleteBlockCommandStatusBuilder;
+import org.apache.hadoop.ozone.protocol.commands.DeleteBlockCommandStatus.DeleteBlockCommandStatusBuilder;
 import org.apache.hadoop.ozone.protocol.commands.SCMCommand;
 
+import com.google.common.base.Preconditions;
+import com.google.protobuf.GeneratedMessage;
 import static java.lang.Math.min;
+import org.apache.commons.collections.CollectionUtils;
 import static org.apache.hadoop.hdds.utils.HddsServerUtil.getScmHeartbeatInterval;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import java.util.LinkedList;
-import java.util.List;
-import java.util.Queue;
-import java.util.ArrayList;
-import java.util.concurrent.ExecutionException;
-import java.util.concurrent.ExecutorService;
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.TimeoutException;
-import java.util.concurrent.atomic.AtomicLong;
-import java.util.concurrent.locks.Lock;
-import java.util.concurrent.locks.ReentrantLock;
-import java.util.function.Consumer;
-
 /**
  * Current Context of State Machine.
  */
@@ -75,7 +66,7 @@ public class StateContext {
   private final Lock lock;
   private final DatanodeStateMachine parent;
   private final AtomicLong stateExecutionCount;
-  private final Configuration conf;
+  private final ConfigurationSource conf;
   private final Set<InetSocketAddress> endpoints;
   private final Map<InetSocketAddress, List<GeneratedMessage>> reports;
   private final Map<InetSocketAddress, Queue<ContainerAction>> containerActions;
@@ -98,8 +89,9 @@ public class StateContext {
    * @param state  - State
    * @param parent Parent State Machine
    */
-  public StateContext(Configuration conf, DatanodeStateMachine.DatanodeStates
-      state, DatanodeStateMachine parent) {
+  public StateContext(ConfigurationSource conf,
+      DatanodeStateMachine.DatanodeStates state,
+      DatanodeStateMachine parent) {
     this.conf = conf;
     this.state = state;
     this.parent = parent;
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/CreatePipelineCommandHandler.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/CreatePipelineCommandHandler.java
index 49a8fd9..c60c112 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/CreatePipelineCommandHandler.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/CreatePipelineCommandHandler.java
@@ -16,24 +16,26 @@
  */
 package org.apache.hadoop.ozone.container.common.statemachine.commandhandler;
 
-import org.apache.hadoop.conf.Configuration;
+import java.io.IOException;
+import java.util.Collection;
+import java.util.concurrent.atomic.AtomicLong;
+import java.util.stream.Collectors;
+
+import org.apache.hadoop.hdds.conf.ConfigurationSource;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
-import org.apache.hadoop.hdds.protocol.proto.
-    StorageContainerDatanodeProtocolProtos.CreatePipelineCommandProto;
-import org.apache.hadoop.hdds.protocol.proto.
-    StorageContainerDatanodeProtocolProtos.SCMCommandProto;
+import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.CreatePipelineCommandProto;
+import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMCommandProto;
 import org.apache.hadoop.hdds.ratis.RatisHelper;
 import org.apache.hadoop.hdds.scm.pipeline.PipelineID;
-import org.apache.hadoop.ozone.container.common.statemachine
-    .SCMConnectionManager;
+import org.apache.hadoop.ozone.container.common.statemachine.SCMConnectionManager;
 import org.apache.hadoop.ozone.container.common.statemachine.StateContext;
-import org.apache.hadoop.ozone.container.common.transport.server
-    .XceiverServerSpi;
+import org.apache.hadoop.ozone.container.common.transport.server.XceiverServerSpi;
 import org.apache.hadoop.ozone.container.ozoneimpl.OzoneContainer;
 import org.apache.hadoop.ozone.protocol.commands.CreatePipelineCommand;
 import org.apache.hadoop.ozone.protocol.commands.SCMCommand;
 import org.apache.hadoop.util.Time;
+
 import org.apache.ratis.client.RaftClient;
 import org.apache.ratis.protocol.AlreadyExistsException;
 import org.apache.ratis.protocol.RaftGroup;
@@ -42,11 +44,6 @@ import org.apache.ratis.protocol.RaftPeer;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import java.io.IOException;
-import java.util.Collection;
-import java.util.concurrent.atomic.AtomicLong;
-import java.util.stream.Collectors;
-
 /**
  * Handler for create pipeline command received from SCM.
  */
@@ -56,14 +53,14 @@ public class CreatePipelineCommandHandler implements CommandHandler {
       LoggerFactory.getLogger(CreatePipelineCommandHandler.class);
 
   private final AtomicLong invocationCount = new AtomicLong(0);
-  private final Configuration conf;
+  private final ConfigurationSource conf;
 
   private long totalTime;
 
   /**
    * Constructs a createPipelineCommand handler.
    */
-  public CreatePipelineCommandHandler(Configuration conf) {
+  public CreatePipelineCommandHandler(ConfigurationSource conf) {
     this.conf = conf;
   }
 
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/DeleteBlocksCommandHandler.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/DeleteBlocksCommandHandler.java
index eac26f1..2098257 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/DeleteBlocksCommandHandler.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/DeleteBlocksCommandHandler.java
@@ -17,7 +17,7 @@
 package org.apache.hadoop.ozone.container.common.statemachine.commandhandler;
 
 import com.google.common.primitives.Longs;
-import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdds.conf.ConfigurationSource;
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
 import org.apache.hadoop.hdds.protocol.proto
     .StorageContainerDatanodeProtocolProtos.SCMCommandProto;
@@ -68,13 +68,13 @@ public class DeleteBlocksCommandHandler implements CommandHandler {
       LoggerFactory.getLogger(DeleteBlocksCommandHandler.class);
 
   private final ContainerSet containerSet;
-  private final Configuration conf;
+  private final ConfigurationSource conf;
   private int invocationCount;
   private long totalTime;
   private boolean cmdExecuted;
 
   public DeleteBlocksCommandHandler(ContainerSet cset,
-      Configuration conf) {
+      ConfigurationSource conf) {
     this.containerSet = cset;
     this.conf = conf;
   }
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/ReplicateContainerCommandHandler.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/ReplicateContainerCommandHandler.java
index a96032e..41958bf 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/ReplicateContainerCommandHandler.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/ReplicateContainerCommandHandler.java
@@ -18,7 +18,7 @@ package org.apache.hadoop.ozone.container.common.statemachine.commandhandler;
 
 import java.util.List;
 
-import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdds.conf.ConfigurationSource;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMCommandProto;
 import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMCommandProto.Type;
@@ -46,12 +46,12 @@ public class ReplicateContainerCommandHandler implements CommandHandler {
 
   private long totalTime;
 
-  private Configuration conf;
+  private ConfigurationSource conf;
 
   private ReplicationSupervisor supervisor;
 
   public ReplicateContainerCommandHandler(
-      Configuration conf,
+      ConfigurationSource conf,
       ReplicationSupervisor supervisor) {
     this.conf = conf;
     this.supervisor = supervisor;
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/datanode/InitDatanodeState.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/datanode/InitDatanodeState.java
index a73f1c5..ba898db 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/datanode/InitDatanodeState.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/datanode/InitDatanodeState.java
@@ -16,21 +16,6 @@
  */
 package org.apache.hadoop.ozone.container.common.states.datanode;
 
-import com.google.common.base.Strings;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdds.protocol.DatanodeDetails;
-import org.apache.hadoop.hdds.utils.HddsServerUtil;
-import org.apache.hadoop.hdds.scm.ScmConfigKeys;
-import org.apache.hadoop.ozone.container.common.helpers.ContainerUtils;
-import org.apache.hadoop.ozone.container.common.statemachine
-    .DatanodeStateMachine;
-import org.apache.hadoop.ozone.container.common.statemachine
-    .SCMConnectionManager;
-import org.apache.hadoop.ozone.container.common.statemachine.StateContext;
-import org.apache.hadoop.ozone.container.common.states.DatanodeState;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
 import java.io.File;
 import java.io.IOException;
 import java.net.InetSocketAddress;
@@ -42,8 +27,21 @@ import java.util.concurrent.Future;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.TimeoutException;
 
+import org.apache.hadoop.hdds.conf.ConfigurationSource;
+import org.apache.hadoop.hdds.protocol.DatanodeDetails;
+import org.apache.hadoop.hdds.scm.ScmConfigKeys;
+import org.apache.hadoop.hdds.utils.HddsServerUtil;
+import org.apache.hadoop.ozone.container.common.helpers.ContainerUtils;
+import org.apache.hadoop.ozone.container.common.statemachine.DatanodeStateMachine;
+import org.apache.hadoop.ozone.container.common.statemachine.SCMConnectionManager;
+import org.apache.hadoop.ozone.container.common.statemachine.StateContext;
+import org.apache.hadoop.ozone.container.common.states.DatanodeState;
+
+import com.google.common.base.Strings;
 import static org.apache.hadoop.hdds.HddsUtils.getReconAddresses;
 import static org.apache.hadoop.hdds.HddsUtils.getSCMAddresses;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * Init Datanode State is the task that gets run when we are in Init State.
@@ -52,7 +50,7 @@ public class InitDatanodeState implements DatanodeState,
     Callable<DatanodeStateMachine.DatanodeStates> {
   static final Logger LOG = LoggerFactory.getLogger(InitDatanodeState.class);
   private final SCMConnectionManager connectionManager;
-  private final Configuration conf;
+  private final ConfigurationSource conf;
   private final StateContext context;
   private Future<DatanodeStateMachine.DatanodeStates> result;
 
@@ -63,7 +61,7 @@ public class InitDatanodeState implements DatanodeState,
    * @param connectionManager - Connection Manager
    * @param context - Current Context
    */
-  public InitDatanodeState(Configuration conf,
+  public InitDatanodeState(ConfigurationSource conf,
                            SCMConnectionManager connectionManager,
                            StateContext context) {
     this.conf = conf;
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/datanode/RunningDatanodeState.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/datanode/RunningDatanodeState.java
index 779b1a2..1ecfbf9 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/datanode/RunningDatanodeState.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/datanode/RunningDatanodeState.java
@@ -16,7 +16,7 @@
  */
 package org.apache.hadoop.ozone.container.common.states.datanode;
 
-import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdds.conf.ConfigurationSource;
 import org.apache.hadoop.ozone.container.common.statemachine.DatanodeStateMachine;
 import org.apache.hadoop.ozone.container.common.statemachine.EndpointStateMachine;
 import org.apache.hadoop.ozone.container.common.statemachine.EndpointStateMachine.EndPointStates;
@@ -51,14 +51,14 @@ public class RunningDatanodeState implements DatanodeState {
   static final Logger
       LOG = LoggerFactory.getLogger(RunningDatanodeState.class);
   private final SCMConnectionManager connectionManager;
-  private final Configuration conf;
+  private final ConfigurationSource conf;
   private final StateContext context;
   private CompletionService<EndPointStates> ecs;
   /** Cache the end point task per end point per end point state. */
   private Map<EndpointStateMachine, Map<EndPointStates,
       Callable<EndPointStates>>> endpointTasks;
 
-  public RunningDatanodeState(Configuration conf,
+  public RunningDatanodeState(ConfigurationSource conf,
       SCMConnectionManager connectionManager,
       StateContext context) {
     this.connectionManager = connectionManager;
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/HeartbeatEndpointTask.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/HeartbeatEndpointTask.java
index fb1d1af..494ccd9 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/HeartbeatEndpointTask.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/HeartbeatEndpointTask.java
@@ -21,7 +21,7 @@ package org.apache.hadoop.ozone.container.common.states.endpoint;
 import com.google.common.base.Preconditions;
 import com.google.protobuf.Descriptors;
 import com.google.protobuf.GeneratedMessage;
-import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdds.conf.ConfigurationSource;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos.DatanodeDetailsProto;
 import org.apache.hadoop.hdds.protocol.proto
@@ -78,7 +78,7 @@ public class HeartbeatEndpointTask
   static final Logger LOG =
       LoggerFactory.getLogger(HeartbeatEndpointTask.class);
   private final EndpointStateMachine rpcEndpoint;
-  private final Configuration conf;
+  private final ConfigurationSource conf;
   private DatanodeDetailsProto datanodeDetailsProto;
   private StateContext context;
   private int maxContainerActionsPerHB;
@@ -90,7 +90,7 @@ public class HeartbeatEndpointTask
    * @param conf Config.
    */
   public HeartbeatEndpointTask(EndpointStateMachine rpcEndpoint,
-      Configuration conf, StateContext context) {
+      ConfigurationSource conf, StateContext context) {
     this.rpcEndpoint = rpcEndpoint;
     this.conf = conf;
     this.context = context;
@@ -344,7 +344,7 @@ public class HeartbeatEndpointTask
    */
   public static class Builder {
     private EndpointStateMachine endPointStateMachine;
-    private Configuration conf;
+    private ConfigurationSource conf;
     private DatanodeDetails datanodeDetails;
     private StateContext context;
 
@@ -371,7 +371,7 @@ public class HeartbeatEndpointTask
      * @param config - config
      * @return Builder
      */
-    public Builder setConfig(Configuration config) {
+    public Builder setConfig(ConfigurationSource config) {
       this.conf = config;
       return this;
     }
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/RegisterEndpointTask.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/RegisterEndpointTask.java
index 92e5743..be95f01 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/RegisterEndpointTask.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/RegisterEndpointTask.java
@@ -19,7 +19,7 @@ package org.apache.hadoop.ozone.container.common.states.endpoint;
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Preconditions;
 import org.apache.commons.lang3.StringUtils;
-import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdds.conf.ConfigurationSource;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 import org.apache.hadoop.hdds.protocol.proto
         .StorageContainerDatanodeProtocolProtos.PipelineReportsProto;
@@ -49,7 +49,7 @@ public final class RegisterEndpointTask implements
   static final Logger LOG = LoggerFactory.getLogger(RegisterEndpointTask.class);
 
   private final EndpointStateMachine rpcEndPoint;
-  private final Configuration conf;
+  private final ConfigurationSource conf;
   private Future<EndpointStateMachine.EndPointStates> result;
   private DatanodeDetails datanodeDetails;
   private final OzoneContainer datanodeContainerManager;
@@ -64,7 +64,7 @@ public final class RegisterEndpointTask implements
    */
   @VisibleForTesting
   public RegisterEndpointTask(EndpointStateMachine rpcEndPoint,
-      Configuration conf, OzoneContainer ozoneContainer,
+      ConfigurationSource conf, OzoneContainer ozoneContainer,
       StateContext context) {
     this.rpcEndPoint = rpcEndPoint;
     this.conf = conf;
@@ -163,7 +163,7 @@ public final class RegisterEndpointTask implements
    */
   public static class Builder {
     private EndpointStateMachine endPointStateMachine;
-    private Configuration conf;
+    private ConfigurationSource conf;
     private DatanodeDetails datanodeDetails;
     private OzoneContainer container;
     private StateContext context;
@@ -191,7 +191,7 @@ public final class RegisterEndpointTask implements
      * @param config - config
      * @return Builder.
      */
-    public Builder setConfig(Configuration config) {
+    public Builder setConfig(ConfigurationSource config) {
       this.conf = config;
       return this;
     }
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/VersionEndpointTask.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/VersionEndpointTask.java
index 0834f77..6c53756 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/VersionEndpointTask.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/VersionEndpointTask.java
@@ -16,26 +16,25 @@
  */
 package org.apache.hadoop.ozone.container.common.states.endpoint;
 
-import com.google.common.base.Preconditions;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.SCMVersionResponseProto;
+import java.io.IOException;
+import java.util.Map;
+import java.util.concurrent.Callable;
+
+import org.apache.hadoop.hdds.conf.ConfigurationSource;
+import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMVersionResponseProto;
 import org.apache.hadoop.ozone.OzoneConsts;
-import org.apache.hadoop.ozone.container.common.statemachine
-    .EndpointStateMachine;
+import org.apache.hadoop.ozone.container.common.statemachine.EndpointStateMachine;
 import org.apache.hadoop.ozone.container.common.utils.HddsVolumeUtil;
 import org.apache.hadoop.ozone.container.common.volume.HddsVolume;
 import org.apache.hadoop.ozone.container.common.volume.MutableVolumeSet;
 import org.apache.hadoop.ozone.container.ozoneimpl.OzoneContainer;
 import org.apache.hadoop.ozone.protocol.VersionResponse;
 import org.apache.hadoop.util.DiskChecker.DiskOutOfSpaceException;
+
+import com.google.common.base.Preconditions;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import java.io.IOException;
-import java.util.Map;
-import java.util.concurrent.Callable;
-
 /**
  * Task that returns version.
  */
@@ -44,11 +43,11 @@ public class VersionEndpointTask implements
   public static final Logger LOG = LoggerFactory.getLogger(VersionEndpointTask
       .class);
   private final EndpointStateMachine rpcEndPoint;
-  private final Configuration configuration;
+  private final ConfigurationSource configuration;
   private final OzoneContainer ozoneContainer;
 
   public VersionEndpointTask(EndpointStateMachine rpcEndPoint,
-                             Configuration conf, OzoneContainer container) {
+      ConfigurationSource conf, OzoneContainer container) {
     this.rpcEndPoint = rpcEndPoint;
     this.configuration = conf;
     this.ozoneContainer = container;
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/XceiverServerGrpc.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/XceiverServerGrpc.java
index 441d9c8..9adabf4 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/XceiverServerGrpc.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/XceiverServerGrpc.java
@@ -18,19 +18,21 @@
 
 package org.apache.hadoop.ozone.container.common.transport.server;
 
-import com.google.common.base.Preconditions;
-import org.apache.hadoop.conf.Configuration;
+import java.io.IOException;
+import java.util.Collections;
+import java.util.List;
+import java.util.UUID;
+import java.util.concurrent.TimeUnit;
+
+import org.apache.hadoop.hdds.conf.ConfigurationSource;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails.Port.Name;
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
-    .ContainerCommandRequestProto;
+import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerCommandRequestProto;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
-import org.apache.hadoop.hdds.protocol.proto
-        .StorageContainerDatanodeProtocolProtos.PipelineReport;
+import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.PipelineReport;
+import org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException;
 import org.apache.hadoop.hdds.scm.pipeline.PipelineID;
-import org.apache.hadoop.hdds.scm.container.common.helpers.
-    StorageContainerException;
 import org.apache.hadoop.hdds.security.x509.SecurityConfig;
 import org.apache.hadoop.hdds.security.x509.certificate.client.CertificateClient;
 import org.apache.hadoop.hdds.tracing.GrpcServerInterceptor;
@@ -39,6 +41,7 @@ import org.apache.hadoop.ozone.OzoneConfigKeys;
 import org.apache.hadoop.ozone.OzoneConsts;
 import org.apache.hadoop.ozone.container.common.interfaces.ContainerDispatcher;
 
+import com.google.common.base.Preconditions;
 import io.opentracing.Scope;
 import org.apache.ratis.thirdparty.io.grpc.BindableService;
 import org.apache.ratis.thirdparty.io.grpc.Server;
@@ -50,12 +53,6 @@ import org.apache.ratis.thirdparty.io.netty.handler.ssl.SslContextBuilder;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import java.io.IOException;
-import java.util.Collections;
-import java.util.List;
-import java.util.UUID;
-import java.util.concurrent.TimeUnit;
-
 /**
  * Creates a Grpc server endpoint that acts as the communication layer for
  * Ozone containers.
@@ -77,7 +74,8 @@ public final class XceiverServerGrpc implements XceiverServerSpi {
    *
    * @param conf - Configuration
    */
-  public XceiverServerGrpc(DatanodeDetails datanodeDetails, Configuration conf,
+  public XceiverServerGrpc(DatanodeDetails datanodeDetails,
+      ConfigurationSource conf,
       ContainerDispatcher dispatcher, CertificateClient caClient,
       BindableService... additionalServices) {
     Preconditions.checkNotNull(conf);
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/ContainerStateMachine.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/ContainerStateMachine.java
index 8b7e710..c529e7b 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/ContainerStateMachine.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/ContainerStateMachine.java
@@ -18,14 +18,36 @@
 
 package org.apache.hadoop.ozone.container.common.transport.server.ratis;
 
-import com.google.common.annotations.VisibleForTesting;
-import com.google.common.base.Preconditions;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.conf.StorageUnit;
+import java.io.File;
+import java.io.FileInputStream;
+import java.io.FileOutputStream;
+import java.io.IOException;
+import java.io.OutputStream;
+import java.util.List;
+import java.util.Map;
+import java.util.Objects;
+import java.util.concurrent.CompletableFuture;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.Semaphore;
+import java.util.concurrent.ThreadPoolExecutor;
+import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.stream.Collectors;
+
 import org.apache.hadoop.hdds.HddsUtils;
+import org.apache.hadoop.hdds.conf.ConfigurationSource;
+import org.apache.hadoop.hdds.conf.DatanodeRatisServerConfig;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.conf.StorageUnit;
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
-
+import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Container2BCSIDMapProto;
+import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerCommandRequestProto;
+import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerCommandResponseProto;
+import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ReadChunkRequestProto;
+import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ReadChunkResponseProto;
+import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Type;
+import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.WriteChunkRequestProto;
 import org.apache.hadoop.hdds.ratis.ContainerCommandRequestMessage;
 import org.apache.hadoop.hdds.scm.ScmConfigKeys;
 import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerNotOpenException;
@@ -33,11 +55,18 @@ import org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerExcep
 import org.apache.hadoop.hdds.utils.Cache;
 import org.apache.hadoop.hdds.utils.ResourceLimitCache;
 import org.apache.hadoop.ozone.OzoneConfigKeys;
-import org.apache.hadoop.hdds.conf.DatanodeRatisServerConfig;
+import org.apache.hadoop.ozone.container.common.interfaces.ContainerDispatcher;
 import org.apache.hadoop.ozone.container.ozoneimpl.ContainerController;
 import org.apache.hadoop.util.Time;
-import org.apache.ratis.proto.RaftProtos.StateMachineLogEntryProto;
+
+import com.google.common.annotations.VisibleForTesting;
+import com.google.common.base.Preconditions;
+import org.apache.ratis.proto.RaftProtos.LogEntryProto;
 import org.apache.ratis.proto.RaftProtos.RaftPeerRole;
+import org.apache.ratis.proto.RaftProtos.RoleInfoProto;
+import org.apache.ratis.proto.RaftProtos.StateMachineLogEntryProto;
+import org.apache.ratis.protocol.Message;
+import org.apache.ratis.protocol.RaftClientRequest;
 import org.apache.ratis.protocol.RaftGroupId;
 import org.apache.ratis.protocol.RaftGroupMemberId;
 import org.apache.ratis.protocol.RaftPeerId;
@@ -46,54 +75,18 @@ import org.apache.ratis.server.RaftServer;
 import org.apache.ratis.server.impl.RaftServerProxy;
 import org.apache.ratis.server.protocol.TermIndex;
 import org.apache.ratis.server.raftlog.RaftLog;
-import org.apache.ratis.statemachine.impl.SingleFileSnapshotInfo;
-import org.apache.ratis.thirdparty.com.google.protobuf
-    .InvalidProtocolBufferException;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Type;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.
-    Container2BCSIDMapProto;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
-    .ContainerCommandRequestProto;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
-    .ContainerCommandResponseProto;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
-    .WriteChunkRequestProto;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
-    .ReadChunkRequestProto;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
-    .ReadChunkResponseProto;
-import org.apache.hadoop.ozone.container.common.interfaces.ContainerDispatcher;
-import org.apache.ratis.protocol.Message;
-import org.apache.ratis.protocol.RaftClientRequest;
 import org.apache.ratis.server.storage.RaftStorage;
-import org.apache.ratis.thirdparty.com.google.protobuf.ByteString;
-import org.apache.ratis.proto.RaftProtos.RoleInfoProto;
-import org.apache.ratis.proto.RaftProtos.LogEntryProto;
 import org.apache.ratis.statemachine.StateMachineStorage;
 import org.apache.ratis.statemachine.TransactionContext;
 import org.apache.ratis.statemachine.impl.BaseStateMachine;
 import org.apache.ratis.statemachine.impl.SimpleStateMachineStorage;
+import org.apache.ratis.statemachine.impl.SingleFileSnapshotInfo;
+import org.apache.ratis.thirdparty.com.google.protobuf.ByteString;
+import org.apache.ratis.thirdparty.com.google.protobuf.InvalidProtocolBufferException;
 import org.apache.ratis.thirdparty.com.google.protobuf.TextFormat;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import java.io.File;
-import java.io.IOException;
-import java.util.List;
-import java.util.Map;
-import java.util.Objects;
-import java.util.concurrent.CompletableFuture;
-import java.util.concurrent.ConcurrentHashMap;
-import java.util.concurrent.ExecutorService;
-import java.util.concurrent.Semaphore;
-import java.util.concurrent.ThreadPoolExecutor;
-import java.util.concurrent.atomic.AtomicBoolean;
-import java.util.stream.Collectors;
-import java.util.concurrent.Executors;
-import java.io.FileOutputStream;
-import java.io.FileInputStream;
-import java.io.OutputStream;
-
 /** A {@link org.apache.ratis.statemachine.StateMachine} for containers.
  *
  * The stateMachine is responsible for handling different types of container
@@ -161,7 +154,7 @@ public class ContainerStateMachine extends BaseStateMachine {
   public ContainerStateMachine(RaftGroupId gid, ContainerDispatcher dispatcher,
       ContainerController containerController,
       List<ThreadPoolExecutor> chunkExecutors,
-      XceiverServerRatis ratisServer, Configuration conf) {
+      XceiverServerRatis ratisServer, ConfigurationSource conf) {
     this.gid = gid;
     this.dispatcher = dispatcher;
     this.containerController = containerController;
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/XceiverServerRatis.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/XceiverServerRatis.java
index a3dc750..d3bdb3f 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/XceiverServerRatis.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/XceiverServerRatis.java
@@ -18,78 +18,86 @@
 
 package org.apache.hadoop.ozone.container.common.transport.server.ratis;
 
-import com.google.common.annotations.VisibleForTesting;
-import com.google.common.collect.ImmutableList;
-import com.google.common.util.concurrent.ThreadFactoryBuilder;
+import java.io.File;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.HashSet;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Map;
+import java.util.Objects;
+import java.util.Set;
+import java.util.UUID;
+import java.util.concurrent.BlockingQueue;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.LinkedBlockingDeque;
+import java.util.concurrent.ThreadFactory;
+import java.util.concurrent.ThreadPoolExecutor;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicLong;
 
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.conf.StorageUnit;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.conf.ConfigurationSource;
+import org.apache.hadoop.hdds.conf.DatanodeRatisServerConfig;
+import org.apache.hadoop.hdds.conf.StorageUnit;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerCommandRequestProto;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
-import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.PipelineReport;
 import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ClosePipelineInfo;
 import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.PipelineAction;
+import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.PipelineReport;
 import org.apache.hadoop.hdds.ratis.ContainerCommandRequestMessage;
-import org.apache.hadoop.hdds.utils.HddsServerUtil;
+import org.apache.hadoop.hdds.ratis.RatisHelper;
 import org.apache.hadoop.hdds.scm.pipeline.PipelineID;
 import org.apache.hadoop.hdds.security.x509.SecurityConfig;
 import org.apache.hadoop.hdds.security.x509.certificate.client.CertificateClient;
 import org.apache.hadoop.hdds.tracing.TracingUtil;
+import org.apache.hadoop.hdds.utils.HddsServerUtil;
 import org.apache.hadoop.ozone.OzoneConfigKeys;
-
 import org.apache.hadoop.ozone.OzoneConsts;
-import org.apache.hadoop.hdds.conf.DatanodeRatisServerConfig;
 import org.apache.hadoop.ozone.container.common.impl.ContainerData;
 import org.apache.hadoop.ozone.container.common.interfaces.ContainerDispatcher;
 import org.apache.hadoop.ozone.container.common.statemachine.StateContext;
-
-import io.opentracing.Scope;
 import org.apache.hadoop.ozone.container.common.transport.server.XceiverServerSpi;
 import org.apache.hadoop.ozone.container.ozoneimpl.ContainerController;
+
+import com.google.common.annotations.VisibleForTesting;
+import com.google.common.collect.ImmutableList;
+import com.google.common.util.concurrent.ThreadFactoryBuilder;
+import io.opentracing.Scope;
 import org.apache.ratis.RaftConfigKeys;
-import org.apache.hadoop.hdds.ratis.RatisHelper;
 import org.apache.ratis.conf.RaftProperties;
 import org.apache.ratis.grpc.GrpcConfigKeys;
 import org.apache.ratis.grpc.GrpcFactory;
 import org.apache.ratis.grpc.GrpcTlsConfig;
 import org.apache.ratis.netty.NettyConfigKeys;
-import org.apache.ratis.protocol.*;
+import org.apache.ratis.proto.RaftProtos;
+import org.apache.ratis.proto.RaftProtos.RoleInfoProto;
+import org.apache.ratis.protocol.ClientId;
+import org.apache.ratis.protocol.GroupInfoReply;
+import org.apache.ratis.protocol.GroupInfoRequest;
+import org.apache.ratis.protocol.GroupManagementRequest;
+import org.apache.ratis.protocol.NotLeaderException;
+import org.apache.ratis.protocol.RaftClientReply;
+import org.apache.ratis.protocol.RaftClientRequest;
+import org.apache.ratis.protocol.RaftGroup;
+import org.apache.ratis.protocol.RaftGroupId;
+import org.apache.ratis.protocol.RaftGroupMemberId;
+import org.apache.ratis.protocol.RaftPeerId;
+import org.apache.ratis.protocol.StateMachineException;
 import org.apache.ratis.rpc.RpcType;
 import org.apache.ratis.rpc.SupportedRpcType;
 import org.apache.ratis.server.RaftServer;
 import org.apache.ratis.server.RaftServerConfigKeys;
-import org.apache.ratis.proto.RaftProtos;
-import org.apache.ratis.proto.RaftProtos.RoleInfoProto;
-import org.apache.ratis.server.protocol.TermIndex;
 import org.apache.ratis.server.impl.RaftServerProxy;
+import org.apache.ratis.server.protocol.TermIndex;
 import org.apache.ratis.util.SizeInBytes;
 import org.apache.ratis.util.TimeDuration;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import java.io.File;
-import java.io.IOException;
-import java.util.Collection;
-import java.util.HashSet;
-import java.util.Iterator;
-import java.util.List;
-import java.util.Map;
-import java.util.Objects;
-import java.util.Collections;
-import java.util.Set;
-import java.util.UUID;
-import java.util.ArrayList;
-import java.util.concurrent.BlockingQueue;
-import java.util.concurrent.ConcurrentHashMap;
-import java.util.concurrent.ExecutorService;
-import java.util.concurrent.LinkedBlockingDeque;
-import java.util.concurrent.ThreadFactory;
-import java.util.concurrent.ThreadPoolExecutor;
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.atomic.AtomicLong;
-
 /**
  * Creates a ratis server endpoint that acts as the communication layer for
  * Ozone containers.
@@ -113,7 +121,7 @@ public final class XceiverServerRatis implements XceiverServerSpi {
   private long nodeFailureTimeoutMs;
   private boolean isStarted = false;
   private DatanodeDetails datanodeDetails;
-  private final OzoneConfiguration conf;
+  private final ConfigurationSource conf;
   // TODO: Remove the gids set when Ratis supports an api to query active
   // pipelines
   private final Set<RaftGroupId> raftGids = new HashSet<>();
@@ -123,7 +131,7 @@ public final class XceiverServerRatis implements XceiverServerSpi {
 
   private XceiverServerRatis(DatanodeDetails dd, int port,
       ContainerDispatcher dispatcher, ContainerController containerController,
-      StateContext context, GrpcTlsConfig tlsConfig, OzoneConfiguration conf)
+      StateContext context, GrpcTlsConfig tlsConfig, ConfigurationSource conf)
       throws IOException {
     this.conf = conf;
     Objects.requireNonNull(dd, "id == null");
@@ -363,7 +371,7 @@ public final class XceiverServerRatis implements XceiverServerSpi {
   }
 
   public static XceiverServerRatis newXceiverServerRatis(
-      DatanodeDetails datanodeDetails, OzoneConfiguration ozoneConf,
+      DatanodeDetails datanodeDetails, ConfigurationSource ozoneConf,
       ContainerDispatcher dispatcher, ContainerController containerController,
       CertificateClient caClient, StateContext context) throws IOException {
     int localPort = ozoneConf.getInt(
@@ -754,7 +762,7 @@ public final class XceiverServerRatis implements XceiverServerSpi {
   }
 
   private static List<ThreadPoolExecutor> createChunkExecutors(
-      Configuration conf) {
+      ConfigurationSource conf) {
     // TODO create single pool with N threads if using non-incremental chunks
     final int threadCount = conf.getInt(
         OzoneConfigKeys.DFS_CONTAINER_RATIS_NUM_WRITE_CHUNK_THREADS_KEY,
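
The call sites above illustrate the point of the change: the typed getters the
datanode actually needs (getInt, getBoolean, getTimeDuration, getStorageSize)
now live on the lightweight ConfigurationSource interface instead of the full
Hadoop Configuration class. A minimal sketch, imports elided, with
OzoneConfiguration as the assumed concrete source; the key constants are taken
from the hunks above and below, while the *_DEFAULT companion constant is an
assumption:

    ConfigurationSource conf = new OzoneConfiguration();

    int chunkThreads = conf.getInt(
        OzoneConfigKeys.DFS_CONTAINER_RATIS_NUM_WRITE_CHUNK_THREADS_KEY,
        OzoneConfigKeys.DFS_CONTAINER_RATIS_NUM_WRITE_CHUNK_THREADS_DEFAULT);

    // StorageUnit now comes from the new org.apache.hadoop.hdds.conf package:
    long maxContainerSize = (long) conf.getStorageSize(
        ScmConfigKeys.OZONE_SCM_CONTAINER_SIZE,
        ScmConfigKeys.OZONE_SCM_CONTAINER_SIZE_DEFAULT, StorageUnit.BYTES);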
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/utils/ContainerCache.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/utils/ContainerCache.java
index 04eb30f..d2d2901 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/utils/ContainerCache.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/utils/ContainerCache.java
@@ -18,21 +18,22 @@
 
 package org.apache.hadoop.ozone.container.common.utils;
 
+import java.io.File;
+import java.io.IOException;
+import java.util.concurrent.locks.Lock;
+import java.util.concurrent.locks.ReentrantLock;
+
+import org.apache.hadoop.hdds.conf.ConfigurationSource;
+import org.apache.hadoop.hdds.utils.MetadataStore;
+import org.apache.hadoop.hdds.utils.MetadataStoreBuilder;
+import org.apache.hadoop.ozone.OzoneConfigKeys;
+
 import com.google.common.base.Preconditions;
 import org.apache.commons.collections.MapIterator;
 import org.apache.commons.collections.map.LRUMap;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.ozone.OzoneConfigKeys;
-import org.apache.hadoop.hdds.utils.MetadataStore;
-import org.apache.hadoop.hdds.utils.MetadataStoreBuilder;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import java.io.File;
-import java.io.IOException;
-import java.util.concurrent.locks.Lock;
-import java.util.concurrent.locks.ReentrantLock;
-
 /**
  * container cache is a LRUMap that maintains the DB handles.
  */
@@ -57,7 +58,8 @@ public final class ContainerCache extends LRUMap {
    * @param conf - Configuration.
    * @return A instance of {@link ContainerCache}.
    */
-  public synchronized static ContainerCache getInstance(Configuration conf) {
+  public synchronized static ContainerCache getInstance(
+      ConfigurationSource conf) {
     if (cache == null) {
       int cacheSize = conf.getInt(OzoneConfigKeys.OZONE_CONTAINER_CACHE_SIZE,
           OzoneConfigKeys.OZONE_CONTAINER_CACHE_DEFAULT);
@@ -111,7 +113,7 @@ public final class ContainerCache extends LRUMap {
    * @return ReferenceCountedDB.
    */
   public ReferenceCountedDB getDB(long containerID, String containerDBType,
-                             String containerDBPath, Configuration conf)
+                             String containerDBPath, ConfigurationSource conf)
       throws IOException {
     Preconditions.checkState(containerID >= 0,
         "Container ID cannot be negative.");
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/HddsVolume.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/HddsVolume.java
index 0eee484..c3a5a41 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/HddsVolume.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/HddsVolume.java
@@ -19,11 +19,16 @@
 package org.apache.hadoop.ozone.container.common.volume;
 
 import javax.annotation.Nullable;
+import java.io.File;
+import java.io.IOException;
+import java.util.Objects;
+import java.util.Properties;
+import java.util.UUID;
+import java.util.concurrent.atomic.AtomicLong;
 
-import com.google.common.base.Preconditions;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdds.fs.SpaceUsageCheckFactory;
 import org.apache.hadoop.fs.StorageType;
+import org.apache.hadoop.hdds.conf.ConfigurationSource;
+import org.apache.hadoop.hdds.fs.SpaceUsageCheckFactory;
 import org.apache.hadoop.hdfs.server.datanode.StorageLocation;
 import org.apache.hadoop.hdfs.server.datanode.checker.Checkable;
 import org.apache.hadoop.hdfs.server.datanode.checker.VolumeCheckResult;
@@ -31,21 +36,15 @@ import org.apache.hadoop.ozone.common.InconsistentStorageStateException;
 import org.apache.hadoop.ozone.container.common.DataNodeLayoutVersion;
 import org.apache.hadoop.ozone.container.common.helpers.DatanodeVersionFile;
 import org.apache.hadoop.ozone.container.common.utils.HddsVolumeUtil;
-
 import org.apache.hadoop.util.DiskChecker;
 import org.apache.hadoop.util.Time;
-import org.apache.hadoop.hdds.annotation.InterfaceAudience;
-import org.apache.hadoop.hdds.annotation.InterfaceStability;
+
+import com.google.common.base.Preconditions;
+import org.apache.yetus.audience.InterfaceAudience;
+import org.apache.yetus.audience.InterfaceStability;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import java.io.File;
-import java.io.IOException;
-import java.util.Objects;
-import java.util.Properties;
-import java.util.UUID;
-import java.util.concurrent.atomic.AtomicLong;
-
 /**
  * HddsVolume represents volume in a datanode. {@link MutableVolumeSet}
  * maintains a list of HddsVolumes, one for each volume in the Datanode.
@@ -109,7 +108,7 @@ public class HddsVolume
    */
   public static class Builder {
     private final String volumeRootStr;
-    private Configuration conf;
+    private ConfigurationSource conf;
     private StorageType storageType;
 
     private String datanodeUuid;
@@ -121,7 +120,7 @@ public class HddsVolume
       this.volumeRootStr = rootDirStr;
     }
 
-    public Builder conf(Configuration config) {
+    public Builder conf(ConfigurationSource config) {
       this.conf = config;
       return this;
     }
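
The volume classes follow the same pattern. A hedged sketch of building an
HddsVolume against the new interface; only the conf(...) method is shown in
this hunk, so the other builder methods and the root path are assumptions:

    HddsVolume volume = new HddsVolume.Builder("/data/hdds")
        .conf(new OzoneConfiguration())   // any ConfigurationSource works
        .datanodeUuid(datanodeUuid)
        .build();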
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/HddsVolumeChecker.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/HddsVolumeChecker.java
index ce5d16b..9240a85 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/HddsVolumeChecker.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/HddsVolumeChecker.java
@@ -32,7 +32,7 @@ import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicLong;
 
 import com.google.common.util.concurrent.MoreExecutors;
-import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdds.conf.ConfigurationSource;
 import org.apache.hadoop.hdds.DFSConfigKeysLegacy;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.datanode.checker.VolumeCheckResult;
@@ -91,7 +91,7 @@ public class HddsVolumeChecker {
    * @param conf  Configuration object.
    * @param timer {@link Timer} object used for throttling checks.
    */
-  public HddsVolumeChecker(Configuration conf, Timer timer)
+  public HddsVolumeChecker(ConfigurationSource conf, Timer timer)
       throws DiskErrorException {
     maxAllowedTimeForCheckMs = conf.getTimeDuration(
         DFSConfigKeysLegacy.DFS_DATANODE_DISK_CHECK_TIMEOUT_KEY,
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/MutableVolumeSet.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/MutableVolumeSet.java
index 101e680..fe29b8b 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/MutableVolumeSet.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/MutableVolumeSet.java
@@ -33,9 +33,9 @@ import java.util.concurrent.ScheduledFuture;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.locks.ReentrantReadWriteLock;
 
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdds.fs.SpaceUsageCheckFactory;
 import org.apache.hadoop.fs.StorageType;
+import org.apache.hadoop.hdds.conf.ConfigurationSource;
+import org.apache.hadoop.hdds.fs.SpaceUsageCheckFactory;
 import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos;
 import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.NodeReportProto;
 import org.apache.hadoop.hdfs.server.datanode.StorageLocation;
@@ -65,7 +65,7 @@ public class MutableVolumeSet implements VolumeSet {
   private static final Logger LOG =
       LoggerFactory.getLogger(MutableVolumeSet.class);
 
-  private Configuration conf;
+  private ConfigurationSource conf;
 
   /**
    * Maintains a map of all active volumes in the DataNode.
@@ -106,12 +106,13 @@ public class MutableVolumeSet implements VolumeSet {
   private final HddsVolumeChecker volumeChecker;
   private Runnable failedVolumeListener;
 
-  public MutableVolumeSet(String dnUuid, Configuration conf)
+  public MutableVolumeSet(String dnUuid, ConfigurationSource conf)
       throws IOException {
     this(dnUuid, null, conf);
   }
 
-  public MutableVolumeSet(String dnUuid, String clusterID, Configuration conf)
+  public MutableVolumeSet(String dnUuid, String clusterID,
+      ConfigurationSource conf)
       throws IOException {
     this.datanodeUuid = dnUuid;
     this.clusterID = clusterID;
@@ -144,7 +145,7 @@ public class MutableVolumeSet implements VolumeSet {
   }
 
   @VisibleForTesting
-  HddsVolumeChecker getVolumeChecker(Configuration configuration)
+  HddsVolumeChecker getVolumeChecker(ConfigurationSource configuration)
       throws DiskChecker.DiskErrorException {
     return new HddsVolumeChecker(configuration, new Timer());
   }
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeInfo.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeInfo.java
index 215d1e5..e0669c7 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeInfo.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeInfo.java
@@ -18,17 +18,18 @@
 
 package org.apache.hadoop.ozone.container.common.volume;
 
-import com.google.common.annotations.VisibleForTesting;
-import org.apache.hadoop.conf.Configuration;
+import java.io.File;
+import java.io.IOException;
+
+import org.apache.hadoop.fs.StorageType;
+import org.apache.hadoop.hdds.conf.ConfigurationSource;
 import org.apache.hadoop.hdds.fs.SpaceUsageCheckFactory;
 import org.apache.hadoop.hdds.fs.SpaceUsageCheckParams;
-import org.apache.hadoop.fs.StorageType;
+
+import com.google.common.annotations.VisibleForTesting;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import java.io.File;
-import java.io.IOException;
-
 /**
  * Stores information about a disk/volume.
  */
@@ -51,13 +52,13 @@ public final class VolumeInfo {
    * Builder for VolumeInfo.
    */
   public static class Builder {
-    private final Configuration conf;
+    private final ConfigurationSource conf;
     private final String rootDir;
     private SpaceUsageCheckFactory usageCheckFactory;
     private StorageType storageType;
     private long configuredCapacity;
 
-    public Builder(String root, Configuration config) {
+    public Builder(String root, ConfigurationSource config) {
       this.rootDir = root;
       this.conf = config;
     }
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainer.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainer.java
index 6317e63..200bfe4 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainer.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainer.java
@@ -28,18 +28,14 @@ import java.time.Instant;
 import java.util.Map;
 import java.util.concurrent.locks.ReentrantReadWriteLock;
 
-import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileAlreadyExistsException;
 import org.apache.hadoop.fs.FileUtil;
+import org.apache.hadoop.hdds.conf.ConfigurationSource;
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
-    .ContainerDataProto;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
-    .ContainerType;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.ContainerReplicaProto;
-import org.apache.hadoop.hdds.scm.container.common.helpers
-    .StorageContainerException;
+import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerDataProto;
+import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerType;
+import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ContainerReplicaProto;
+import org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException;
 import org.apache.hadoop.hdfs.util.Canceler;
 import org.apache.hadoop.hdfs.util.DataTransferThrottler;
 import org.apache.hadoop.io.nativeio.NativeIO;
@@ -50,35 +46,25 @@ import org.apache.hadoop.ozone.container.common.impl.ContainerDataYaml;
 import org.apache.hadoop.ozone.container.common.interfaces.Container;
 import org.apache.hadoop.ozone.container.common.interfaces.ContainerPacker;
 import org.apache.hadoop.ozone.container.common.interfaces.VolumeChoosingPolicy;
+import org.apache.hadoop.ozone.container.common.utils.ReferenceCountedDB;
 import org.apache.hadoop.ozone.container.common.volume.HddsVolume;
 import org.apache.hadoop.ozone.container.common.volume.VolumeSet;
 import org.apache.hadoop.ozone.container.keyvalue.helpers.BlockUtils;
-import org.apache.hadoop.ozone.container.keyvalue.helpers
-    .KeyValueContainerLocationUtil;
+import org.apache.hadoop.ozone.container.keyvalue.helpers.KeyValueContainerLocationUtil;
 import org.apache.hadoop.ozone.container.keyvalue.helpers.KeyValueContainerUtil;
 import org.apache.hadoop.util.DiskChecker.DiskOutOfSpaceException;
 
 import com.google.common.base.Preconditions;
 import org.apache.commons.io.FileUtils;
-import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
-    .Result.CONTAINER_ALREADY_EXISTS;
-import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
-    .Result.CONTAINER_FILES_CREATE_ERROR;
-import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
-    .Result.CONTAINER_INTERNAL_ERROR;
+import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Result.CONTAINER_ALREADY_EXISTS;
+import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Result.CONTAINER_FILES_CREATE_ERROR;
+import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Result.CONTAINER_INTERNAL_ERROR;
 import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Result.CONTAINER_NOT_OPEN;
-import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
-    .Result.DISK_OUT_OF_SPACE;
-import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
-    .Result.ERROR_IN_COMPACT_DB;
-import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
-    .Result.ERROR_IN_DB_SYNC;
-import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
-    .Result.INVALID_CONTAINER_STATE;
-import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
-    .Result.UNSUPPORTED_REQUEST;
-
-import org.apache.hadoop.ozone.container.common.utils.ReferenceCountedDB;
+import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Result.DISK_OUT_OF_SPACE;
+import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Result.ERROR_IN_COMPACT_DB;
+import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Result.ERROR_IN_DB_SYNC;
+import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Result.INVALID_CONTAINER_STATE;
+import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Result.UNSUPPORTED_REQUEST;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -96,9 +82,10 @@ public class KeyValueContainer implements Container<KeyValueContainerData> {
   private final ReentrantReadWriteLock lock = new ReentrantReadWriteLock();
 
   private final KeyValueContainerData containerData;
-  private Configuration config;
+  private ConfigurationSource config;
 
-  public KeyValueContainer(KeyValueContainerData containerData, Configuration
+  public KeyValueContainer(KeyValueContainerData containerData,
+      ConfigurationSource
       ozoneConfig) {
     Preconditions.checkNotNull(containerData, "KeyValueContainerData cannot " +
         "be null");
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainerCheck.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainerCheck.java
index 1e53daa..95795e6 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainerCheck.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainerCheck.java
@@ -20,7 +20,7 @@ package org.apache.hadoop.ozone.container.keyvalue;
 
 import com.google.common.base.Preconditions;
 import com.google.common.primitives.Longs;
-import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdds.conf.ConfigurationSource;
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
 import org.apache.hadoop.hdfs.util.Canceler;
 import org.apache.hadoop.hdfs.util.DataTransferThrottler;
@@ -62,11 +62,11 @@ public class KeyValueContainerCheck {
 
   private long containerID;
   private KeyValueContainerData onDiskContainerData; //loaded from fs/disk
-  private Configuration checkConfig;
+  private ConfigurationSource checkConfig;
 
   private String metadataPath;
 
-  public KeyValueContainerCheck(String metadataPath, Configuration conf,
+  public KeyValueContainerCheck(String metadataPath, ConfigurationSource conf,
       long containerID) {
     Preconditions.checkArgument(metadataPath != null);
 
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java
index 15177fc..26e98c2 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java
@@ -30,9 +30,9 @@ import java.util.concurrent.locks.ReentrantLock;
 import java.util.function.Consumer;
 import java.util.function.Function;
 
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.conf.StorageUnit;
 import org.apache.hadoop.hdds.client.BlockID;
+import org.apache.hadoop.hdds.conf.ConfigurationSource;
+import org.apache.hadoop.hdds.conf.StorageUnit;
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerCommandRequestProto;
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerCommandResponseProto;
@@ -68,7 +68,6 @@ import org.apache.hadoop.ozone.container.keyvalue.impl.ChunkManagerFactory;
 import org.apache.hadoop.ozone.container.keyvalue.interfaces.BlockManager;
 import org.apache.hadoop.ozone.container.keyvalue.interfaces.ChunkManager;
 import org.apache.hadoop.util.AutoCloseableLock;
-import org.apache.hadoop.util.ReflectionUtils;
 
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Preconditions;
@@ -114,16 +113,20 @@ public class KeyValueHandler extends Handler {
   // A lock that is held during container creation.
   private final AutoCloseableLock containerCreationLock;
 
-  public KeyValueHandler(Configuration config, String datanodeId,
+  public KeyValueHandler(ConfigurationSource config, String datanodeId,
       ContainerSet contSet, VolumeSet volSet, ContainerMetrics metrics,
       Consumer<ContainerReplicaProto> icrSender) {
     super(config, datanodeId, contSet, volSet, metrics, icrSender);
     containerType = ContainerType.KeyValueContainer;
     blockManager = new BlockManagerImpl(config);
     chunkManager = ChunkManagerFactory.createChunkManager(config);
-    volumeChoosingPolicy = ReflectionUtils.newInstance(conf.getClass(
-        HDDS_DATANODE_VOLUME_CHOOSING_POLICY, RoundRobinVolumeChoosingPolicy
-            .class, VolumeChoosingPolicy.class), conf);
+    try {
+      volumeChoosingPolicy = conf.getClass(
+          HDDS_DATANODE_VOLUME_CHOOSING_POLICY, RoundRobinVolumeChoosingPolicy
+              .class, VolumeChoosingPolicy.class).newInstance();
+    } catch (Exception e) {
+      throw new RuntimeException(e);
+    }
     maxContainerSize = (long)config.getStorageSize(
         ScmConfigKeys.OZONE_SCM_CONTAINER_SIZE,
             ScmConfigKeys.OZONE_SCM_CONTAINER_SIZE_DEFAULT, StorageUnit.BYTES);
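
Note the behavioral nuance in the hunk above: ReflectionUtils.newInstance used
to inject the Configuration into Configurable implementations, whereas the new
clazz.newInstance() path only requires a public no-arg constructor and injects
nothing. A hedged sketch of what a pluggable policy now has to look like; the
chooseVolume signature is assumed from RoundRobinVolumeChoosingPolicy and is
not part of this hunk:

    import java.io.IOException;
    import java.util.List;

    import org.apache.hadoop.ozone.container.common.interfaces.VolumeChoosingPolicy;
    import org.apache.hadoop.ozone.container.common.volume.HddsVolume;
    import org.apache.hadoop.util.DiskChecker.DiskOutOfSpaceException;

    /** Naive illustrative policy: always picks the first volume. */
    public class FirstVolumeChoosingPolicy implements VolumeChoosingPolicy {

      public FirstVolumeChoosingPolicy() {
        // Public no-arg constructor required by newInstance(); the
        // ConfigurationSource is no longer handed in automatically.
      }

      @Override
      public HddsVolume chooseVolume(List<HddsVolume> volumes,
          long maxContainerSize) throws IOException {
        if (volumes.isEmpty()) {
          throw new DiskOutOfSpaceException("No volumes available.");
        }
        return volumes.get(0);
      }
    }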
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/BlockUtils.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/BlockUtils.java
index 7ceb622..99ead01 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/BlockUtils.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/BlockUtils.java
@@ -18,21 +18,19 @@
 
 package org.apache.hadoop.ozone.container.keyvalue.helpers;
 
-import com.google.common.base.Preconditions;
-import org.apache.hadoop.conf.Configuration;
+import java.io.IOException;
+
+import org.apache.hadoop.hdds.conf.ConfigurationSource;
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
 import org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException;
 import org.apache.hadoop.ozone.container.common.helpers.BlockData;
-import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData;
 import org.apache.hadoop.ozone.container.common.utils.ContainerCache;
 import org.apache.hadoop.ozone.container.common.utils.ReferenceCountedDB;
+import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData;
 
-import java.io.IOException;
-
-import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
-    .Result.NO_SUCH_BLOCK;
-import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
-    .Result.UNABLE_TO_READ_METADATA_DB;
+import com.google.common.base.Preconditions;
+import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Result.NO_SUCH_BLOCK;
+import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Result.UNABLE_TO_READ_METADATA_DB;
 
 /**
  * Utils functions to help block functions.
@@ -55,7 +53,7 @@ public final class BlockUtils {
    * @throws StorageContainerException
    */
   public static ReferenceCountedDB getDB(KeyValueContainerData containerData,
-                                    Configuration conf) throws
+                                    ConfigurationSource conf) throws
       StorageContainerException {
     Preconditions.checkNotNull(containerData);
     ContainerCache cache = ContainerCache.getInstance(conf);
@@ -78,8 +76,8 @@ public final class BlockUtils {
    * @param container - Container data.
    * @param conf - Configuration.
    */
-  public static void removeDB(KeyValueContainerData container, Configuration
-      conf) {
+  public static void removeDB(KeyValueContainerData container,
+      ConfigurationSource conf) {
     Preconditions.checkNotNull(container);
     ContainerCache cache = ContainerCache.getInstance(conf);
     Preconditions.checkNotNull(cache);
@@ -103,7 +101,7 @@ public final class BlockUtils {
    * @param conf configuration.
    */
   public static void addDB(ReferenceCountedDB db, String containerDBPath,
-      Configuration conf) {
+      ConfigurationSource conf) {
     ContainerCache cache = ContainerCache.getInstance(conf);
     Preconditions.checkNotNull(cache);
     cache.addDB(containerDBPath, db);
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/KeyValueContainerUtil.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/KeyValueContainerUtil.java
index d4ea459..9fda44b 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/KeyValueContainerUtil.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/KeyValueContainerUtil.java
@@ -25,7 +25,7 @@ import java.util.List;
 import java.util.Map;
 
 import com.google.common.primitives.Longs;
-import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdds.conf.ConfigurationSource;
 import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.ozone.OzoneConsts;
 import org.apache.hadoop.ozone.container.common.helpers.ContainerUtils;
@@ -63,7 +63,7 @@ public final class KeyValueContainerUtil {
    * @throws IOException
    */
   public static void createContainerMetaData(File containerMetaDataPath, File
-      chunksPath, File dbFile, Configuration conf) throws IOException {
+      chunksPath, File dbFile, ConfigurationSource conf) throws IOException {
     Preconditions.checkNotNull(containerMetaDataPath);
     Preconditions.checkNotNull(conf);
 
@@ -105,7 +105,7 @@ public final class KeyValueContainerUtil {
    * @throws IOException
    */
   public static void removeContainer(KeyValueContainerData containerData,
-                                     Configuration conf)
+                                     ConfigurationSource conf)
       throws IOException {
     Preconditions.checkNotNull(containerData);
     File containerMetaDataPath = new File(containerData
@@ -132,7 +132,7 @@ public final class KeyValueContainerUtil {
    * @throws IOException
    */
   public static void parseKVContainerData(KeyValueContainerData kvContainerData,
-      Configuration config) throws IOException {
+      ConfigurationSource config) throws IOException {
 
     long containerID = kvContainerData.getContainerID();
     File metadataPath = new File(kvContainerData.getMetadataPath());
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/BlockManagerImpl.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/BlockManagerImpl.java
index 58a4e8b..ee663d8 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/BlockManagerImpl.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/BlockManagerImpl.java
@@ -20,7 +20,7 @@ package org.apache.hadoop.ozone.container.keyvalue.impl;
 
 import com.google.common.base.Preconditions;
 import com.google.common.primitives.Longs;
-import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdds.conf.ConfigurationSource;
 import org.apache.hadoop.hdds.client.BlockID;
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
 import org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException;
@@ -57,7 +57,7 @@ public class BlockManagerImpl implements BlockManager {
   private static byte[] blockCommitSequenceIdKey =
           DFSUtil.string2Bytes(OzoneConsts.BLOCK_COMMIT_SEQUENCE_ID_PREFIX);
 
-  private Configuration config;
+  private ConfigurationSource config;
 
   private static final String DB_NULL_ERR_MSG = "DB cannot be null here";
   private static final String NO_SUCH_BLOCK_ERR_MSG =
@@ -68,7 +68,7 @@ public class BlockManagerImpl implements BlockManager {
    *
    * @param conf - Ozone configuration
    */
-  public BlockManagerImpl(Configuration conf) {
+  public BlockManagerImpl(ConfigurationSource conf) {
     Preconditions.checkNotNull(conf, "Config cannot be null");
     this.config = conf;
   }
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/ChunkManagerFactory.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/ChunkManagerFactory.java
index c2d81b6..6b3f58d 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/ChunkManagerFactory.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/ChunkManagerFactory.java
@@ -18,7 +18,7 @@
 
 package org.apache.hadoop.ozone.container.keyvalue.impl;
 
-import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdds.conf.ConfigurationSource;
 import org.apache.hadoop.ozone.OzoneConfigKeys;
 import org.apache.hadoop.ozone.container.keyvalue.interfaces.ChunkManager;
 import org.slf4j.Logger;
@@ -39,7 +39,7 @@ public final class ChunkManagerFactory {
   private ChunkManagerFactory() {
   }
 
-  public static ChunkManager createChunkManager(Configuration conf) {
+  public static ChunkManager createChunkManager(ConfigurationSource conf) {
     boolean sync =
         conf.getBoolean(OzoneConfigKeys.DFS_CONTAINER_CHUNK_WRITE_SYNC_KEY,
             OzoneConfigKeys.DFS_CONTAINER_CHUNK_WRITE_SYNC_DEFAULT);
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/statemachine/background/BlockDeletingService.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/statemachine/background/BlockDeletingService.java
index 798629e..efd16c6 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/statemachine/background/BlockDeletingService.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/statemachine/background/BlockDeletingService.java
@@ -18,9 +18,29 @@
 
 package org.apache.hadoop.ozone.container.keyvalue.statemachine.background;
 
-import com.google.common.collect.Lists;
+import java.io.File;
+import java.io.IOException;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.Map;
+import java.util.Objects;
+import java.util.UUID;
+import java.util.concurrent.TimeUnit;
+import java.util.stream.Collectors;
+
+import org.apache.hadoop.hdds.conf.ConfigurationSource;
+import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
 import org.apache.hadoop.hdds.scm.ScmConfigKeys;
+import org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException;
 import org.apache.hadoop.hdds.scm.pipeline.PipelineID;
+import org.apache.hadoop.hdds.utils.BackgroundService;
+import org.apache.hadoop.hdds.utils.BackgroundTask;
+import org.apache.hadoop.hdds.utils.BackgroundTaskQueue;
+import org.apache.hadoop.hdds.utils.BackgroundTaskResult;
+import org.apache.hadoop.hdds.utils.BatchOperation;
+import org.apache.hadoop.hdds.utils.MetadataKeyFilters.KeyPrefixFilter;
+import org.apache.hadoop.hdfs.DFSUtil;
+import org.apache.hadoop.ozone.OzoneConsts;
 import org.apache.hadoop.ozone.container.common.helpers.BlockData;
 import org.apache.hadoop.ozone.container.common.impl.ContainerData;
 import org.apache.hadoop.ozone.container.common.impl.TopNOrderedContainerDeletionChoosingPolicy;
@@ -28,48 +48,21 @@ import org.apache.hadoop.ozone.container.common.interfaces.Container;
 import org.apache.hadoop.ozone.container.common.interfaces.ContainerDeletionChoosingPolicy;
 import org.apache.hadoop.ozone.container.common.interfaces.Handler;
 import org.apache.hadoop.ozone.container.common.transport.server.ratis.XceiverServerRatis;
+import org.apache.hadoop.ozone.container.common.utils.ReferenceCountedDB;
 import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData;
 import org.apache.hadoop.ozone.container.keyvalue.helpers.BlockUtils;
 import org.apache.hadoop.ozone.container.ozoneimpl.OzoneContainer;
-import org.apache.hadoop.util.ReflectionUtils;
-import org.apache.ratis.thirdparty.com.google.protobuf
-    .InvalidProtocolBufferException;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdds.scm.container.common.helpers
-    .StorageContainerException;
-import org.apache.hadoop.hdfs.DFSUtil;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
-import org.apache.hadoop.ozone.OzoneConsts;
 import org.apache.hadoop.util.Time;
-import org.apache.hadoop.hdds.utils.BackgroundService;
-import org.apache.hadoop.hdds.utils.BackgroundTask;
-import org.apache.hadoop.hdds.utils.BackgroundTaskQueue;
-import org.apache.hadoop.hdds.utils.BackgroundTaskResult;
-import org.apache.hadoop.hdds.utils.BatchOperation;
-import org.apache.hadoop.hdds.utils.MetadataKeyFilters.KeyPrefixFilter;
-import org.apache.hadoop.ozone.container.common.utils.ReferenceCountedDB;
+
+import com.google.common.collect.Lists;
+import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_BLOCK_DELETING_CONTAINER_LIMIT_PER_INTERVAL;
+import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_BLOCK_DELETING_CONTAINER_LIMIT_PER_INTERVAL_DEFAULT;
+import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_BLOCK_DELETING_LIMIT_PER_CONTAINER;
+import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_BLOCK_DELETING_LIMIT_PER_CONTAINER_DEFAULT;
+import org.apache.ratis.thirdparty.com.google.protobuf.InvalidProtocolBufferException;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import java.io.File;
-import java.io.IOException;
-import java.util.LinkedList;
-import java.util.List;
-import java.util.Map;
-import java.util.Objects;
-import java.util.UUID;
-import java.util.concurrent.TimeUnit;
-import java.util.stream.Collectors;
-
-import static org.apache.hadoop.ozone.OzoneConfigKeys
-    .OZONE_BLOCK_DELETING_CONTAINER_LIMIT_PER_INTERVAL;
-import static org.apache.hadoop.ozone.OzoneConfigKeys
-    .OZONE_BLOCK_DELETING_CONTAINER_LIMIT_PER_INTERVAL_DEFAULT;
-import static org.apache.hadoop.ozone.OzoneConfigKeys
-    .OZONE_BLOCK_DELETING_LIMIT_PER_CONTAINER;
-import static org.apache.hadoop.ozone.OzoneConfigKeys
-    .OZONE_BLOCK_DELETING_LIMIT_PER_CONTAINER_DEFAULT;
-
 /**
  * A per-datanode container block deleting service takes in charge
  * of deleting staled ozone blocks.
@@ -82,7 +75,7 @@ public class BlockDeletingService extends BackgroundService {
 
   private OzoneContainer ozoneContainer;
   private ContainerDeletionChoosingPolicy containerDeletionPolicy;
-  private final Configuration conf;
+  private final ConfigurationSource conf;
 
   // Throttle number of blocks to delete per task,
   // set to 1 for testing
@@ -98,14 +91,18 @@ public class BlockDeletingService extends BackgroundService {
 
   public BlockDeletingService(OzoneContainer ozoneContainer,
       long serviceInterval, long serviceTimeout, TimeUnit timeUnit,
-      Configuration conf) {
+      ConfigurationSource conf) {
     super("BlockDeletingService", serviceInterval, timeUnit,
         BLOCK_DELETING_SERVICE_CORE_POOL_SIZE, serviceTimeout);
     this.ozoneContainer = ozoneContainer;
-    containerDeletionPolicy = ReflectionUtils.newInstance(conf.getClass(
-        ScmConfigKeys.OZONE_SCM_KEY_VALUE_CONTAINER_DELETION_CHOOSING_POLICY,
-        TopNOrderedContainerDeletionChoosingPolicy.class,
-        ContainerDeletionChoosingPolicy.class), conf);
+    try {
+      containerDeletionPolicy = conf.getClass(
+          ScmConfigKeys.OZONE_SCM_KEY_VALUE_CONTAINER_DELETION_CHOOSING_POLICY,
+          TopNOrderedContainerDeletionChoosingPolicy.class,
+          ContainerDeletionChoosingPolicy.class).newInstance();
+    } catch (Exception e) {
+      throw new RuntimeException(e);
+    }
     this.conf = conf;
     this.blockLimitPerTask =
         conf.getInt(OZONE_BLOCK_DELETING_LIMIT_PER_CONTAINER,
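
The same instantiation change as in KeyValueHandler applies here: the
ContainerDeletionChoosingPolicy is now created through its public no-arg
constructor, so custom policies that relied on ReflectionUtils injecting the
Configuration have to obtain their settings some other way.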
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerReader.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerReader.java
index 297c1ca..e923d33 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerReader.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerReader.java
@@ -18,12 +18,15 @@
 
 package org.apache.hadoop.ozone.container.ozoneimpl;
 
-import com.google.common.base.Preconditions;
-import com.google.common.primitives.Longs;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import java.io.File;
+import java.io.FileFilter;
+import java.io.IOException;
+import java.util.List;
+
+import org.apache.hadoop.hdds.conf.ConfigurationSource;
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
-import org.apache.hadoop.hdds.scm.container.common.helpers
-    .StorageContainerException;
+import org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException;
+import org.apache.hadoop.hdds.utils.MetadataKeyFilters;
 import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.ozone.OzoneConsts;
 import org.apache.hadoop.ozone.common.Storage;
@@ -31,25 +34,22 @@ import org.apache.hadoop.ozone.container.common.helpers.BlockData;
 import org.apache.hadoop.ozone.container.common.helpers.ChunkInfo;
 import org.apache.hadoop.ozone.container.common.helpers.ContainerUtils;
 import org.apache.hadoop.ozone.container.common.impl.ContainerData;
+import org.apache.hadoop.ozone.container.common.impl.ContainerDataYaml;
 import org.apache.hadoop.ozone.container.common.impl.ContainerSet;
+import org.apache.hadoop.ozone.container.common.utils.ReferenceCountedDB;
 import org.apache.hadoop.ozone.container.common.volume.HddsVolume;
 import org.apache.hadoop.ozone.container.common.volume.MutableVolumeSet;
 import org.apache.hadoop.ozone.container.keyvalue.KeyValueBlockIterator;
 import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainer;
 import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData;
-import org.apache.hadoop.ozone.container.common.impl.ContainerDataYaml;
 import org.apache.hadoop.ozone.container.keyvalue.helpers.BlockUtils;
 import org.apache.hadoop.ozone.container.keyvalue.helpers.KeyValueContainerUtil;
-import org.apache.hadoop.hdds.utils.MetadataKeyFilters;
-import org.apache.hadoop.ozone.container.common.utils.ReferenceCountedDB;
+
+import com.google.common.base.Preconditions;
+import com.google.common.primitives.Longs;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import java.io.File;
-import java.io.FileFilter;
-import java.io.IOException;
-import java.util.List;
-
 /**
  * Class used to read .container files from Volume and build container map.
  *
@@ -81,12 +81,12 @@ public class ContainerReader implements Runnable {
       ContainerReader.class);
   private HddsVolume hddsVolume;
   private final ContainerSet containerSet;
-  private final OzoneConfiguration config;
+  private final ConfigurationSource config;
   private final File hddsVolumeDir;
   private final MutableVolumeSet volumeSet;
 
   ContainerReader(MutableVolumeSet volSet, HddsVolume volume, ContainerSet cset,
-                  OzoneConfiguration conf) {
+      ConfigurationSource conf) {
     Preconditions.checkNotNull(volume);
     this.hddsVolume = volume;
     this.hddsVolumeDir = hddsVolume.getHddsRootDir();
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/OzoneContainer.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/OzoneContainer.java
index d82ca4f..bbbec25 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/OzoneContainer.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/OzoneContainer.java
@@ -26,7 +26,7 @@ import java.util.Map;
 import java.util.concurrent.TimeUnit;
 import java.util.function.Consumer;
 
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.conf.ConfigurationSource;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerType;
 import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos;
@@ -72,7 +72,7 @@ public class OzoneContainer {
 
   private final HddsDispatcher hddsDispatcher;
   private final Map<ContainerType, Handler> handlers;
-  private final OzoneConfiguration config;
+  private final ConfigurationSource config;
   private final MutableVolumeSet volumeSet;
   private final ContainerSet containerSet;
   private final XceiverServerSpi writeChannel;
@@ -90,7 +90,7 @@ public class OzoneContainer {
    * @throws DiskOutOfSpaceException
    * @throws IOException
    */
-  public OzoneContainer(DatanodeDetails datanodeDetails, OzoneConfiguration
+  public OzoneContainer(DatanodeDetails datanodeDetails, ConfigurationSource
       conf, StateContext context, CertificateClient certClient)
       throws IOException {
     config = conf;
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/SimpleContainerDownloader.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/SimpleContainerDownloader.java
index 37a44ac..d7666ea 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/SimpleContainerDownloader.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/replication/SimpleContainerDownloader.java
@@ -24,7 +24,7 @@ import java.util.List;
 import java.util.concurrent.CompletableFuture;
 import java.util.function.Function;
 
-import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdds.conf.ConfigurationSource;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails.Port.Name;
 import org.apache.hadoop.ozone.OzoneConfigKeys;
@@ -46,7 +46,7 @@ public class SimpleContainerDownloader implements ContainerDownloader {
 
   private final Path workingDirectory;
 
-  public SimpleContainerDownloader(Configuration conf) {
+  public SimpleContainerDownloader(ConfigurationSource conf) {
 
     String workDirString =
         conf.get(OzoneConfigKeys.OZONE_CONTAINER_COPY_WORKDIR);
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/ContainerTestUtils.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/ContainerTestUtils.java
index 6f159b4..aabde54 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/ContainerTestUtils.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/ContainerTestUtils.java
@@ -17,33 +17,33 @@
 
 package org.apache.hadoop.ozone.container.common;
 
+import java.io.IOException;
+import java.net.InetSocketAddress;
+import java.util.Random;
+import java.util.UUID;
+
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.conf.StorageUnit;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
+import org.apache.hadoop.hdds.utils.LegacyHadoopConfigurationSource;
 import org.apache.hadoop.io.retry.RetryPolicies;
 import org.apache.hadoop.ipc.ProtobufRpcEngine;
 import org.apache.hadoop.ipc.RPC;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.ozone.container.common.impl.ChunkLayOutVersion;
 import org.apache.hadoop.ozone.container.common.statemachine.DatanodeStateMachine;
-import org.apache.hadoop.ozone.container.common.statemachine
-    .EndpointStateMachine;
+import org.apache.hadoop.ozone.container.common.statemachine.EndpointStateMachine;
 import org.apache.hadoop.ozone.container.common.statemachine.StateContext;
 import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainer;
 import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData;
 import org.apache.hadoop.ozone.container.ozoneimpl.OzoneContainer;
-import org.apache.hadoop.ozone.protocolPB
-    .StorageContainerDatanodeProtocolClientSideTranslatorPB;
+import org.apache.hadoop.ozone.protocolPB.StorageContainerDatanodeProtocolClientSideTranslatorPB;
 import org.apache.hadoop.ozone.protocolPB.StorageContainerDatanodeProtocolPB;
 import org.apache.hadoop.security.UserGroupInformation;
-import org.mockito.Mockito;
 
-import java.io.IOException;
-import java.net.InetSocketAddress;
-import java.util.Random;
-import java.util.UUID;
+import org.mockito.Mockito;
 
 /**
  * Helper utility to test containers.
@@ -77,7 +77,8 @@ public final class ContainerTestUtils {
 
     StorageContainerDatanodeProtocolClientSideTranslatorPB rpcClient =
         new StorageContainerDatanodeProtocolClientSideTranslatorPB(rpcProxy);
-    return new EndpointStateMachine(address, rpcClient, conf);
+    return new EndpointStateMachine(address, rpcClient,
+        new LegacyHadoopConfigurationSource(conf));
   }
 
   public static OzoneContainer getOzoneContainer(
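This hunk shows the bridge in the legacy-to-new direction: test code that
still builds a Hadoop Configuration wraps it in
LegacyHadoopConfigurationSource before handing it to a
ConfigurationSource-based constructor such as EndpointStateMachine. A
minimal sketch of just the wrapping step, assuming the Ozone and Hadoop
jars on the classpath (the key is illustrative only):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdds.conf.ConfigurationSource;
    import org.apache.hadoop.hdds.utils.LegacyHadoopConfigurationSource;

    public class WrapConfSketch {
      public static void main(String[] args) {
        Configuration hadoopConf = new Configuration();
        hadoopConf.set("ozone.example.key", "value"); // illustrative key

        // Adapt the legacy object to the lightweight interface.
        ConfigurationSource source =
            new LegacyHadoopConfigurationSource(hadoopConf);
        System.out.println(source.get("ozone.example.key"));
      }
    }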
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/SCMTestUtils.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/SCMTestUtils.java
index b2d412a..f696ac3 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/SCMTestUtils.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/SCMTestUtils.java
@@ -22,18 +22,20 @@ import java.net.InetSocketAddress;
 import java.net.ServerSocket;
 
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdds.fs.SpaceUsageCheckFactory;
-import org.apache.hadoop.hdds.fs.MockSpaceUsageCheckFactory;
 import org.apache.hadoop.hdds.HddsConfigKeys;
+import org.apache.hadoop.hdds.conf.ConfigurationSource;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.fs.MockSpaceUsageCheckFactory;
+import org.apache.hadoop.hdds.fs.SpaceUsageCheckFactory;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
 import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.StorageContainerDatanodeProtocolService;
 import org.apache.hadoop.hdds.scm.ScmConfigKeys;
+import org.apache.hadoop.hdds.utils.LegacyHadoopConfigurationSource;
+import org.apache.hadoop.hdds.utils.ProtocolMessageMetrics;
 import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.ipc.ProtobufRpcEngine;
 import org.apache.hadoop.ipc.RPC;
 import org.apache.hadoop.ozone.protocol.StorageContainerDatanodeProtocol;
-import org.apache.hadoop.hdds.utils.ProtocolMessageMetrics;
 import org.apache.hadoop.ozone.protocolPB.StorageContainerDatanodeProtocolPB;
 import org.apache.hadoop.ozone.protocolPB.StorageContainerDatanodeProtocolServerSideTranslatorPB;
 import org.apache.hadoop.test.GenericTestUtils;
@@ -85,11 +87,14 @@ public final class SCMTestUtils {
   /**
    * Start Datanode RPC server.
    */
-  public static RPC.Server startScmRpcServer(Configuration configuration,
+  public static RPC.Server startScmRpcServer(ConfigurationSource configuration,
       StorageContainerDatanodeProtocol server,
       InetSocketAddress rpcServerAddresss, int handlerCount) throws
       IOException {
-    RPC.setProtocolEngine(configuration,
+
+    Configuration hadoopConfig =
+        LegacyHadoopConfigurationSource.asHadoopConfiguration(configuration);
+    RPC.setProtocolEngine(hadoopConfig,
         StorageContainerDatanodeProtocolPB.class,
         ProtobufRpcEngine.class);
 
@@ -99,7 +104,7 @@ public final class SCMTestUtils {
                 new StorageContainerDatanodeProtocolServerSideTranslatorPB(
                     server, Mockito.mock(ProtocolMessageMetrics.class)));
 
-    RPC.Server scmServer = startRpcServer(configuration, rpcServerAddresss,
+    RPC.Server scmServer = startRpcServer(hadoopConfig, rpcServerAddresss,
         StorageContainerDatanodeProtocolPB.class, scmDatanodeService,
         handlerCount);
 
@@ -133,20 +138,20 @@ public final class SCMTestUtils {
   }
 
   public static HddsProtos.ReplicationType getReplicationType(
-      Configuration conf) {
+      ConfigurationSource conf) {
     return isUseRatis(conf) ?
         HddsProtos.ReplicationType.RATIS :
         HddsProtos.ReplicationType.STAND_ALONE;
   }
 
   public static HddsProtos.ReplicationFactor getReplicationFactor(
-      Configuration conf) {
+      ConfigurationSource conf) {
     return isUseRatis(conf) ?
         HddsProtos.ReplicationFactor.THREE :
         HddsProtos.ReplicationFactor.ONE;
   }
 
-  private static boolean isUseRatis(Configuration c) {
+  private static boolean isUseRatis(ConfigurationSource c) {
     return c.getBoolean(
         ScmConfigKeys.DFS_CONTAINER_RATIS_ENABLED_KEY,
         ScmConfigKeys.DFS_CONTAINER_RATIS_ENABLED_DEFAULT);
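startScmRpcServer above shows the opposite direction: Hadoop-side APIs such
as RPC.setProtocolEngine still require a Hadoop Configuration, so the
ConfigurationSource is converted back with the static
LegacyHadoopConfigurationSource.asHadoopConfiguration helper. A minimal
sketch of that conversion, under the same classpath assumption:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdds.conf.ConfigurationSource;
    import org.apache.hadoop.hdds.conf.OzoneConfiguration;
    import org.apache.hadoop.hdds.utils.LegacyHadoopConfigurationSource;

    public class UnwrapConfSketch {
      public static void main(String[] args) {
        // OzoneConfiguration implements ConfigurationSource after this change.
        ConfigurationSource source = new OzoneConfiguration();

        // Convert back to a Hadoop Configuration for legacy APIs,
        // as startScmRpcServer does before RPC.setProtocolEngine.
        Configuration hadoopConf =
            LegacyHadoopConfigurationSource.asHadoopConfiguration(source);
        System.out.println(hadoopConf.get("ozone.example.key", "<unset>"));
      }
    }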
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestBlockDeletingService.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestBlockDeletingService.java
index 303ebd7..a19ad64 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestBlockDeletingService.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestBlockDeletingService.java
@@ -17,13 +17,22 @@
 
 package org.apache.hadoop.ozone.container.common;
 
-import com.google.common.collect.Lists;
-import org.apache.commons.io.FileUtils;
-import org.apache.hadoop.conf.Configuration;
+import java.io.File;
+import java.io.IOException;
+import java.util.List;
+import java.util.Map;
+import java.util.UUID;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.TimeoutException;
+
 import org.apache.hadoop.hdds.client.BlockID;
-import org.apache.hadoop.hdfs.DFSUtil;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
+import org.apache.hadoop.hdds.conf.ConfigurationSource;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
+import org.apache.hadoop.hdds.scm.ScmConfigKeys;
+import org.apache.hadoop.hdds.utils.BackgroundService;
+import org.apache.hadoop.hdds.utils.MetadataKeyFilters;
+import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.ozone.OzoneConsts;
 import org.apache.hadoop.ozone.common.Checksum;
 import org.apache.hadoop.ozone.container.ContainerTestHelper;
@@ -35,40 +44,31 @@ import org.apache.hadoop.ozone.container.common.impl.TopNOrderedContainerDeletio
 import org.apache.hadoop.ozone.container.common.interfaces.Container;
 import org.apache.hadoop.ozone.container.common.interfaces.ContainerDispatcher;
 import org.apache.hadoop.ozone.container.common.interfaces.Handler;
-import org.apache.hadoop.ozone.container.common.volume.RoundRobinVolumeChoosingPolicy;
+import org.apache.hadoop.ozone.container.common.utils.ReferenceCountedDB;
 import org.apache.hadoop.ozone.container.common.volume.MutableVolumeSet;
+import org.apache.hadoop.ozone.container.common.volume.RoundRobinVolumeChoosingPolicy;
 import org.apache.hadoop.ozone.container.keyvalue.ChunkLayoutTestInfo;
 import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainer;
 import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData;
 import org.apache.hadoop.ozone.container.keyvalue.KeyValueHandler;
 import org.apache.hadoop.ozone.container.keyvalue.helpers.BlockUtils;
+import org.apache.hadoop.ozone.container.keyvalue.statemachine.background.BlockDeletingService;
 import org.apache.hadoop.ozone.container.ozoneimpl.OzoneContainer;
 import org.apache.hadoop.ozone.container.testutils.BlockDeletingServiceTestImpl;
-import org.apache.hadoop.ozone.container.keyvalue.statemachine.background.BlockDeletingService;
-import org.apache.hadoop.hdds.scm.ScmConfigKeys;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.test.GenericTestUtils.LogCapturer;
-import org.apache.hadoop.hdds.utils.BackgroundService;
-import org.apache.hadoop.hdds.utils.MetadataKeyFilters;
-import org.apache.hadoop.ozone.container.common.utils.ReferenceCountedDB;
+
+import com.google.common.collect.Lists;
+import org.apache.commons.io.FileUtils;
+import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_BLOCK_DELETING_CONTAINER_LIMIT_PER_INTERVAL;
+import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_BLOCK_DELETING_LIMIT_PER_CONTAINER;
+import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_BLOCK_DELETING_SERVICE_INTERVAL;
 import org.junit.AfterClass;
 import org.junit.Assert;
-import org.junit.Test;
 import org.junit.BeforeClass;
+import org.junit.Test;
 import org.junit.runner.RunWith;
 import org.junit.runners.Parameterized;
-
-import java.io.File;
-import java.io.IOException;
-import java.util.List;
-import java.util.Map;
-import java.util.UUID;
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.TimeoutException;
-
-import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_BLOCK_DELETING_SERVICE_INTERVAL;
-import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_BLOCK_DELETING_LIMIT_PER_CONTAINER;
-import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_BLOCK_DELETING_CONTAINER_LIMIT_PER_INTERVAL;
 import static org.mockito.ArgumentMatchers.any;
 import static org.mockito.Mockito.mock;
 import static org.mockito.Mockito.times;
@@ -119,7 +119,8 @@ public class TestBlockDeletingService {
    * creates some fake chunk files for testing.
    */
   private void createToDeleteBlocks(ContainerSet containerSet,
-      Configuration conf, int numOfContainers, int numOfBlocksPerContainer,
+      ConfigurationSource conf, int numOfContainers,
+      int numOfBlocksPerContainer,
       int numOfChunksPerBlock) throws IOException {
     for (int x = 0; x < numOfContainers; x++) {
       conf.set(ScmConfigKeys.HDDS_DATANODE_DIR_KEY, testRoot.getAbsolutePath());
@@ -195,7 +196,7 @@ public class TestBlockDeletingService {
 
   @Test
   public void testBlockDeletion() throws Exception {
-    Configuration conf = new OzoneConfiguration();
+    OzoneConfiguration conf = new OzoneConfiguration();
     conf.setInt(OZONE_BLOCK_DELETING_CONTAINER_LIMIT_PER_INTERVAL, 10);
     conf.setInt(OZONE_BLOCK_DELETING_LIMIT_PER_CONTAINER, 2);
     ContainerSet containerSet = new ContainerSet();
@@ -250,7 +251,7 @@ public class TestBlockDeletingService {
   @Test
   @SuppressWarnings("java:S2699") // waitFor => assertion with timeout
   public void testShutdownService() throws Exception {
-    Configuration conf = new OzoneConfiguration();
+    OzoneConfiguration conf = new OzoneConfiguration();
     conf.setTimeDuration(OZONE_BLOCK_DELETING_SERVICE_INTERVAL, 500,
         TimeUnit.MILLISECONDS);
     conf.setInt(OZONE_BLOCK_DELETING_CONTAINER_LIMIT_PER_INTERVAL, 10);
@@ -275,7 +276,7 @@ public class TestBlockDeletingService {
 
   @Test
   public void testBlockDeletionTimeout() throws Exception {
-    Configuration conf = new OzoneConfiguration();
+    OzoneConfiguration conf = new OzoneConfiguration();
     conf.setInt(OZONE_BLOCK_DELETING_CONTAINER_LIMIT_PER_INTERVAL, 10);
     conf.setInt(OZONE_BLOCK_DELETING_LIMIT_PER_CONTAINER, 2);
     ContainerSet containerSet = new ContainerSet();
@@ -336,7 +337,7 @@ public class TestBlockDeletingService {
   }
 
   private BlockDeletingServiceTestImpl getBlockDeletingService(
-      ContainerSet containerSet, Configuration conf) {
+      ContainerSet containerSet, ConfigurationSource conf) {
     OzoneContainer ozoneContainer = mockDependencies(containerSet);
     return new BlockDeletingServiceTestImpl(ozoneContainer, 1000, conf);
   }
@@ -363,7 +364,7 @@ public class TestBlockDeletingService {
     //
     // Each time only 1 container can be processed, so each time
     // 1 block from 1 container can be deleted.
-    Configuration conf = new OzoneConfiguration();
+    OzoneConfiguration conf = new OzoneConfiguration();
     // Process 1 container per interval
     conf.set(
         ScmConfigKeys.OZONE_SCM_KEY_VALUE_CONTAINER_DELETION_CHOOSING_POLICY,
@@ -406,7 +407,7 @@ public class TestBlockDeletingService {
     // Each time containers can be all scanned, but only 2 blocks
     // per container can be actually deleted. So it requires 2 waves
     // to cleanup all blocks.
-    Configuration conf = new OzoneConfiguration();
+    OzoneConfiguration conf = new OzoneConfiguration();
     conf.setInt(OZONE_BLOCK_DELETING_CONTAINER_LIMIT_PER_INTERVAL, 10);
     int blockLimitPerTask = 2;
     conf.setInt(OZONE_BLOCK_DELETING_LIMIT_PER_CONTAINER, blockLimitPerTask);
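The tests throttle the deleting service through the three OzoneConfigKeys
constants imported above. A condensed sketch of that setup on a plain
OzoneConfiguration (the values are illustrative, taken from the tests):

    import java.util.concurrent.TimeUnit;

    import org.apache.hadoop.hdds.conf.OzoneConfiguration;

    import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_BLOCK_DELETING_CONTAINER_LIMIT_PER_INTERVAL;
    import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_BLOCK_DELETING_LIMIT_PER_CONTAINER;
    import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_BLOCK_DELETING_SERVICE_INTERVAL;

    public class BlockDeletingThrottleSketch {
      public static void main(String[] args) {
        OzoneConfiguration conf = new OzoneConfiguration();
        // At most 10 containers considered per service interval.
        conf.setInt(OZONE_BLOCK_DELETING_CONTAINER_LIMIT_PER_INTERVAL, 10);
        // At most 2 blocks deleted per container per run.
        conf.setInt(OZONE_BLOCK_DELETING_LIMIT_PER_CONTAINER, 2);
        // Run the background service every 500 ms.
        conf.setTimeDuration(OZONE_BLOCK_DELETING_SERVICE_INTERVAL, 500,
            TimeUnit.MILLISECONDS);
      }
    }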
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestDatanodeStateMachine.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestDatanodeStateMachine.java
index 0f3e7d1..65ae6ce 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestDatanodeStateMachine.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestDatanodeStateMachine.java
@@ -16,54 +16,48 @@
  */
 package org.apache.hadoop.ozone.container.common;
 
-import com.google.common.collect.Maps;
-import com.google.common.util.concurrent.ThreadFactoryBuilder;
-import org.apache.hadoop.conf.Configuration;
+import java.io.File;
+import java.io.IOException;
+import java.net.InetSocketAddress;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
+import java.util.UUID;
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.TimeoutException;
+
 import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.hdds.HddsConfigKeys;
-import org.apache.hadoop.hdds.scm.ScmConfigKeys;
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
+import org.apache.hadoop.hdds.scm.ScmConfigKeys;
 import org.apache.hadoop.ipc.RPC;
 import org.apache.hadoop.ozone.OzoneConfigKeys;
 import org.apache.hadoop.ozone.OzoneConsts;
 import org.apache.hadoop.ozone.container.common.helpers.ContainerUtils;
-import org.apache.hadoop.ozone.container.common.statemachine
-    .DatanodeStateMachine;
-import org.apache.hadoop.ozone.container.common.statemachine
-    .EndpointStateMachine;
-import org.apache.hadoop.ozone.container.common.statemachine
-    .SCMConnectionManager;
+import org.apache.hadoop.ozone.container.common.statemachine.DatanodeStateMachine;
+import org.apache.hadoop.ozone.container.common.statemachine.EndpointStateMachine;
+import org.apache.hadoop.ozone.container.common.statemachine.SCMConnectionManager;
 import org.apache.hadoop.ozone.container.common.states.DatanodeState;
-import org.apache.hadoop.ozone.container.common.states.datanode
-    .InitDatanodeState;
-import org.apache.hadoop.ozone.container.common.states.datanode
-    .RunningDatanodeState;
+import org.apache.hadoop.ozone.container.common.states.datanode.InitDatanodeState;
+import org.apache.hadoop.ozone.container.common.states.datanode.RunningDatanodeState;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.util.concurrent.HadoopExecutors;
+
+import com.google.common.collect.Maps;
+import com.google.common.util.concurrent.ThreadFactoryBuilder;
+import static org.apache.hadoop.hdds.scm.ScmConfigKeys.HDDS_DATANODE_DIR_KEY;
+import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_HEARTBEAT_RPC_TIMEOUT;
 import org.junit.After;
 import org.junit.Assert;
+import static org.junit.Assert.assertTrue;
 import org.junit.Before;
 import org.junit.Test;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import java.io.File;
-import java.io.IOException;
-import java.net.InetSocketAddress;
-import java.util.ArrayList;
-import java.util.List;
-import java.util.Map;
-import java.util.UUID;
-import java.util.concurrent.ExecutionException;
-import java.util.concurrent.ExecutorService;
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.TimeoutException;
-
-import static org.apache.hadoop.hdds.scm.ScmConfigKeys.HDDS_DATANODE_DIR_KEY;
-import static org.apache.hadoop.hdds.scm.ScmConfigKeys
-    .OZONE_SCM_HEARTBEAT_RPC_TIMEOUT;
-import static org.junit.Assert.assertTrue;
-
 /**
  * Tests the datanode state machine class and its states.
  */
@@ -77,7 +71,7 @@ public class TestDatanodeStateMachine {
   private List<RPC.Server> scmServers;
   private List<ScmTestMock> mockServers;
   private ExecutorService executorService;
-  private Configuration conf;
+  private OzoneConfiguration conf;
   private File testRoot;
 
   @Before
@@ -403,7 +397,7 @@ public class TestDatanodeStateMachine {
         ScmConfigKeys.OZONE_SCM_DATANODE_ID_DIR, ""));
 
     confList.forEach((entry) -> {
-      Configuration perTestConf = new Configuration(conf);
+      OzoneConfiguration perTestConf = new OzoneConfiguration(conf);
       perTestConf.setStrings(entry.getKey(), entry.getValue());
       LOG.info("Test with {} = {}", entry.getKey(), entry.getValue());
       try (DatanodeStateMachine stateMachine = new DatanodeStateMachine(
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/interfaces/TestHandler.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/interfaces/TestHandler.java
index 324ab71..7b41d99 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/interfaces/TestHandler.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/interfaces/TestHandler.java
@@ -18,9 +18,9 @@
 
 package org.apache.hadoop.ozone.container.common.interfaces;
 
-import com.google.common.collect.Maps;
+import java.util.Map;
 
-import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
 import org.apache.hadoop.ozone.container.common.helpers.ContainerMetrics;
@@ -29,10 +29,11 @@ import org.apache.hadoop.ozone.container.common.impl.HddsDispatcher;
 import org.apache.hadoop.ozone.container.common.impl.TestHddsDispatcher;
 import org.apache.hadoop.ozone.container.common.statemachine.DatanodeStateMachine;
 import org.apache.hadoop.ozone.container.common.statemachine.StateContext;
-import org.apache.hadoop.ozone.container.common.volume.VolumeSet;
 import org.apache.hadoop.ozone.container.common.volume.MutableVolumeSet;
+import org.apache.hadoop.ozone.container.common.volume.VolumeSet;
 import org.apache.hadoop.ozone.container.keyvalue.KeyValueHandler;
 
+import com.google.common.collect.Maps;
 import org.junit.After;
 import org.junit.Assert;
 import org.junit.Before;
@@ -42,8 +43,6 @@ import org.junit.rules.TestRule;
 import org.junit.rules.Timeout;
 import org.mockito.Mockito;
 
-import java.util.Map;
-
 /**
  * Tests Handler interface.
  */
@@ -51,7 +50,7 @@ public class TestHandler {
   @Rule
   public TestRule timeout = new Timeout(300000);
 
-  private Configuration conf;
+  private OzoneConfiguration conf;
   private HddsDispatcher dispatcher;
   private ContainerSet containerSet;
   private VolumeSet volumeSet;
@@ -59,7 +58,7 @@ public class TestHandler {
 
   @Before
   public void setup() throws Exception {
-    this.conf = new Configuration();
+    this.conf = new OzoneConfiguration();
     this.containerSet = Mockito.mock(ContainerSet.class);
     this.volumeSet = Mockito.mock(MutableVolumeSet.class);
     DatanodeDetails datanodeDetails = Mockito.mock(DatanodeDetails.class);
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/report/TestReportManager.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/report/TestReportManager.java
index aae388d..45e50dc 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/report/TestReportManager.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/report/TestReportManager.java
@@ -17,16 +17,15 @@
 
 package org.apache.hadoop.ozone.container.common.report;
 
-import org.apache.hadoop.conf.Configuration;
+import java.util.concurrent.ScheduledExecutorService;
+
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.ozone.container.common.statemachine.StateContext;
-import org.junit.Test;
-import org.mockito.Mockito;
-
-import java.util.concurrent.ScheduledExecutorService;
 
+import org.junit.Test;
 import static org.mockito.ArgumentMatchers.any;
 import static org.mockito.ArgumentMatchers.eq;
+import org.mockito.Mockito;
 import static org.mockito.Mockito.times;
 import static org.mockito.Mockito.verify;
 
@@ -37,7 +36,7 @@ public class TestReportManager {
 
   @Test
   public void testReportManagerInit() {
-    Configuration conf = new OzoneConfiguration();
+    OzoneConfiguration conf = new OzoneConfiguration();
     StateContext dummyContext = Mockito.mock(StateContext.class);
     ReportPublisher dummyPublisher = Mockito.mock(ReportPublisher.class);
     ReportManager.Builder builder = ReportManager.newBuilder(conf);
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/report/TestReportPublisher.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/report/TestReportPublisher.java
index 03f0cd4..166aadf 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/report/TestReportPublisher.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/report/TestReportPublisher.java
@@ -21,7 +21,7 @@ import com.google.common.util.concurrent.ThreadFactoryBuilder;
 import com.google.protobuf.GeneratedMessage;
 import java.util.Map;
 import java.util.concurrent.ConcurrentHashMap;
-import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdds.conf.ConfigurationSource;
 import org.apache.hadoop.hdds.HddsIdFactory;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
@@ -51,7 +51,7 @@ import static org.mockito.Mockito.when;
  */
 public class TestReportPublisher {
 
-  private static Configuration config;
+  private static ConfigurationSource config;
 
   @BeforeClass
   public static void setup() {
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/report/TestReportPublisherFactory.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/report/TestReportPublisherFactory.java
index f8c5fe5..e9a34c7 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/report/TestReportPublisherFactory.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/report/TestReportPublisherFactory.java
@@ -17,13 +17,11 @@
 
 package org.apache.hadoop.ozone.container.common.report;
 
-import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.ContainerReportsProto;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.NodeReportProto;
+import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ContainerReportsProto;
+import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.NodeReportProto;
+
 import org.junit.Assert;
 import org.junit.Rule;
 import org.junit.Test;
@@ -39,7 +37,7 @@ public class TestReportPublisherFactory {
 
   @Test
   public void testGetContainerReportPublisher() {
-    Configuration conf = new OzoneConfiguration();
+    OzoneConfiguration conf = new OzoneConfiguration();
     ReportPublisherFactory factory = new ReportPublisherFactory(conf);
     ReportPublisher publisher = factory
         .getPublisherFor(ContainerReportsProto.class);
@@ -49,7 +47,7 @@ public class TestReportPublisherFactory {
 
   @Test
   public void testGetNodeReportPublisher() {
-    Configuration conf = new OzoneConfiguration();
+    OzoneConfiguration conf = new OzoneConfiguration();
     ReportPublisherFactory factory = new ReportPublisherFactory(conf);
     ReportPublisher publisher = factory
         .getPublisherFor(NodeReportProto.class);
@@ -59,7 +57,7 @@ public class TestReportPublisherFactory {
 
   @Test
   public void testInvalidReportPublisher() {
-    Configuration conf = new OzoneConfiguration();
+    OzoneConfiguration conf = new OzoneConfiguration();
     ReportPublisherFactory factory = new ReportPublisherFactory(conf);
     exception.expect(RuntimeException.class);
     exception.expectMessage("No publisher found for report");
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/states/endpoint/TestHeartbeatEndpointTask.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/states/endpoint/TestHeartbeatEndpointTask.java
index 95ac87f..1c66d63 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/states/endpoint/TestHeartbeatEndpointTask.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/states/endpoint/TestHeartbeatEndpointTask.java
@@ -18,39 +18,29 @@
 
 package org.apache.hadoop.ozone.container.common.states.endpoint;
 
-import org.apache.hadoop.conf.Configuration;
+import java.net.InetSocketAddress;
+import java.util.UUID;
+
+import org.apache.hadoop.hdds.conf.ConfigurationSource;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.ContainerAction;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.CommandStatusReportsProto;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.ContainerReportsProto;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.NodeReportProto;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.SCMHeartbeatResponseProto;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.SCMHeartbeatRequestProto;
-import org.apache.hadoop.ozone.container.common.statemachine
-    .DatanodeStateMachine;
-import org.apache.hadoop.ozone.container.common.statemachine
-    .DatanodeStateMachine.DatanodeStates;
-import org.apache.hadoop.ozone.container.common.statemachine
-    .EndpointStateMachine;
+import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.CommandStatusReportsProto;
+import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ContainerAction;
+import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ContainerReportsProto;
+import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.NodeReportProto;
+import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMHeartbeatRequestProto;
+import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMHeartbeatResponseProto;
+import org.apache.hadoop.ozone.container.common.statemachine.DatanodeStateMachine;
+import org.apache.hadoop.ozone.container.common.statemachine.DatanodeStateMachine.DatanodeStates;
+import org.apache.hadoop.ozone.container.common.statemachine.EndpointStateMachine;
 import org.apache.hadoop.ozone.container.common.statemachine.StateContext;
-import org.apache.hadoop.ozone.protocolPB
-    .StorageContainerDatanodeProtocolClientSideTranslatorPB;
+import org.apache.hadoop.ozone.protocolPB.StorageContainerDatanodeProtocolClientSideTranslatorPB;
 
 import org.junit.Assert;
 import org.junit.Test;
 import org.mockito.ArgumentCaptor;
 import org.mockito.Mockito;
 
-import java.net.InetSocketAddress;
-import java.util.UUID;
-
 /**
  * This class tests the functionality of HeartbeatEndpointTask.
  */
@@ -86,7 +76,7 @@ public class TestHeartbeatEndpointTask {
 
   @Test
   public void testheartbeatWithNodeReports() throws Exception {
-    Configuration conf = new OzoneConfiguration();
+    OzoneConfiguration conf = new OzoneConfiguration();
     StateContext context = new StateContext(conf, DatanodeStates.RUNNING,
         Mockito.mock(DatanodeStateMachine.class));
 
@@ -118,7 +108,7 @@ public class TestHeartbeatEndpointTask {
 
   @Test
   public void testheartbeatWithContainerReports() throws Exception {
-    Configuration conf = new OzoneConfiguration();
+    OzoneConfiguration conf = new OzoneConfiguration();
     StateContext context = new StateContext(conf, DatanodeStates.RUNNING,
         Mockito.mock(DatanodeStateMachine.class));
 
@@ -150,7 +140,7 @@ public class TestHeartbeatEndpointTask {
 
   @Test
   public void testheartbeatWithCommandStatusReports() throws Exception {
-    Configuration conf = new OzoneConfiguration();
+    OzoneConfiguration conf = new OzoneConfiguration();
     StateContext context = new StateContext(conf, DatanodeStates.RUNNING,
         Mockito.mock(DatanodeStateMachine.class));
 
@@ -182,7 +172,7 @@ public class TestHeartbeatEndpointTask {
 
   @Test
   public void testheartbeatWithContainerActions() throws Exception {
-    Configuration conf = new OzoneConfiguration();
+    OzoneConfiguration conf = new OzoneConfiguration();
     StateContext context = new StateContext(conf, DatanodeStates.RUNNING,
         Mockito.mock(DatanodeStateMachine.class));
 
@@ -214,7 +204,7 @@ public class TestHeartbeatEndpointTask {
 
   @Test
   public void testheartbeatWithAllReports() throws Exception {
-    Configuration conf = new OzoneConfiguration();
+    OzoneConfiguration conf = new OzoneConfiguration();
     StateContext context = new StateContext(conf, DatanodeStates.RUNNING,
         Mockito.mock(DatanodeStateMachine.class));
 
@@ -256,7 +246,7 @@ public class TestHeartbeatEndpointTask {
    */
   private HeartbeatEndpointTask getHeartbeatEndpointTask(
       StorageContainerDatanodeProtocolClientSideTranslatorPB proxy) {
-    Configuration conf = new OzoneConfiguration();
+    OzoneConfiguration conf = new OzoneConfiguration();
     StateContext context = new StateContext(conf, DatanodeStates.RUNNING,
         Mockito.mock(DatanodeStateMachine.class));
     return getHeartbeatEndpointTask(conf, context, proxy);
@@ -274,7 +264,7 @@ public class TestHeartbeatEndpointTask {
    * @return HeartbeatEndpointTask
    */
   private HeartbeatEndpointTask getHeartbeatEndpointTask(
-      Configuration conf,
+      ConfigurationSource conf,
       StateContext context,
       StorageContainerDatanodeProtocolClientSideTranslatorPB proxy) {
     DatanodeDetails datanodeDetails = DatanodeDetails.newBuilder()
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestHddsVolume.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestHddsVolume.java
index 0d9c876..57a0e55 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestHddsVolume.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestHddsVolume.java
@@ -17,31 +17,31 @@
 
 package org.apache.hadoop.ozone.container.common.volume;
 
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdds.fs.SpaceUsageCheckFactory;
-import org.apache.hadoop.hdds.fs.SpaceUsagePersistence;
-import org.apache.hadoop.hdds.fs.SpaceUsageSource;
-import org.apache.hadoop.fs.StorageType;
-import org.apache.hadoop.hdds.fs.MockSpaceUsageCheckFactory;
-import org.apache.hadoop.ozone.container.common.helpers.DatanodeVersionFile;
-import org.apache.hadoop.ozone.container.common.utils.HddsVolumeUtil;
-import org.junit.Before;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.rules.TemporaryFolder;
-
 import java.io.File;
 import java.time.Duration;
 import java.util.Properties;
 import java.util.UUID;
 import java.util.concurrent.atomic.AtomicLong;
 
+import org.apache.hadoop.fs.StorageType;
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.fs.MockSpaceUsageCheckFactory;
+import org.apache.hadoop.hdds.fs.SpaceUsageCheckFactory;
+import org.apache.hadoop.hdds.fs.SpaceUsagePersistence;
+import org.apache.hadoop.hdds.fs.SpaceUsageSource;
+import org.apache.hadoop.ozone.container.common.helpers.DatanodeVersionFile;
+import org.apache.hadoop.ozone.container.common.utils.HddsVolumeUtil;
+
 import static org.apache.hadoop.hdds.fs.MockSpaceUsagePersistence.inMemory;
 import static org.apache.hadoop.hdds.fs.MockSpaceUsageSource.fixed;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertNull;
 import static org.junit.Assert.assertTrue;
+import org.junit.Before;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.TemporaryFolder;
 
 /**
  * Unit tests for {@link HddsVolume}.
@@ -50,7 +50,7 @@ public class TestHddsVolume {
 
   private static final String DATANODE_UUID = UUID.randomUUID().toString();
   private static final String CLUSTER_ID = UUID.randomUUID().toString();
-  private static final Configuration CONF = new Configuration();
+  private static final OzoneConfiguration CONF = new OzoneConfiguration();
 
   @Rule
   public TemporaryFolder folder = new TemporaryFolder();
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestRoundRobinVolumeChoosingPolicy.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestRoundRobinVolumeChoosingPolicy.java
index 19ee54d..b2a39a9 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestRoundRobinVolumeChoosingPolicy.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestRoundRobinVolumeChoosingPolicy.java
@@ -18,25 +18,25 @@
 
 package org.apache.hadoop.ozone.container.common.volume;
 
-import org.apache.hadoop.conf.Configuration;
+import java.io.IOException;
+import java.time.Duration;
+import java.util.ArrayList;
+import java.util.List;
+
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.fs.MockSpaceUsageCheckFactory;
+import org.apache.hadoop.hdds.fs.MockSpaceUsageSource;
 import org.apache.hadoop.hdds.fs.SpaceUsageCheckFactory;
 import org.apache.hadoop.hdds.fs.SpaceUsagePersistence;
 import org.apache.hadoop.hdds.fs.SpaceUsageSource;
-import org.apache.hadoop.hdds.fs.MockSpaceUsageCheckFactory;
-import org.apache.hadoop.hdds.fs.MockSpaceUsageSource;
 import org.apache.hadoop.util.DiskChecker.DiskOutOfSpaceException;
+
+import static org.apache.hadoop.test.GenericTestUtils.getTestDir;
 import org.junit.After;
 import org.junit.Assert;
 import org.junit.Before;
 import org.junit.Test;
 
-import java.io.IOException;
-import java.time.Duration;
-import java.util.ArrayList;
-import java.util.List;
-
-import static org.apache.hadoop.test.GenericTestUtils.getTestDir;
-
 /**
  * Tests {@link RoundRobinVolumeChoosingPolicy}.
  */
@@ -45,7 +45,7 @@ public class TestRoundRobinVolumeChoosingPolicy {
   private RoundRobinVolumeChoosingPolicy policy;
   private final List<HddsVolume> volumes = new ArrayList<>();
 
-  private static final Configuration CONF = new Configuration();
+  private static final OzoneConfiguration CONF = new OzoneConfiguration();
   private static final String BASE_DIR =
       getTestDir(TestRoundRobinVolumeChoosingPolicy.class.getSimpleName())
           .getAbsolutePath();
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestVolumeSetDiskChecks.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestVolumeSetDiskChecks.java
index c913d53..3bd42d8 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestVolumeSetDiskChecks.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestVolumeSetDiskChecks.java
@@ -26,8 +26,8 @@ import java.util.List;
 import java.util.Set;
 import java.util.UUID;
 
-import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdds.DFSConfigKeysLegacy;
+import org.apache.hadoop.hdds.conf.ConfigurationSource;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.util.DiskChecker.DiskErrorException;
@@ -63,7 +63,7 @@ public class TestVolumeSetDiskChecks {
   @Rule
   public ExpectedException thrown = ExpectedException.none();
 
-  private Configuration conf = null;
+  private OzoneConfiguration conf = null;
 
   /**
    * Cleanup volume directories.
@@ -115,7 +115,7 @@ public class TestVolumeSetDiskChecks {
     final MutableVolumeSet volumeSet = new MutableVolumeSet(
         UUID.randomUUID().toString(), conf) {
       @Override
-      HddsVolumeChecker getVolumeChecker(Configuration configuration)
+      HddsVolumeChecker getVolumeChecker(ConfigurationSource configuration)
           throws DiskErrorException {
         return new DummyChecker(configuration, new Timer(), numBadVolumes);
       }
@@ -139,7 +139,7 @@ public class TestVolumeSetDiskChecks {
     final MutableVolumeSet volumeSet = new MutableVolumeSet(
         UUID.randomUUID().toString(), conf) {
       @Override
-      HddsVolumeChecker getVolumeChecker(Configuration configuration)
+      HddsVolumeChecker getVolumeChecker(ConfigurationSource configuration)
           throws DiskErrorException {
         return new DummyChecker(configuration, new Timer(), numVolumes);
       }
@@ -155,8 +155,8 @@ public class TestVolumeSetDiskChecks {
    * storage directories.
    * @param numDirs
    */
-  private Configuration getConfWithDataNodeDirs(int numDirs) {
-    final Configuration ozoneConf = new OzoneConfiguration();
+  private OzoneConfiguration getConfWithDataNodeDirs(int numDirs) {
+    final OzoneConfiguration ozoneConf = new OzoneConfiguration();
     final List<String> dirs = new ArrayList<>();
     for (int i = 0; i < numDirs; ++i) {
       dirs.add(GenericTestUtils.getRandomizedTestDir().getPath());
@@ -173,7 +173,7 @@ public class TestVolumeSetDiskChecks {
   static class DummyChecker extends HddsVolumeChecker {
     private final int numBadVolumes;
 
-    DummyChecker(Configuration conf, Timer timer, int numBadVolumes)
+    DummyChecker(ConfigurationSource conf, Timer timer, int numBadVolumes)
         throws DiskErrorException {
       super(conf, timer);
       this.numBadVolumes = numBadVolumes;
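TestVolumeSetDiskChecks injects failures by overriding the getVolumeChecker
factory method in an anonymous MutableVolumeSet subclass. The same
override-for-test seam in miniature; every type here is a hypothetical
stand-in, not an Ozone class:

    public class OverrideSeamSketch {

      /** Hypothetical stand-in for HddsVolumeChecker. */
      static class Checker {
        int badVolumes() {
          return 0;
        }
      }

      /** Hypothetical stand-in for MutableVolumeSet. */
      static class VolumeSetLike {
        Checker getVolumeChecker() {
          return new Checker();
        }

        int failedVolumeCount() {
          return getVolumeChecker().badVolumes();
        }
      }

      public static void main(String[] args) {
        // Production path: real checker, no failures.
        System.out.println(new VolumeSetLike().failedVolumeCount()); // 0

        // Test path: override the factory method to inject failures,
        // mirroring the anonymous subclass in TestVolumeSetDiskChecks.
        VolumeSetLike withFailures = new VolumeSetLike() {
          @Override
          Checker getVolumeChecker() {
            return new Checker() {
              @Override
              int badVolumes() {
                return 2;
              }
            };
          }
        };
        System.out.println(withFailures.failedVolumeCount()); // 2
      }
    }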
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueBlockIterator.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueBlockIterator.java
index ab4d5f6..256f8b7 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueBlockIterator.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueBlockIterator.java
@@ -18,49 +18,46 @@
 
 package org.apache.hadoop.ozone.container.keyvalue;
 
-import com.google.common.primitives.Longs;
-import org.apache.hadoop.conf.Configuration;
+import java.io.File;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.List;
+import java.util.NoSuchElementException;
+import java.util.UUID;
+
 import org.apache.hadoop.conf.StorageUnit;
 import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.hdds.client.BlockID;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
+import org.apache.hadoop.hdds.utils.MetadataKeyFilters;
 import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.ozone.OzoneConsts;
 import org.apache.hadoop.ozone.container.common.helpers.BlockData;
 import org.apache.hadoop.ozone.container.common.helpers.ChunkInfo;
 import org.apache.hadoop.ozone.container.common.impl.ChunkLayOutVersion;
-import org.apache.hadoop.ozone.container.common.volume.RoundRobinVolumeChoosingPolicy;
+import org.apache.hadoop.ozone.container.common.utils.ReferenceCountedDB;
 import org.apache.hadoop.ozone.container.common.volume.MutableVolumeSet;
+import org.apache.hadoop.ozone.container.common.volume.RoundRobinVolumeChoosingPolicy;
 import org.apache.hadoop.ozone.container.keyvalue.helpers.BlockUtils;
 import org.apache.hadoop.test.GenericTestUtils;
-import org.apache.hadoop.hdds.utils.MetadataKeyFilters;
-import org.apache.hadoop.ozone.container.common.utils.ReferenceCountedDB;
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Test;
-import org.junit.runner.RunWith;
-import org.junit.runners.Parameterized;
-
-import java.io.File;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.Collection;
-import java.util.List;
-import java.util.NoSuchElementException;
-import java.util.UUID;
 
+import com.google.common.primitives.Longs;
 import static org.apache.hadoop.hdds.scm.ScmConfigKeys.HDDS_DATANODE_DIR_KEY;
 import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_METADATA_STORE_IMPL;
-import static org.apache.hadoop.ozone.OzoneConfigKeys
-    .OZONE_METADATA_STORE_IMPL_LEVELDB;
-import static org.apache.hadoop.ozone.OzoneConfigKeys
-    .OZONE_METADATA_STORE_IMPL_ROCKSDB;
+import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_METADATA_STORE_IMPL_LEVELDB;
+import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_METADATA_STORE_IMPL_ROCKSDB;
 import static org.apache.hadoop.ozone.container.common.impl.ChunkLayOutVersion.FILE_PER_BLOCK;
 import static org.apache.hadoop.ozone.container.common.impl.ChunkLayOutVersion.FILE_PER_CHUNK;
+import org.junit.After;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertTrue;
+import org.junit.Before;
+import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.junit.runners.Parameterized;
 
 /**
  * This class is used to test KeyValue container block iterator.
@@ -71,7 +68,7 @@ public class TestKeyValueBlockIterator {
   private KeyValueContainer container;
   private KeyValueContainerData containerData;
   private MutableVolumeSet volumeSet;
-  private Configuration conf;
+  private OzoneConfiguration conf;
   private File testRoot;
 
   private final String storeImpl;
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueHandler.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueHandler.java
index 1f5c677..9ad6e5b 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueHandler.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueHandler.java
@@ -23,7 +23,6 @@ import java.io.IOException;
 import java.util.HashMap;
 import java.util.UUID;
 
-import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.conf.StorageUnit;
 import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
@@ -255,7 +254,7 @@ public class TestKeyValueHandler {
   @Test
   public void testVolumeSetInKeyValueHandler() throws Exception{
     File path = GenericTestUtils.getRandomizedTestDir();
-    Configuration conf = new OzoneConfiguration();
+    OzoneConfiguration conf = new OzoneConfiguration();
     conf.set(HDDS_DATANODE_DIR_KEY, path.getAbsolutePath());
     MutableVolumeSet
         volumeSet = new MutableVolumeSet(UUID.randomUUID().toString(), conf);
@@ -312,7 +311,7 @@ public class TestKeyValueHandler {
   @Test
   public void testCloseInvalidContainer() throws IOException {
     long containerID = 1234L;
-    Configuration conf = new Configuration();
+    OzoneConfiguration conf = new OzoneConfiguration();
     KeyValueContainerData kvData = new KeyValueContainerData(containerID,
         layout,
         (long) StorageUnit.GB.toBytes(1), UUID.randomUUID().toString(),
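The container size in this hunk comes from Hadoop's StorageUnit enum, whose
toBytes returns a double, hence the cast. A one-line sketch of the
conversion (StorageUnit uses binary, 1024-based units, so 1 GB should come
out as 1073741824 bytes):

    import org.apache.hadoop.conf.StorageUnit;

    public class StorageUnitSketch {
      public static void main(String[] args) {
        // 1 GB expressed in bytes; toBytes returns a double.
        long oneGb = (long) StorageUnit.GB.toBytes(1);
        System.out.println(oneGb); // 1073741824
      }
    }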
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/testutils/BlockDeletingServiceTestImpl.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/testutils/BlockDeletingServiceTestImpl.java
index a136983..ecb7af8 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/testutils/BlockDeletingServiceTestImpl.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/testutils/BlockDeletingServiceTestImpl.java
@@ -16,18 +16,18 @@
  */
 package org.apache.hadoop.ozone.container.testutils;
 
-import com.google.common.annotations.VisibleForTesting;
-import com.google.common.util.concurrent.ThreadFactoryBuilder;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.ozone.container.keyvalue.statemachine.background
-    .BlockDeletingService;
-import org.apache.hadoop.ozone.container.ozoneimpl.OzoneContainer;
-
 import java.util.concurrent.CountDownLatch;
 import java.util.concurrent.Future;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicInteger;
 
+import org.apache.hadoop.hdds.conf.ConfigurationSource;
+import org.apache.hadoop.ozone.container.keyvalue.statemachine.background.BlockDeletingService;
+import org.apache.hadoop.ozone.container.ozoneimpl.OzoneContainer;
+
+import com.google.common.annotations.VisibleForTesting;
+import com.google.common.util.concurrent.ThreadFactoryBuilder;
+
 /**
  * A test class implementation for {@link BlockDeletingService}.
  */
@@ -43,7 +43,7 @@ public class BlockDeletingServiceTestImpl
   private AtomicInteger numOfProcessed = new AtomicInteger(0);
 
   public BlockDeletingServiceTestImpl(OzoneContainer container,
-      int serviceInterval, Configuration conf) {
+      int serviceInterval, ConfigurationSource conf) {
     super(container, serviceInterval, SERVICE_TIMEOUT_IN_MILLISECONDS,
         TimeUnit.MILLISECONDS, conf);
   }
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/conf/HddsConfServlet.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/conf/HddsConfServlet.java
index 2c5c817..368197a 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/conf/HddsConfServlet.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/conf/HddsConfServlet.java
@@ -17,30 +17,27 @@
  */
 package org.apache.hadoop.hdds.conf;
 
-import com.google.gson.Gson;
-import java.io.IOException;
-import java.io.Writer;
-
-import java.util.HashMap;
-import java.util.Map;
-import java.util.Properties;
 import javax.servlet.ServletException;
 import javax.servlet.http.HttpServlet;
 import javax.servlet.http.HttpServletRequest;
 import javax.servlet.http.HttpServletResponse;
 import javax.ws.rs.core.HttpHeaders;
+import java.io.IOException;
+import java.io.Writer;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.Properties;
 
 import org.apache.hadoop.hdds.annotation.InterfaceAudience;
 import org.apache.hadoop.hdds.annotation.InterfaceStability;
-import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.http.HttpServer2;
 
 import com.google.common.annotations.VisibleForTesting;
+import com.google.gson.Gson;
+import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_TAGS_SYSTEM_KEY;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_TAGS_SYSTEM_KEY;
-
 /**
  * A servlet to print out the running configuration data.
  */
@@ -63,8 +60,9 @@ public class HddsConfServlet extends HttpServlet {
    * Return the Configuration of the daemon hosting this servlet.
    * This is populated when the HttpServer starts.
    */
-  private Configuration getConfFromContext() {
-    Configuration conf = (Configuration) getServletContext().getAttribute(
+  private OzoneConfiguration getConfFromContext() {
+    OzoneConfiguration conf =
+        (OzoneConfiguration) getServletContext().getAttribute(
         HttpServer2.CONF_CONTEXT_ATTRIBUTE);
     assert conf != null;
     return conf;
@@ -127,11 +125,11 @@ public class HddsConfServlet extends HttpServlet {
   /**
    * Guts of the servlet - extracted for easy testing.
    */
-  static void writeResponse(Configuration conf,
+  static void writeResponse(OzoneConfiguration conf,
       Writer out, String format, String propertyName)
       throws IOException, IllegalArgumentException, BadFormatException {
     if (FORMAT_JSON.equals(format)) {
-      Configuration.dumpConfiguration(conf, propertyName, out);
+      OzoneConfiguration.dumpConfiguration(conf, propertyName, out);
     } else if (FORMAT_XML.equals(format)) {
       conf.writeXml(propertyName, out);
     } else {
@@ -155,7 +153,7 @@ public class HddsConfServlet extends HttpServlet {
       Writer out) throws IOException {
     String cmd = request.getParameter(COMMAND);
     Gson gson = new Gson();
-    Configuration config = getOzoneConfig();
+    OzoneConfiguration config = getOzoneConfig();
 
     switch (cmd) {
     case "getOzoneTags":
@@ -184,7 +182,7 @@ public class HddsConfServlet extends HttpServlet {
 
   }
 
-  private static Configuration getOzoneConfig() {
+  private static OzoneConfiguration getOzoneConfig() {
     return OZONE_CONFIG;
   }
 }
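With the servlet typed to OzoneConfiguration, the cast from the servlet context attribute becomes narrower. A sketch of the retrieval path (attributeKey stands in for HttpServer2.CONF_CONTEXT_ATTRIBUTE; the committed code uses an assert instead of an explicit check):

    import javax.servlet.ServletContext;
    import org.apache.hadoop.hdds.conf.OzoneConfiguration;

    // Sketch: fetch the configuration that the HTTP server stored at startup.
    static OzoneConfiguration confFrom(ServletContext ctx, String attributeKey) {
      Object attr = ctx.getAttribute(attributeKey);
      if (!(attr instanceof OzoneConfiguration)) {
        throw new IllegalStateException("No OzoneConfiguration in context");
      }
      return (OzoneConfiguration) attr;
    }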
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificates/utils/CertificateSignRequest.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificates/utils/CertificateSignRequest.java
index 21a19b5..f740e43 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificates/utils/CertificateSignRequest.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificates/utils/CertificateSignRequest.java
@@ -18,12 +18,21 @@
  */
 package org.apache.hadoop.hdds.security.x509.certificates.utils;
 
-import com.google.common.base.Preconditions;
-import org.apache.hadoop.conf.Configuration;
+import java.io.IOException;
+import java.io.StringReader;
+import java.io.StringWriter;
+import java.security.KeyPair;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Optional;
+
+import org.apache.hadoop.hdds.conf.ConfigurationSource;
 import org.apache.hadoop.hdds.security.exception.SCMSecurityException;
 import org.apache.hadoop.hdds.security.x509.SecurityConfig;
 import org.apache.hadoop.hdds.security.x509.exceptions.CertificateException;
 import org.apache.hadoop.hdds.security.x509.keys.SecurityUtil;
+
+import com.google.common.base.Preconditions;
 import org.apache.logging.log4j.util.Strings;
 import org.bouncycastle.asn1.ASN1EncodableVector;
 import org.bouncycastle.asn1.ASN1Object;
@@ -50,14 +59,6 @@ import org.bouncycastle.pkcs.jcajce.JcaPKCS10CertificationRequestBuilder;
 import org.bouncycastle.util.io.pem.PemObject;
 import org.bouncycastle.util.io.pem.PemReader;
 
-import java.io.IOException;
-import java.io.StringReader;
-import java.io.StringWriter;
-import java.security.KeyPair;
-import java.util.ArrayList;
-import java.util.List;
-import java.util.Optional;
-
 /**
  * A certificate sign request object that wraps operations to build a
  * PKCS10CertificationRequest to CertificateServer.
@@ -154,7 +155,7 @@ public final class CertificateSignRequest {
     private boolean digitalEncryption;
 
     public CertificateSignRequest.Builder setConfiguration(
-        Configuration configuration) {
+        ConfigurationSource configuration) {
       this.config = new SecurityConfig(configuration);
       return this;
     }
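Builder call sites only change in the type they hand over. A usage sketch; setConfiguration is the only setter confirmed by this hunk, and the rest of the builder chain is elided:

    import org.apache.hadoop.hdds.conf.OzoneConfiguration;
    import org.apache.hadoop.hdds.security.x509.certificates.utils.CertificateSignRequest;

    CertificateSignRequest.Builder builder =
        new CertificateSignRequest.Builder()
            .setConfiguration(new OzoneConfiguration()); // now any ConfigurationSource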
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificates/utils/SelfSignedCertificate.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificates/utils/SelfSignedCertificate.java
index 1fd6d7c..7ecc161 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificates/utils/SelfSignedCertificate.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/certificates/utils/SelfSignedCertificate.java
@@ -19,13 +19,23 @@
 
 package org.apache.hadoop.hdds.security.x509.certificates.utils;
 
-import com.google.common.annotations.VisibleForTesting;
-import com.google.common.base.Preconditions;
-import org.apache.hadoop.conf.Configuration;
+import java.io.IOException;
+import java.math.BigInteger;
+import java.security.KeyPair;
+import java.time.Duration;
+import java.time.LocalDate;
+import java.time.LocalTime;
+import java.time.ZoneOffset;
+import java.util.Date;
+
+import org.apache.hadoop.hdds.conf.ConfigurationSource;
 import org.apache.hadoop.hdds.security.exception.SCMSecurityException;
 import org.apache.hadoop.hdds.security.x509.SecurityConfig;
 import org.apache.hadoop.hdds.security.x509.exceptions.CertificateException;
 import org.apache.hadoop.util.Time;
+
+import com.google.common.annotations.VisibleForTesting;
+import com.google.common.base.Preconditions;
 import org.apache.logging.log4j.util.Strings;
 import org.bouncycastle.asn1.DEROctetString;
 import org.bouncycastle.asn1.x500.X500Name;
@@ -40,15 +50,6 @@ import org.bouncycastle.operator.ContentSigner;
 import org.bouncycastle.operator.OperatorCreationException;
 import org.bouncycastle.operator.jcajce.JcaContentSignerBuilder;
 
-import java.io.IOException;
-import java.math.BigInteger;
-import java.security.KeyPair;
-import java.time.Duration;
-import java.time.LocalDate;
-import java.time.LocalTime;
-import java.time.ZoneOffset;
-import java.util.Date;
-
 /**
  * A Self Signed Certificate with CertificateServer basic constraint can be used
  * to bootstrap a certificate infrastructure, if no external certificate is
@@ -158,7 +159,7 @@ public final class SelfSignedCertificate {
     private SecurityConfig config;
     private boolean isCA;
 
-    public Builder setConfiguration(Configuration configuration) {
+    public Builder setConfiguration(ConfigurationSource configuration) {
       this.config = new SecurityConfig(configuration);
       return this;
     }
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/keys/HDDSKeyGenerator.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/keys/HDDSKeyGenerator.java
index 640f5ca..1f3b665 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/keys/HDDSKeyGenerator.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/security/x509/keys/HDDSKeyGenerator.java
@@ -18,16 +18,17 @@
  */
 package org.apache.hadoop.hdds.security.x509.keys;
 
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdds.security.x509.SecurityConfig;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
 import java.security.KeyPair;
 import java.security.KeyPairGenerator;
 import java.security.NoSuchAlgorithmException;
 import java.security.NoSuchProviderException;
 
+import org.apache.hadoop.hdds.conf.ConfigurationSource;
+import org.apache.hadoop.hdds.security.x509.SecurityConfig;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
 /**
  * A class to generate Key Pair for use with Certificates.
  */
@@ -41,7 +42,7 @@ public class HDDSKeyGenerator {
    *
    * @param configuration - config
    */
-  public HDDSKeyGenerator(Configuration configuration) {
+  public HDDSKeyGenerator(ConfigurationSource configuration) {
     this.securityConfig = new SecurityConfig(configuration);
   }
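A usage sketch for the updated constructor. Only the constructor signature is confirmed in this hunk; generateKey() and its checked exceptions are assumed from the class's stated purpose:

    import java.security.KeyPair;
    import org.apache.hadoop.hdds.conf.OzoneConfiguration;
    import org.apache.hadoop.hdds.security.x509.keys.HDDSKeyGenerator;

    HDDSKeyGenerator keyGen = new HDDSKeyGenerator(new OzoneConfiguration());
    // generateKey() is assumed, not shown in this hunk.
    KeyPair pair = keyGen.generateKey();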
 
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/ServerUtils.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/ServerUtils.java
index 9c87018..08f04f1 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/ServerUtils.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/ServerUtils.java
@@ -21,8 +21,8 @@ import java.io.File;
 import java.net.InetSocketAddress;
 import java.util.Collection;
 
-import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdds.HddsConfigKeys;
+import org.apache.hadoop.hdds.conf.ConfigurationSource;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.scm.ScmConfigKeys;
 import org.apache.hadoop.ipc.RPC;
@@ -138,7 +138,7 @@ public final class ServerUtils {
    * @param conf
    * @return
    */
-  public static File getScmDbDir(Configuration conf) {
+  public static File getScmDbDir(ConfigurationSource conf) {
     File metadataDir = getDirectoryFromConfig(conf,
         ScmConfigKeys.OZONE_SCM_DB_DIRS, "SCM");
     if (metadataDir != null) {
@@ -159,7 +159,7 @@ public final class ServerUtils {
    * @param componentName Which component's key is this
    * @return File created from the value of the key in conf.
    */
-  public static File getDirectoryFromConfig(Configuration conf,
+  public static File getDirectoryFromConfig(ConfigurationSource conf,
                                             String key,
                                             String componentName) {
     final Collection<String> metadirs = conf.getTrimmedStringCollection(key);
@@ -191,7 +191,7 @@ public final class ServerUtils {
    * @return File MetaDir
    * @throws IllegalArgumentException if the configuration setting is not set
    */
-  public static File getOzoneMetaDirPath(Configuration conf) {
+  public static File getOzoneMetaDirPath(ConfigurationSource conf) {
     File dirPath = getDirectoryFromConfig(conf,
         HddsConfigKeys.OZONE_METADATA_DIRS, "Ozone");
     if (dirPath == null) {
@@ -215,7 +215,7 @@ public final class ServerUtils {
    * @param key The configuration key which specify the directory.
    * @return The path of the directory.
    */
-  public static File getDBPath(Configuration conf, String key) {
+  public static File getDBPath(ConfigurationSource conf, String key) {
     final File dbDirPath =
         getDirectoryFromConfig(conf, key, "OM");
     if (dbDirPath != null) {
@@ -233,7 +233,7 @@ public final class ServerUtils {
     return remoteUser != null ? remoteUser.getUserName() : null;
   }
 
-  public static String getDefaultRatisDirectory(Configuration conf) {
+  public static String getDefaultRatisDirectory(ConfigurationSource conf) {
     LOG.warn("Storage directory for Ratis is not configured. It is a good " +
             "idea to map this to an SSD disk. Falling back to {}",
         HddsConfigKeys.OZONE_METADATA_DIRS);
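All of these directory helpers now accept any ConfigurationSource. A sketch of the common call, using a throwaway path:

    import java.io.File;
    import org.apache.hadoop.hdds.HddsConfigKeys;
    import org.apache.hadoop.hdds.conf.OzoneConfiguration;
    import org.apache.hadoop.hdds.server.ServerUtils;

    OzoneConfiguration conf = new OzoneConfiguration();
    conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, "/tmp/ozone-meta");
    // Resolves the metadata directory from the key above.
    File metaDir = ServerUtils.getOzoneMetaDirPath(conf);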
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/http/BaseHttpServer.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/http/BaseHttpServer.java
index 5e6d0e6..2f6df58 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/http/BaseHttpServer.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/http/BaseHttpServer.java
@@ -27,7 +27,9 @@ import java.util.OptionalInt;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdds.DFSConfigKeysLegacy;
 import org.apache.hadoop.hdds.HddsConfigKeys;
+import org.apache.hadoop.hdds.conf.ConfigurationSource;
 import org.apache.hadoop.hdds.conf.HddsConfServlet;
+import org.apache.hadoop.hdds.utils.LegacyHadoopConfigurationSource;
 import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.ozone.OzoneConfigKeys;
@@ -62,7 +64,7 @@ public abstract class BaseHttpServer {
       "org.eclipse.jetty.webapp.basetempdir";
 
   private HttpServer2 httpServer;
-  private final Configuration conf;
+  private final ConfigurationSource conf;
 
   private InetSocketAddress httpAddress;
   private InetSocketAddress httpsAddress;
@@ -76,7 +78,8 @@ public abstract class BaseHttpServer {
 
   private boolean profilerSupport;
 
-  public BaseHttpServer(Configuration conf, String name) throws IOException {
+  public BaseHttpServer(ConfigurationSource conf, String name)
+      throws IOException {
     this.name = name;
     this.conf = conf;
     policy = getHttpPolicy(conf);
@@ -87,7 +90,7 @@ public abstract class BaseHttpServer {
       // Avoid registering o.a.h.http.PrometheusServlet in HttpServer2.
       // TODO: Replace "hadoop.prometheus.endpoint.enabled" with
       // CommonConfigurationKeysPublic.HADOOP_PROMETHEUS_ENABLED when possible.
-      conf.setBoolean("hadoop.prometheus.endpoint.enabled", false);
+      conf.set("hadoop.prometheus.endpoint.enabled", "false");
 
       HttpServer2.Builder builder = newHttpServer2BuilderForOzone(
           conf, httpAddress, httpsAddress,
@@ -140,7 +143,7 @@ public abstract class BaseHttpServer {
    * Recon to initialize their HTTP / HTTPS server.
    */
   public static HttpServer2.Builder newHttpServer2BuilderForOzone(
-      Configuration conf, final InetSocketAddress httpAddr,
+      ConfigurationSource conf, final InetSocketAddress httpAddr,
       final InetSocketAddress httpsAddr, String name, String spnegoUserNameKey,
       String spnegoKeytabFileKey) throws IOException {
     HttpConfig.Policy policy = getHttpPolicy(conf);
@@ -172,7 +175,7 @@ public abstract class BaseHttpServer {
     }
 
     if (policy.isHttpsEnabled() && httpsAddr != null) {
-      Configuration sslConf = loadSslConfiguration(conf);
+      ConfigurationSource sslConf = loadSslConfiguration(conf);
       loadSslConfToHttpServerBuilder(builder, sslConf);
 
       if (httpsAddr.getPort() == 0) {
@@ -295,7 +298,7 @@ public abstract class BaseHttpServer {
 
 
   public static HttpServer2.Builder loadSslConfToHttpServerBuilder(
-      HttpServer2.Builder builder, Configuration sslConf) {
+      HttpServer2.Builder builder, ConfigurationSource sslConf) {
     return builder
         .needsClientAuth(
             sslConf.getBoolean(OZONE_CLIENT_HTTPS_NEED_AUTH_KEY,
@@ -320,7 +323,7 @@ public abstract class BaseHttpServer {
    * @return DFS_WEB_AUTHENTICATION_KERBEROS_KEYTAB_KEY if the key is not empty
    * else return defaultKey
    */
-  public static String getSpnegoKeytabKey(Configuration conf,
+  public static String getSpnegoKeytabKey(ConfigurationSource conf,
       String defaultKey) {
     String value =
         conf.get(
@@ -339,7 +342,7 @@ public abstract class BaseHttpServer {
    * @param alias name of the credential to retrieve
    * @return String credential value or null
    */
-  static String getPassword(Configuration conf, String alias) {
+  static String getPassword(ConfigurationSource conf, String alias) {
     String password = null;
     try {
       char[] passchars = conf.getPassword(alias);
@@ -357,8 +360,10 @@ public abstract class BaseHttpServer {
   /**
    * Load HTTPS-related configuration.
    */
-  public static Configuration loadSslConfiguration(Configuration conf) {
-    Configuration sslConf = new Configuration(false);
+  public static ConfigurationSource loadSslConfiguration(
+      ConfigurationSource conf) {
+    Configuration sslConf =
+        new Configuration(false);
 
     sslConf.addResource(conf.get(
         OzoneConfigKeys.OZONE_SERVER_HTTPS_KEYSTORE_RESOURCE_KEY,
@@ -383,7 +388,7 @@ public abstract class BaseHttpServer {
     boolean requireClientAuth = conf.getBoolean(
         OZONE_CLIENT_HTTPS_NEED_AUTH_KEY, OZONE_CLIENT_HTTPS_NEED_AUTH_DEFAULT);
     sslConf.setBoolean(OZONE_CLIENT_HTTPS_NEED_AUTH_KEY, requireClientAuth);
-    return sslConf;
+    return new LegacyHadoopConfigurationSource(sslConf);
   }
 
   public InetSocketAddress getHttpAddress() {
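loadSslConfiguration shows the bridge in both directions: SSL settings are still loaded through a Hadoop Configuration (to reuse addResource), then returned behind the new interface via LegacyHadoopConfigurationSource. A sketch of the adapter idea; the wrapping constructor is confirmed just above, and the static asHadoopConfiguration unwrapper appears later in this patch:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdds.conf.ConfigurationSource;
    import org.apache.hadoop.hdds.utils.LegacyHadoopConfigurationSource;

    // Wrap a mutable Hadoop Configuration behind the new interface.
    Configuration hadoopConf = new Configuration(false);
    hadoopConf.set("ssl.server.keystore.location", "/etc/ssl/keystore.jks");
    ConfigurationSource source = new LegacyHadoopConfigurationSource(hadoopConf);

    // Unwrap where a Hadoop API still requires the legacy class.
    Configuration unwrapped =
        LegacyHadoopConfigurationSource.asHadoopConfiguration(source);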
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/http/FilterInitializer.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/http/FilterInitializer.java
index 388fc21..4d6b65b 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/http/FilterInitializer.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/http/FilterInitializer.java
@@ -17,7 +17,7 @@
  */
 package org.apache.hadoop.hdds.server.http;
 
-import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdds.conf.ConfigurationSource;
 
 /**
  * Initialize a javax.servlet.Filter. 
@@ -29,5 +29,5 @@ public abstract class FilterInitializer {
    * @param conf Configuration for run-time parameters
    */
   public abstract void initFilter(FilterContainer container,
-      Configuration conf);
+      ConfigurationSource conf);
 }
\ No newline at end of file
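A minimal subclass sketch under the new signature. The addFilter call and its parameter map are assumptions based on how StaticUserWebFilter (below) builds its options; only the initFilter signature is confirmed here:

    import java.util.HashMap;
    import org.apache.hadoop.hdds.conf.ConfigurationSource;
    import org.apache.hadoop.hdds.server.http.FilterContainer;
    import org.apache.hadoop.hdds.server.http.FilterInitializer;

    class SampleFilterInitializer extends FilterInitializer {
      @Override
      public void initFilter(FilterContainer container,
          ConfigurationSource conf) {
        HashMap<String, String> options = new HashMap<>();
        // Hypothetical option and filter class, for illustration only.
        options.put("sample.option", conf.get("sample.option", "default"));
        container.addFilter("sample", "org.example.SampleFilter", options);
      }
    }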
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/http/HttpConfig.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/http/HttpConfig.java
index 663a555..f340bdf 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/http/HttpConfig.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/http/HttpConfig.java
@@ -20,7 +20,7 @@ package org.apache.hadoop.hdds.server.http;
 import org.apache.hadoop.HadoopIllegalArgumentException;
 import org.apache.hadoop.hdds.annotation.InterfaceAudience;
 import org.apache.hadoop.hdds.annotation.InterfaceStability;
-import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdds.conf.ConfigurationSource;
 import org.apache.hadoop.ozone.OzoneConfigKeys;
 
 /**
@@ -61,7 +61,7 @@ public final class HttpConfig {
     }
   }
 
-  public static Policy getHttpPolicy(Configuration conf) {
+  public static Policy getHttpPolicy(ConfigurationSource conf) {
     String policyStr = conf.get(OzoneConfigKeys.OZONE_HTTP_POLICY_KEY,
         OzoneConfigKeys.OZONE_HTTP_POLICY_DEFAULT);
     HttpConfig.Policy policy = HttpConfig.Policy.fromString(policyStr);
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/http/HttpServer2.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/http/HttpServer2.java
index 097580b..a57fe54 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/http/HttpServer2.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/http/HttpServer2.java
@@ -17,6 +17,17 @@
  */
 package org.apache.hadoop.hdds.server.http;
 
+import javax.servlet.Filter;
+import javax.servlet.FilterChain;
+import javax.servlet.FilterConfig;
+import javax.servlet.ServletContext;
+import javax.servlet.ServletException;
+import javax.servlet.ServletRequest;
+import javax.servlet.ServletResponse;
+import javax.servlet.http.HttpServlet;
+import javax.servlet.http.HttpServletRequest;
+import javax.servlet.http.HttpServletRequestWrapper;
+import javax.servlet.http.HttpServletResponse;
 import java.io.File;
 import java.io.FileNotFoundException;
 import java.io.IOException;
@@ -37,29 +48,15 @@ import java.util.Properties;
 import java.util.regex.Matcher;
 import java.util.regex.Pattern;
 
-import javax.servlet.Filter;
-import javax.servlet.FilterChain;
-import javax.servlet.FilterConfig;
-import javax.servlet.ServletContext;
-import javax.servlet.ServletException;
-import javax.servlet.ServletRequest;
-import javax.servlet.ServletResponse;
-import javax.servlet.http.HttpServlet;
-import javax.servlet.http.HttpServletRequest;
-import javax.servlet.http.HttpServletRequestWrapper;
-import javax.servlet.http.HttpServletResponse;
-
-import com.google.common.base.Preconditions;
-import com.google.common.collect.ImmutableMap;
-import com.google.common.collect.Lists;
-import com.sun.jersey.spi.container.servlet.ServletContainer;
 import org.apache.hadoop.HadoopIllegalArgumentException;
-import org.apache.hadoop.hdds.annotation.InterfaceAudience;
-import org.apache.hadoop.hdds.annotation.InterfaceStability;
 import org.apache.hadoop.conf.ConfServlet;
-import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.conf.Configuration.IntegerRanges;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
+import org.apache.hadoop.hdds.annotation.InterfaceAudience;
+import org.apache.hadoop.hdds.annotation.InterfaceStability;
+import org.apache.hadoop.hdds.conf.ConfigurationSource;
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.utils.LegacyHadoopConfigurationSource;
 import org.apache.hadoop.jmx.JMXJsonServlet;
 import org.apache.hadoop.log.LogLevel;
 import org.apache.hadoop.security.AuthenticationFilterInitializer;
@@ -72,6 +69,11 @@ import org.apache.hadoop.security.ssl.SSLFactory;
 import org.apache.hadoop.util.ReflectionUtils;
 import org.apache.hadoop.util.Shell;
 import org.apache.hadoop.util.StringUtils;
+
+import com.google.common.base.Preconditions;
+import com.google.common.collect.ImmutableMap;
+import com.google.common.collect.Lists;
+import com.sun.jersey.spi.container.servlet.ServletContainer;
 import org.eclipse.jetty.http.HttpVersion;
 import org.eclipse.jetty.server.ConnectionFactory;
 import org.eclipse.jetty.server.Connector;
@@ -192,8 +194,8 @@ public final class HttpServer2 implements FilterContainer {
   public static class Builder {
     private ArrayList<URI> endpoints = Lists.newArrayList();
     private String name;
-    private Configuration conf;
-    private Configuration sslConf;
+    private ConfigurationSource conf;
+    private ConfigurationSource sslConf;
     private String[] pathSpecs;
     private AccessControlList adminsAcl;
     private boolean securityEnabled = false;
@@ -291,7 +293,7 @@ public final class HttpServer2 implements FilterContainer {
       return this;
     }
 
-    public Builder setConf(Configuration configuration) {
+    public Builder setConf(ConfigurationSource configuration) {
       this.conf = configuration;
       return this;
     }
@@ -300,7 +302,7 @@ public final class HttpServer2 implements FilterContainer {
      * Specify the SSL configuration to load. This API provides an alternative
      * to keyStore/keyPassword/trustStore.
      */
-    public Builder setSSLConf(Configuration sslCnf) {
+    public Builder setSSLConf(ConfigurationSource sslCnf) {
       this.sslConf = sslCnf;
       return this;
     }
@@ -368,14 +370,15 @@ public final class HttpServer2 implements FilterContainer {
     }
 
     /**
-     * A wrapper of {@link Configuration#getPassword(String)}. It returns
+     * A wrapper of {@link ConfigurationSource#getPassword(String)}. It returns
      * <code>String</code> instead of <code>char[]</code>.
      *
      * @param conf the configuration
      * @param name the property name
      * @return the password string or null
      */
-    private static String getPasswordString(Configuration conf, String name)
+    private static String getPasswordString(ConfigurationSource conf,
+        String name)
         throws IOException {
       char[] passchars = conf.getPassword(name);
       if (passchars == null) {
@@ -426,7 +429,7 @@ public final class HttpServer2 implements FilterContainer {
       }
 
       if (this.conf == null) {
-        conf = new Configuration();
+        conf = new OzoneConfiguration();
       }
 
       HttpServer2 server = new HttpServer2(this);
@@ -565,7 +568,7 @@ public final class HttpServer2 implements FilterContainer {
   }
 
   private void initializeWebServer(String name, String hostName,
-      Configuration conf, String[] pathSpecs)
+      ConfigurationSource conf, String[] pathSpecs)
       throws IOException {
 
     Preconditions.checkNotNull(webAppContext);
@@ -602,7 +605,6 @@ public final class HttpServer2 implements FilterContainer {
     addGlobalFilter("safety", QuotingInputFilter.class.getName(), xFrameParams);
     final FilterInitializer[] initializers = getFilterInitializers(conf);
     if (initializers != null) {
-      conf = new Configuration(conf);
       conf.set(BIND_ADDRESS, hostName);
       for (FilterInitializer c : initializers) {
         c.initFilter(this, conf);
@@ -654,18 +656,20 @@ public final class HttpServer2 implements FilterContainer {
   private static SignerSecretProvider constructSecretProvider(final Builder b,
       ServletContext ctx)
       throws Exception {
-    final Configuration conf = b.conf;
+    final ConfigurationSource conf = b.conf;
     Properties config = getFilterProperties(conf,
         b.authFilterConfigurationPrefix);
     return AuthenticationFilter.constructSecretProvider(
         ctx, config, b.disallowFallbackToRandomSignerSecretProvider);
   }
 
-  private static Properties getFilterProperties(Configuration conf, String
+  private static Properties getFilterProperties(ConfigurationSource conf, String
       prefix) {
     Properties prop = new Properties();
     Map<String, String> filterConfig = AuthenticationFilterInitializer
-        .getFilterConfigMap(conf, prefix);
+        .getFilterConfigMap(
+            LegacyHadoopConfigurationSource.asHadoopConfiguration(conf),
+            prefix);
     prop.putAll(filterConfig);
     return prop;
   }
@@ -678,7 +682,8 @@ public final class HttpServer2 implements FilterContainer {
   /**
    * Get an array of FilterConfiguration specified in the conf.
    */
-  private static FilterInitializer[] getFilterInitializers(Configuration conf) {
+  private static FilterInitializer[] getFilterInitializers(
+      ConfigurationSource conf) {
     if (conf == null) {
       return null;
     }
@@ -691,8 +696,12 @@ public final class HttpServer2 implements FilterContainer {
 
     FilterInitializer[] initializers = new FilterInitializer[classes.length];
     for (int i = 0; i < classes.length; i++) {
-      initializers[i] = (FilterInitializer) ReflectionUtils.newInstance(
-          classes[i], conf);
+      try {
+        initializers[i] = (FilterInitializer) classes[i].newInstance();
+      } catch (Exception e) {
+        LOG.error("Can't initialize the filter initializer {}",
+            classes[i].getCanonicalName(), e);
+      }
     }
     return initializers;
   }
@@ -703,7 +712,7 @@ public final class HttpServer2 implements FilterContainer {
    * @throws IOException
    */
   protected void addDefaultApps(ContextHandlerCollection parent,
-      final String appDir, Configuration conf) throws IOException {
+      final String appDir, ConfigurationSource conf) throws IOException {
     // set up the context for "/logs/" if "hadoop.log.dir" property is defined
     // and it's enabled.
     String logDir = System.getProperty("hadoop.log.dir");
@@ -750,7 +759,7 @@ public final class HttpServer2 implements FilterContainer {
   }
 
   private void setContextAttributes(ServletContextHandler context,
-      Configuration conf) {
+      ConfigurationSource conf) {
     context.getServletContext().setAttribute(CONF_CONTEXT_ATTRIBUTE, conf);
     context.getServletContext().setAttribute(ADMINS_ACL, adminsAcl);
   }
@@ -1110,7 +1119,7 @@ public final class HttpServer2 implements FilterContainer {
     pool.setMaxThreads(max);
   }
 
-  private void initSpnego(Configuration conf, String hostName,
+  private void initSpnego(ConfigurationSource conf, String hostName,
       String usernameConfKey, String keytabConfKey) throws IOException {
     Map<String, String> params = new HashMap<>();
     String principalInConf = conf.get(usernameConfKey);
@@ -1380,8 +1389,9 @@ public final class HttpServer2 implements FilterContainer {
   public static boolean isInstrumentationAccessAllowed(
       ServletContext servletContext, HttpServletRequest request,
       HttpServletResponse response) throws IOException {
-    Configuration conf =
-        (Configuration) servletContext.getAttribute(CONF_CONTEXT_ATTRIBUTE);
+    ConfigurationSource conf =
+        (ConfigurationSource) servletContext
+            .getAttribute(CONF_CONTEXT_ATTRIBUTE);
 
     boolean access = true;
     boolean adminAccess = conf.getBoolean(
@@ -1405,8 +1415,9 @@ public final class HttpServer2 implements FilterContainer {
   public static boolean hasAdministratorAccess(
       ServletContext servletContext, HttpServletRequest request,
       HttpServletResponse response) throws IOException {
-    Configuration conf =
-        (Configuration) servletContext.getAttribute(CONF_CONTEXT_ATTRIBUTE);
+    ConfigurationSource conf =
+        (ConfigurationSource) servletContext
+            .getAttribute(CONF_CONTEXT_ATTRIBUTE);
     // If there is no authorization, anybody has administrator access.
     if (!conf.getBoolean(
         CommonConfigurationKeys.HADOOP_SECURITY_AUTHORIZATION, false)) {
@@ -1679,17 +1690,14 @@ public final class HttpServer2 implements FilterContainer {
     }
   }
 
-  private Map<String, String> setHeaders(Configuration conf) {
+  private Map<String, String> setHeaders(ConfigurationSource conf) {
     Map<String, String> xFrameParams = new HashMap<>();
-    Map<String, String> headerConfigMap =
-        conf.getValByRegex(HTTP_HEADER_REGEX);
 
     xFrameParams.putAll(getDefaultHeaders());
     if (this.xFrameOptionIsEnabled) {
       xFrameParams.put(HTTP_HEADER_PREFIX + X_FRAME_OPTIONS,
           this.xFrameOption.toString());
     }
-    xFrameParams.putAll(headerConfigMap);
     return xFrameParams;
   }
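Two behavioral details in this file are easy to miss. First, getFilterInitializers now instantiates initializers with a plain no-arg newInstance() instead of ReflectionUtils.newInstance, so a failed construction is logged and leaves a null slot in the returned array. Second, setHeaders no longer merges headers matched by HTTP_HEADER_REGEX, presumably because ConfigurationSource offers no getValByRegex equivalent. A defensive variant of the instantiation loop might look like this (a sketch, not the committed code; LOG is the class logger):

    import java.util.ArrayList;
    import java.util.List;

    private static FilterInitializer[] toInitializers(Class<?>[] classes) {
      List<FilterInitializer> result = new ArrayList<>();
      for (Class<?> clazz : classes) {
        try {
          result.add((FilterInitializer) clazz.newInstance());
        } catch (ReflectiveOperationException e) {
          LOG.error("Can't initialize the filter initializer {}",
              clazz.getCanonicalName(), e);
        }
      }
      // Failed entries are skipped rather than left null.
      return result.toArray(new FilterInitializer[0]);
    }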
 
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/http/StaticUserWebFilter.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/http/StaticUserWebFilter.java
index c2d88cf..1428ba3 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/http/StaticUserWebFilter.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/http/StaticUserWebFilter.java
@@ -29,7 +29,7 @@ import java.io.IOException;
 import java.security.Principal;
 import java.util.HashMap;
 
-import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdds.conf.ConfigurationSource;
 
 import static org.apache.hadoop.fs.CommonConfigurationKeys.DEFAULT_HADOOP_HTTP_STATIC_USER;
 import static org.apache.hadoop.fs.CommonConfigurationKeys.HADOOP_HTTP_STATIC_USER;
@@ -125,7 +125,7 @@ public class StaticUserWebFilter extends FilterInitializer {
   }
 
   @Override
-  public void initFilter(FilterContainer container, Configuration conf) {
+  public void initFilter(FilterContainer container, ConfigurationSource conf) {
     HashMap<String, String> options = new HashMap<String, String>();
 
     String username = getUsernameFromConf(conf);
@@ -139,7 +139,7 @@ public class StaticUserWebFilter extends FilterInitializer {
   /**
    * Retrieve the static username from the configuration.
    */
-  static String getUsernameFromConf(Configuration conf) {
+  static String getUsernameFromConf(ConfigurationSource conf) {
     String oldStyleUgi = conf.get(DEPRECATED_UGI_KEY);
     if (oldStyleUgi != null) {
       // We can't use the normal configuration deprecation mechanism here
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/HddsServerUtil.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/HddsServerUtil.java
index 17fa392..214adb2 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/HddsServerUtil.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/HddsServerUtil.java
@@ -24,8 +24,8 @@ import java.util.Optional;
 import java.util.OptionalInt;
 import java.util.concurrent.TimeUnit;
 
-import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdds.DFSConfigKeysLegacy;
+import org.apache.hadoop.hdds.conf.ConfigurationSource;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.protocol.SCMSecurityProtocol;
 import org.apache.hadoop.hdds.protocolPB.SCMSecurityProtocolClientSideTranslatorPB;
@@ -87,7 +87,7 @@ public final class HddsServerUtil {
    * @return Target {@code InetSocketAddress} for the SCM service endpoint.
    */
   public static InetSocketAddress getScmAddressForDataNodes(
-      Configuration conf) {
+      ConfigurationSource conf) {
     // We try the following settings in decreasing priority to retrieve the
     // target host.
     // - OZONE_SCM_DATANODE_ADDRESS_KEY
@@ -118,7 +118,7 @@ public final class HddsServerUtil {
    * @return Target {@code InetSocketAddress} for the SCM client endpoint.
    */
   public static InetSocketAddress getScmClientBindAddress(
-      Configuration conf) {
+      ConfigurationSource conf) {
     final String host = getHostNameFromConfigKeys(conf,
         ScmConfigKeys.OZONE_SCM_CLIENT_BIND_HOST_KEY)
         .orElse(ScmConfigKeys.OZONE_SCM_CLIENT_BIND_HOST_DEFAULT);
@@ -138,7 +138,7 @@ public final class HddsServerUtil {
    * @return Target {@code InetSocketAddress} for the SCM block client endpoint.
    */
   public static InetSocketAddress getScmBlockClientBindAddress(
-      Configuration conf) {
+      ConfigurationSource conf) {
     final String host = getHostNameFromConfigKeys(conf,
         ScmConfigKeys.OZONE_SCM_BLOCK_CLIENT_BIND_HOST_KEY)
         .orElse(ScmConfigKeys.OZONE_SCM_BLOCK_CLIENT_BIND_HOST_DEFAULT);
@@ -158,7 +158,7 @@ public final class HddsServerUtil {
    * @return Target {@code InetSocketAddress} for the SCM security service.
    */
   public static InetSocketAddress getScmSecurityInetAddress(
-      Configuration conf) {
+      ConfigurationSource conf) {
     final String host = getHostNameFromConfigKeys(conf,
         ScmConfigKeys.OZONE_SCM_SECURITY_SERVICE_BIND_HOST_KEY)
         .orElse(ScmConfigKeys.OZONE_SCM_SECURITY_SERVICE_BIND_HOST_DEFAULT);
@@ -182,7 +182,7 @@ public final class HddsServerUtil {
    * @return Target {@code InetSocketAddress} for the SCM service endpoint.
    */
   public static InetSocketAddress getScmDataNodeBindAddress(
-      Configuration conf) {
+      ConfigurationSource conf) {
     final Optional<String> host = getHostNameFromConfigKeys(conf,
         ScmConfigKeys.OZONE_SCM_DATANODE_BIND_HOST_KEY);
 
@@ -203,7 +203,7 @@ public final class HddsServerUtil {
    * @return Target {@code InetSocketAddress} for the SCM service endpoint.
    */
   public static InetSocketAddress getReconDataNodeBindAddress(
-      Configuration conf) {
+      ConfigurationSource conf) {
     final Optional<String> host = getHostNameFromConfigKeys(conf,
         ReconConfigKeys.OZONE_RECON_DATANODE_BIND_HOST_KEY);
 
@@ -222,7 +222,7 @@ public final class HddsServerUtil {
    * @param conf - Configuration
    * @return long in Milliseconds.
    */
-  public static long getScmheartbeatCheckerInterval(Configuration conf) {
+  public static long getScmheartbeatCheckerInterval(ConfigurationSource conf) {
     return conf.getTimeDuration(OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL,
         ScmConfigKeys.OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL_DEFAULT,
         TimeUnit.MILLISECONDS);
@@ -235,7 +235,7 @@ public final class HddsServerUtil {
    * @param conf - Ozone Config
    * @return - heartbeat interval in milliseconds.
    */
-  public static long getScmHeartbeatInterval(Configuration conf) {
+  public static long getScmHeartbeatInterval(ConfigurationSource conf) {
     return conf.getTimeDuration(HDDS_HEARTBEAT_INTERVAL,
         HDDS_HEARTBEAT_INTERVAL_DEFAULT, TimeUnit.MILLISECONDS);
   }
@@ -247,7 +247,7 @@ public final class HddsServerUtil {
    * @param conf - Configuration.
    * @return - Long, Milliseconds to wait before flagging a node as stale.
    */
-  public static long getStaleNodeInterval(Configuration conf) {
+  public static long getStaleNodeInterval(ConfigurationSource conf) {
 
     long staleNodeIntervalMs =
         conf.getTimeDuration(OZONE_SCM_STALENODE_INTERVAL,
@@ -284,7 +284,7 @@ public final class HddsServerUtil {
    * @param conf - Configuration.
    * @return - the interval for dead node flagging.
    */
-  public static long getDeadNodeInterval(Configuration conf) {
+  public static long getDeadNodeInterval(ConfigurationSource conf) {
     long staleNodeIntervalMs = getStaleNodeInterval(conf);
     long deadNodeIntervalMs = conf.getTimeDuration(OZONE_SCM_DEADNODE_INTERVAL,
         OZONE_SCM_DEADNODE_INTERVAL_DEFAULT,
@@ -303,7 +303,7 @@ public final class HddsServerUtil {
    * @param conf - Ozone Config
    * @return - Rpc timeout in Milliseconds.
    */
-  public static long getScmRpcTimeOutInMilliseconds(Configuration conf) {
+  public static long getScmRpcTimeOutInMilliseconds(ConfigurationSource conf) {
     return conf.getTimeDuration(OZONE_SCM_HEARTBEAT_RPC_TIMEOUT,
         OZONE_SCM_HEARTBEAT_RPC_TIMEOUT_DEFAULT, TimeUnit.MILLISECONDS);
   }
@@ -314,7 +314,7 @@ public final class HddsServerUtil {
    * @param conf - Ozone Config
    * @return - Log warn interval.
    */
-  public static int getLogWarnInterval(Configuration conf) {
+  public static int getLogWarnInterval(ConfigurationSource conf) {
     return conf.getInt(OZONE_SCM_HEARTBEAT_LOG_WARN_INTERVAL_COUNT,
         OZONE_SCM_HEARTBEAT_LOG_WARN_DEFAULT);
   }
@@ -324,14 +324,15 @@ public final class HddsServerUtil {
    * @param conf - Conf
    * @return port number.
    */
-  public static int getContainerPort(Configuration conf) {
+  public static int getContainerPort(ConfigurationSource conf) {
     return conf.getInt(OzoneConfigKeys.DFS_CONTAINER_IPC_PORT,
         OzoneConfigKeys.DFS_CONTAINER_IPC_PORT_DEFAULT);
   }
 
-  public static String getOzoneDatanodeRatisDirectory(Configuration conf) {
+  public static String getOzoneDatanodeRatisDirectory(
+      ConfigurationSource conf) {
     String storageDir = conf.get(
-            OzoneConfigKeys.DFS_CONTAINER_RATIS_DATANODE_STORAGE_DIR);
+        OzoneConfigKeys.DFS_CONTAINER_RATIS_DATANODE_STORAGE_DIR);
 
     if (Strings.isNullOrEmpty(storageDir)) {
       storageDir = ServerUtils.getDefaultRatisDirectory(conf);
@@ -339,15 +340,13 @@ public final class HddsServerUtil {
     return storageDir;
   }
 
-
-
   /**
    * Get the path for datanode id file.
    *
    * @param conf - Configuration
    * @return the path of datanode id as string
    */
-  public static String getDatanodeIdFilePath(Configuration conf) {
+  public static String getDatanodeIdFilePath(ConfigurationSource conf) {
     String dataNodeIDDirPath =
         conf.get(ScmConfigKeys.OZONE_SCM_DATANODE_ID_DIR);
     if (dataNodeIDDirPath == null) {
@@ -404,7 +403,7 @@ public final class HddsServerUtil {
    * @throws IllegalArgumentException if configuration is not defined or invalid
    */
   public static InetSocketAddress getScmAddressForSecurityProtocol(
-      Configuration conf) {
+      ConfigurationSource conf) {
     Optional<String> host = getHostNameFromConfigKeys(conf,
         ScmConfigKeys.OZONE_SCM_SECURITY_SERVICE_ADDRESS_KEY,
         ScmConfigKeys.OZONE_SCM_CLIENT_ADDRESS_KEY);
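Most of these interval helpers go through getTimeDuration, which ConfigurationSource mirrors from Hadoop's Configuration, including support for suffixed values. A sketch with a hypothetical key:

    import java.util.concurrent.TimeUnit;
    import org.apache.hadoop.hdds.conf.OzoneConfiguration;

    OzoneConfiguration conf = new OzoneConfiguration();
    conf.set("sample.interval", "30s");  // hypothetical key
    // Suffixed durations ("30s", "5m", ...) are parsed into the target unit.
    long intervalMs =
        conf.getTimeDuration("sample.interval", "10s", TimeUnit.MILLISECONDS);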
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/MetadataStoreBuilder.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/MetadataStoreBuilder.java
index dafa92b..d697fdf 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/MetadataStoreBuilder.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/MetadataStoreBuilder.java
@@ -24,7 +24,7 @@ import java.util.Map;
 import java.util.Optional;
 import java.util.concurrent.ConcurrentHashMap;
 
-import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdds.conf.ConfigurationSource;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.ozone.OzoneConfigKeys;
 
@@ -34,7 +34,6 @@ import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_METADATA_STORE_IMPL_
 import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_METADATA_STORE_ROCKSDB_STATISTICS;
 import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_METADATA_STORE_ROCKSDB_STATISTICS_DEFAULT;
 import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_METADATA_STORE_ROCKSDB_STATISTICS_OFF;
-
 import org.iq80.leveldb.Options;
 import org.rocksdb.BlockBasedTableConfig;
 import org.rocksdb.Statistics;
@@ -52,11 +51,11 @@ public class MetadataStoreBuilder {
   private File dbFile;
   private long cacheSize;
   private boolean createIfMissing = true;
-  private Optional<Configuration> optionalConf = Optional.empty();
+  private Optional<ConfigurationSource> optionalConf = Optional.empty();
   private String dbType;
   @VisibleForTesting
-  public static final Map<Configuration, org.rocksdb.Options> CACHED_OPTS =
-      new ConcurrentHashMap<>();
+  public static final Map<ConfigurationSource, org.rocksdb.Options>
+      CACHED_OPTS = new ConcurrentHashMap<>();
   @VisibleForTesting
   public static final OzoneConfiguration DEFAULT_CONF =
       new OzoneConfiguration();
@@ -80,7 +79,7 @@ public class MetadataStoreBuilder {
     return this;
   }
 
-  public MetadataStoreBuilder setConf(Configuration configuration) {
+  public MetadataStoreBuilder setConf(ConfigurationSource configuration) {
     this.optionalConf = Optional.of(configuration);
     return this;
   }
@@ -102,7 +101,7 @@ public class MetadataStoreBuilder {
     }
 
     // Build db store based on configuration
-    final Configuration conf = optionalConf.orElse(DEFAULT_CONF);
+    final ConfigurationSource conf = optionalConf.orElse(DEFAULT_CONF);
 
     if (dbType == null) {
       LOG.debug("dbType is null, using ");
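A usage sketch for the migrated builder; newBuilder() and the setters other than setConf appear in the test code further down, so only setConf's new parameter type is confirmed here:

    import org.apache.hadoop.hdds.conf.OzoneConfiguration;
    import org.apache.hadoop.hdds.utils.MetadataStoreBuilder;

    MetadataStoreBuilder builder = MetadataStoreBuilder.newBuilder()
        .setConf(new OzoneConfiguration());  // previously a Hadoop Configuration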
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/DBStoreBuilder.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/DBStoreBuilder.java
index c9906d1..88d509a 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/DBStoreBuilder.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/utils/db/DBStoreBuilder.java
@@ -29,7 +29,7 @@ import java.util.LinkedList;
 import java.util.List;
 import java.util.Set;
 
-import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdds.conf.ConfigurationSource;
 import org.apache.hadoop.hdds.StringUtils;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 
@@ -73,7 +73,7 @@ public final class DBStoreBuilder {
   private String dbname;
   private Path dbPath;
   private List<String> tableNames;
-  private Configuration configuration;
+  private ConfigurationSource configuration;
   private CodecRegistry registry;
   private String rocksDbStat;
   private RocksDBConfiguration rocksDBConfiguration;
diff --git a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/server/TestServerUtils.java b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/server/TestServerUtils.java
index 9735d2c..1ebde84 100644
--- a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/server/TestServerUtils.java
+++ b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/server/TestServerUtils.java
@@ -17,21 +17,20 @@
  */
 package org.apache.hadoop.hdds.server;
 
-import org.apache.commons.io.FileUtils;
-import org.apache.hadoop.conf.Configuration;
+import java.io.File;
+
 import org.apache.hadoop.hdds.HddsConfigKeys;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.scm.ScmConfigKeys;
 import org.apache.hadoop.test.PathUtils;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.rules.ExpectedException;
-
-import java.io.File;
 
+import org.apache.commons.io.FileUtils;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertTrue;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.ExpectedException;
 
 /**
  * Unit tests for {@link ServerUtils}.
@@ -49,7 +48,7 @@ public class TestServerUtils {
     final File testDir = PathUtils.getTestDir(TestServerUtils.class);
     final File dbDir = new File(testDir, "scmDbDir");
     final File metaDir = new File(testDir, "metaDir");
-    final Configuration conf = new OzoneConfiguration();
+    final OzoneConfiguration conf = new OzoneConfiguration();
     conf.set(ScmConfigKeys.OZONE_SCM_DB_DIRS, dbDir.getPath());
     conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, metaDir.getPath());
 
@@ -72,7 +71,7 @@ public class TestServerUtils {
   public void testGetScmDbDirWithFallback() {
     final File testDir = PathUtils.getTestDir(TestServerUtils.class);
     final File metaDir = new File(testDir, "metaDir");
-    final Configuration conf = new OzoneConfiguration();
+    final OzoneConfiguration conf = new OzoneConfiguration();
     conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, metaDir.getPath());
     try {
       assertFalse(metaDir.exists());
@@ -99,7 +98,7 @@ public class TestServerUtils {
   public void ozoneMetadataDirAcceptsSingleItem() {
     final File testDir = PathUtils.getTestDir(TestServerUtils.class);
     final File metaDir = new File(testDir, "metaDir");
-    final Configuration conf = new OzoneConfiguration();
+    final OzoneConfiguration conf = new OzoneConfiguration();
     conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, metaDir.getPath());
 
     try {
@@ -113,7 +112,7 @@ public class TestServerUtils {
 
   @Test
   public void ozoneMetadataDirRejectsList() {
-    final Configuration conf = new OzoneConfiguration();
+    final OzoneConfiguration conf = new OzoneConfiguration();
     conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, "/data/meta1,/data/meta2");
     thrown.expect(IllegalArgumentException.class);
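The test changes all follow one rule: variables previously typed as Hadoop's Configuration are retyped to the concrete OzoneConfiguration, which implements ConfigurationSource, so the same object satisfies both the tests' setters and the migrated production signatures. A sketch:

    import org.apache.hadoop.hdds.conf.ConfigurationSource;
    import org.apache.hadoop.hdds.conf.OzoneConfiguration;

    OzoneConfiguration conf = new OzoneConfiguration();
    conf.set("sample.key", "sample-value");  // hypothetical key

    // The same instance is handed to code expecting the interface.
    ConfigurationSource source = conf;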
 
diff --git a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/server/http/TestBaseHttpServer.java b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/server/http/TestBaseHttpServer.java
index d1832e2..1730df6 100644
--- a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/server/http/TestBaseHttpServer.java
+++ b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/server/http/TestBaseHttpServer.java
@@ -17,7 +17,7 @@
  */
 package org.apache.hadoop.hdds.server.http;
 
-import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 
 import org.junit.Assert;
 import org.junit.Test;
@@ -28,7 +28,7 @@ import org.junit.Test;
 public class TestBaseHttpServer {
   @Test
   public void getBindAddress() throws Exception {
-    Configuration conf = new Configuration();
+    OzoneConfiguration conf = new OzoneConfiguration();
     conf.set("enabled", "false");
 
     BaseHttpServer baseHttpServer = new BaseHttpServer(conf, "test") {
diff --git a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/TestMetadataStore.java b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/TestMetadataStore.java
index bc53c7a..3eb832f 100644
--- a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/TestMetadataStore.java
+++ b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/TestMetadataStore.java
@@ -28,7 +28,6 @@ import java.util.NoSuchElementException;
 import java.util.UUID;
 import java.util.concurrent.atomic.AtomicInteger;
 
-import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdds.StringUtils;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.utils.MetadataKeyFilters.KeyPrefixFilter;
@@ -90,7 +89,7 @@ public class TestMetadataStore {
     testDir = GenericTestUtils.getTestDir(getClass().getSimpleName()
         + "-" + storeImpl.toLowerCase());
 
-    Configuration conf = new OzoneConfiguration();
+    OzoneConfiguration conf = new OzoneConfiguration();
     conf.set(OzoneConfigKeys.OZONE_METADATA_STORE_IMPL, storeImpl);
 
     store = MetadataStoreBuilder.newBuilder()
@@ -110,7 +109,7 @@ public class TestMetadataStore {
 
   @Test
   public void testIterator() throws Exception {
-    Configuration conf = new OzoneConfiguration();
+    OzoneConfiguration conf = new OzoneConfiguration();
     conf.set(OzoneConfigKeys.OZONE_METADATA_STORE_IMPL, storeImpl);
     File dbDir = GenericTestUtils.getRandomizedTestDir();
     MetadataStore dbStore = MetadataStoreBuilder.newBuilder()
@@ -166,7 +165,7 @@ public class TestMetadataStore {
   @Test
   public void testMetaStoreConfigDifferentFromType() throws IOException {
 
-    Configuration conf = new OzoneConfiguration();
+    OzoneConfiguration conf = new OzoneConfiguration();
     conf.set(OzoneConfigKeys.OZONE_METADATA_STORE_IMPL, storeImpl);
     String dbType;
     GenericTestUtils.setLogLevel(MetadataStoreBuilder.LOG, Level.DEBUG);
@@ -193,7 +192,7 @@ public class TestMetadataStore {
   @Test
   public void testdbTypeNotSet() throws IOException {
 
-    Configuration conf = new OzoneConfiguration();
+    OzoneConfiguration conf = new OzoneConfiguration();
     conf.set(OzoneConfigKeys.OZONE_METADATA_STORE_IMPL, storeImpl);
     GenericTestUtils.setLogLevel(MetadataStoreBuilder.LOG, Level.DEBUG);
     GenericTestUtils.LogCapturer logCapturer =
@@ -460,7 +459,7 @@ public class TestMetadataStore {
   @Test
   public void testDestroyDB() throws IOException {
     // create a new DB to test db destroy
-    Configuration conf = new OzoneConfiguration();
+    OzoneConfiguration conf = new OzoneConfiguration();
     conf.set(OzoneConfigKeys.OZONE_METADATA_STORE_IMPL, storeImpl);
 
     File dbDir = GenericTestUtils.getTestDir(getClass().getSimpleName()
@@ -485,7 +484,7 @@ public class TestMetadataStore {
 
   @Test
   public void testBatchWrite() throws IOException {
-    Configuration conf = new OzoneConfiguration();
+    OzoneConfiguration conf = new OzoneConfiguration();
     conf.set(OzoneConfigKeys.OZONE_METADATA_STORE_IMPL, storeImpl);
 
     File dbDir = GenericTestUtils.getTestDir(getClass().getSimpleName()
diff --git a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/TestRocksDBStoreMBean.java b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/TestRocksDBStoreMBean.java
index 29c7803..610e898 100644
--- a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/TestRocksDBStoreMBean.java
+++ b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/TestRocksDBStoreMBean.java
@@ -17,7 +17,13 @@
  */
 package org.apache.hadoop.hdds.utils;
 
-import org.apache.hadoop.conf.Configuration;
+import javax.management.MBeanServer;
+import java.io.File;
+import java.io.IOException;
+import java.lang.management.ManagementFactory;
+import java.util.HashMap;
+import java.util.Map;
+
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.metrics2.AbstractMetric;
 import org.apache.hadoop.metrics2.MetricsCollector;
@@ -29,27 +35,20 @@ import org.apache.hadoop.metrics2.MetricsTag;
 import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
 import org.apache.hadoop.ozone.OzoneConfigKeys;
 import org.apache.hadoop.test.GenericTestUtils;
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.Test;
-
-import javax.management.MBeanServer;
-import java.io.File;
-import java.io.IOException;
-import java.lang.management.ManagementFactory;
-import java.util.HashMap;
-import java.util.Map;
 
 import static java.nio.charset.StandardCharsets.UTF_8;
+import org.junit.Assert;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertTrue;
+import org.junit.Before;
+import org.junit.Test;
 
 /**
  * Test the JMX interface for the rocksdb metastore implementation.
  */
 public class TestRocksDBStoreMBean {
   
-  private Configuration conf;
+  private OzoneConfiguration conf;
   
   @Before
   public void init() throws Exception {
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/SCMCommonPlacementPolicy.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/SCMCommonPlacementPolicy.java
index 25457f7..7f954b7 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/SCMCommonPlacementPolicy.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/SCMCommonPlacementPolicy.java
@@ -17,21 +17,22 @@
 
 package org.apache.hadoop.hdds.scm;
 
-import com.google.common.annotations.VisibleForTesting;
-import org.apache.hadoop.conf.Configuration;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Random;
+import java.util.stream.Collectors;
+
+import org.apache.hadoop.hdds.conf.ConfigurationSource;
+import org.apache.hadoop.hdds.protocol.DatanodeDetails;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
 import org.apache.hadoop.hdds.scm.container.placement.metrics.SCMNodeMetric;
 import org.apache.hadoop.hdds.scm.exceptions.SCMException;
 import org.apache.hadoop.hdds.scm.node.NodeManager;
-import org.apache.hadoop.hdds.protocol.DatanodeDetails;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
+
+import com.google.common.annotations.VisibleForTesting;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import java.util.ArrayList;
-import java.util.List;
-import java.util.Random;
-import java.util.stream.Collectors;
-
 /**
  * This policy implements a set of invariants which are common
  * for all basic placement policies, acts as the repository of helper
@@ -43,15 +44,16 @@ public abstract class SCMCommonPlacementPolicy implements PlacementPolicy {
       LoggerFactory.getLogger(SCMCommonPlacementPolicy.class);
   private final NodeManager nodeManager;
   private final Random rand;
-  private final Configuration conf;
+  private final ConfigurationSource conf;
 
   /**
    * Constructor.
    *
    * @param nodeManager NodeManager
-   * @param conf Configuration class.
+   * @param conf        Configuration class.
    */
-  public SCMCommonPlacementPolicy(NodeManager nodeManager, Configuration conf) {
+  public SCMCommonPlacementPolicy(NodeManager nodeManager,
+      ConfigurationSource conf) {
     this.nodeManager = nodeManager;
     this.rand = new Random();
     this.conf = conf;
@@ -80,7 +82,7 @@ public abstract class SCMCommonPlacementPolicy implements PlacementPolicy {
    *
    * @return Configuration
    */
-  public Configuration getConf() {
+  public ConfigurationSource getConf() {
     return conf;
   }
 
@@ -95,11 +97,10 @@ public abstract class SCMCommonPlacementPolicy implements PlacementPolicy {
    * 3. if a set of containers are requested, we either meet the required
    * number of nodes or we fail that request.
    *
-   *
    * @param excludedNodes - datanodes with existing replicas
-   * @param favoredNodes - list of nodes preferred.
+   * @param favoredNodes  - list of nodes preferred.
    * @param nodesRequired - number of datanodes required.
-   * @param sizeRequired - size required for the container or block.
+   * @param sizeRequired  - size required for the container or block.
    * @return list of datanodes chosen.
    * @throws SCMException SCM exception.
    */
@@ -162,7 +163,7 @@ public abstract class SCMCommonPlacementPolicy implements PlacementPolicy {
    * expected number of nodes.
    *
    * @param nodesRequired - Nodes Required
-   * @param healthyNodes - List of Nodes in the result set.
+   * @param healthyNodes  - List of Nodes in the result set.
    * @return List of Datanodes that can be used for placement.
    * @throws SCMException SCMException
    */
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/BlockManagerImpl.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/BlockManagerImpl.java
index 8c09c53..e81229d 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/BlockManagerImpl.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/BlockManagerImpl.java
@@ -16,6 +16,7 @@
  */
 package org.apache.hadoop.hdds.scm.block;
 
+import javax.management.ObjectName;
 import java.io.IOException;
 import java.util.ArrayList;
 import java.util.HashMap;
@@ -23,19 +24,16 @@ import java.util.List;
 import java.util.Map;
 import java.util.Objects;
 import java.util.concurrent.TimeUnit;
-import javax.management.ObjectName;
 
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.conf.StorageUnit;
 import org.apache.hadoop.hdds.client.BlockID;
 import org.apache.hadoop.hdds.client.ContainerBlockID;
+import org.apache.hadoop.hdds.conf.ConfigurationSource;
+import org.apache.hadoop.hdds.conf.StorageUnit;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ScmOps;
 import org.apache.hadoop.hdds.scm.ScmConfigKeys;
 import org.apache.hadoop.hdds.scm.ScmUtils;
-import org.apache.hadoop.hdds.scm.safemode.SCMSafeModeManager;
-import org.apache.hadoop.hdds.scm.safemode.SafeModePrecheck;
 import org.apache.hadoop.hdds.scm.container.ContainerInfo;
 import org.apache.hadoop.hdds.scm.container.ContainerManager;
 import org.apache.hadoop.hdds.scm.container.common.helpers.AllocatedBlock;
@@ -44,24 +42,21 @@ import org.apache.hadoop.hdds.scm.exceptions.SCMException;
 import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
 import org.apache.hadoop.hdds.scm.pipeline.PipelineManager;
 import org.apache.hadoop.hdds.scm.pipeline.PipelineNotFoundException;
+import org.apache.hadoop.hdds.scm.safemode.SCMSafeModeManager;
+import org.apache.hadoop.hdds.scm.safemode.SafeModePrecheck;
 import org.apache.hadoop.hdds.scm.server.StorageContainerManager;
+import org.apache.hadoop.hdds.utils.UniqueId;
 import org.apache.hadoop.metrics2.util.MBeans;
 import org.apache.hadoop.util.StringUtils;
-import org.apache.hadoop.hdds.utils.UniqueId;
+
+import static org.apache.hadoop.hdds.scm.exceptions.SCMException.ResultCodes.INVALID_BLOCK_SIZE;
+import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_BLOCK_DELETING_SERVICE_INTERVAL;
+import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_BLOCK_DELETING_SERVICE_INTERVAL_DEFAULT;
+import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_BLOCK_DELETING_SERVICE_TIMEOUT;
+import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_BLOCK_DELETING_SERVICE_TIMEOUT_DEFAULT;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import static org.apache.hadoop.hdds.scm.exceptions.SCMException.ResultCodes
-    .INVALID_BLOCK_SIZE;
-import static org.apache.hadoop.ozone.OzoneConfigKeys
-    .OZONE_BLOCK_DELETING_SERVICE_INTERVAL;
-import static org.apache.hadoop.ozone.OzoneConfigKeys
-    .OZONE_BLOCK_DELETING_SERVICE_INTERVAL_DEFAULT;
-import static org.apache.hadoop.ozone.OzoneConfigKeys
-    .OZONE_BLOCK_DELETING_SERVICE_TIMEOUT;
-import static org.apache.hadoop.ozone.OzoneConfigKeys
-    .OZONE_BLOCK_DELETING_SERVICE_TIMEOUT_DEFAULT;
-
 
 /** Block Manager manages the block access for SCM. */
 public class BlockManagerImpl implements BlockManager, BlockmanagerMXBean {
@@ -89,7 +84,7 @@ public class BlockManagerImpl implements BlockManager, BlockmanagerMXBean {
    * @param scm
    * @throws IOException
    */
-  public BlockManagerImpl(final Configuration conf,
+  public BlockManagerImpl(final ConfigurationSource conf,
                           final StorageContainerManager scm) {
     Objects.requireNonNull(scm, "SCM cannot be null");
     this.pipelineManager = scm.getPipelineManager();
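
BlockManagerImpl now depends only on the lightweight ConfigurationSource. A minimal wiring sketch, assuming OzoneConfiguration implements ConfigurationSource (handler hunks later in this patch assign one where the other is expected) and assuming the block-deleting interval default is a duration string such as "60s":

    import java.util.concurrent.TimeUnit;
    import org.apache.hadoop.hdds.conf.ConfigurationSource;
    import org.apache.hadoop.hdds.conf.OzoneConfiguration;

    public final class ConfigurationSourceSketch {
      public static void main(String[] args) {
        // OzoneConfiguration is usable wherever the new interface is expected.
        ConfigurationSource conf = new OzoneConfiguration();
        // Key name and default below are illustrative assumptions, not
        // quoted from this patch.
        long intervalMs = conf.getTimeDuration(
            "ozone.block.deleting.service.interval", "60s",
            TimeUnit.MILLISECONDS);
        System.out.println("block deleting interval: " + intervalMs + " ms");
      }
    }
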
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/DeletedBlockLogImpl.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/DeletedBlockLogImpl.java
index 1752e2a..08639ba 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/DeletedBlockLogImpl.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/DeletedBlockLogImpl.java
@@ -30,7 +30,7 @@ import java.util.concurrent.locks.Lock;
 import java.util.concurrent.locks.ReentrantLock;
 import java.util.stream.Collectors;
 
-import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdds.conf.ConfigurationSource;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ContainerBlocksDeletionACKProto;
 import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ContainerBlocksDeletionACKProto.DeleteBlockTransactionResult;
@@ -77,7 +77,7 @@ public class DeletedBlockLogImpl
   // Maps txId to set of DNs which are successful in committing the transaction
   private Map<Long, Set<UUID>> transactionToDNsCommitMap;
 
-  public DeletedBlockLogImpl(Configuration conf,
+  public DeletedBlockLogImpl(ConfigurationSource conf,
                              ContainerManager containerManager,
                              SCMMetadataStore scmMetadataStore) {
     maxRetry = conf.getInt(OZONE_SCM_BLOCK_DELETION_MAX_RETRY,
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/SCMBlockDeletingService.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/SCMBlockDeletingService.java
index 74db22d..5ca75d2 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/SCMBlockDeletingService.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/SCMBlockDeletingService.java
@@ -18,7 +18,7 @@ package org.apache.hadoop.hdds.scm.block;
 
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Preconditions;
-import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdds.conf.ConfigurationSource;
 import org.apache.hadoop.hdds.scm.container.ContainerManager;
 import org.apache.hadoop.hdds.scm.events.SCMEvents;
 import org.apache.hadoop.hdds.scm.node.NodeManager;
@@ -83,7 +83,7 @@ public class SCMBlockDeletingService extends BackgroundService {
   public SCMBlockDeletingService(DeletedBlockLog deletedBlockLog,
       ContainerManager containerManager, NodeManager nodeManager,
       EventPublisher eventPublisher, long interval, long serviceTimeout,
-      Configuration conf) {
+      ConfigurationSource conf) {
     super("SCMBlockDeletingService", interval, TimeUnit.MILLISECONDS,
         BLOCK_DELETING_SERVICE_CORE_POOL_SIZE, serviceTimeout);
     this.deletedBlockLog = deletedBlockLog;
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerStateManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerStateManager.java
index 4265432..5a22521 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerStateManager.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerStateManager.java
@@ -17,9 +17,6 @@
 
 package org.apache.hadoop.hdds.scm.container;
 
-import static org.apache.hadoop.hdds.scm.exceptions.SCMException.ResultCodes
-    .FAILED_TO_CHANGE_CONTAINER_STATE;
-
 import java.io.IOException;
 import java.util.HashSet;
 import java.util.List;
@@ -29,8 +26,8 @@ import java.util.Set;
 import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.atomic.AtomicLong;
 
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.conf.StorageUnit;
+import org.apache.hadoop.hdds.conf.ConfigurationSource;
+import org.apache.hadoop.hdds.conf.StorageUnit;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleEvent;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleState;
@@ -43,15 +40,15 @@ import org.apache.hadoop.hdds.scm.exceptions.SCMException;
 import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
 import org.apache.hadoop.hdds.scm.pipeline.PipelineID;
 import org.apache.hadoop.hdds.scm.pipeline.PipelineManager;
-import org.apache.hadoop.ozone.common.statemachine
-    .InvalidStateTransitionException;
+import org.apache.hadoop.ozone.common.statemachine.InvalidStateTransitionException;
 import org.apache.hadoop.ozone.common.statemachine.StateMachine;
 import org.apache.hadoop.util.Time;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
 
 import com.google.common.base.Preconditions;
 import com.google.common.util.concurrent.AtomicLongMap;
+import static org.apache.hadoop.hdds.scm.exceptions.SCMException.ResultCodes.FAILED_TO_CHANGE_CONTAINER_STATE;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * A container state manager keeps track of container states and returns
@@ -132,7 +129,7 @@ public class ContainerStateManager {
    * TODO : Add Container Tags so we know which containers are owned by SCM.
    */
   @SuppressWarnings("unchecked")
-  public ContainerStateManager(final Configuration configuration) {
+  public ContainerStateManager(final ConfigurationSource configuration) {
 
     // Initialize the container state machine.
     final Set<HddsProtos.LifeCycleState> finalStates = new HashSet();
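
ContainerStateManager picks up StorageUnit from its new home in hadoop-hdds/config, so size arithmetic no longer drags in hadoop-common. A small conversion sketch, assuming the relocated enum mirrors the Hadoop original it replaces:

    import org.apache.hadoop.hdds.conf.StorageUnit;

    public final class StorageUnitSketch {
      public static void main(String[] args) {
        // 5 GB expressed in bytes; toBytes is assumed to match the
        // Hadoop StorageUnit this enum was copied from.
        double containerSizeBytes = StorageUnit.GB.toBytes(5);
        System.out.println(containerSizeBytes); // 5.36870912E9
      }
    }
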
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/SCMContainerManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/SCMContainerManager.java
index 3838b9d..38c3d11 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/SCMContainerManager.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/SCMContainerManager.java
@@ -16,29 +16,6 @@
  */
 package org.apache.hadoop.hdds.scm.container;
 
-import com.google.common.annotations.VisibleForTesting;
-import com.google.common.base.Preconditions;
-import com.google.common.primitives.Longs;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ContainerInfoProto;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleState;
-import org.apache.hadoop.hdds.scm.ScmConfigKeys;
-import org.apache.hadoop.hdds.scm.container.metrics.SCMContainerManagerMetrics;
-import org.apache.hadoop.hdds.scm.exceptions.SCMException;
-import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
-import org.apache.hadoop.hdds.scm.pipeline.PipelineManager;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType;
-import org.apache.hadoop.hdds.scm.pipeline.PipelineNotFoundException;
-import org.apache.hadoop.hdds.server.ServerUtils;
-import org.apache.hadoop.ozone.OzoneConsts;
-import org.apache.hadoop.hdds.utils.BatchOperation;
-import org.apache.hadoop.hdds.utils.MetadataStore;
-import org.apache.hadoop.hdds.utils.MetadataStoreBuilder;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
 import java.io.File;
 import java.io.IOException;
 import java.util.ArrayList;
@@ -53,9 +30,32 @@ import java.util.concurrent.locks.Lock;
 import java.util.concurrent.locks.ReentrantLock;
 import java.util.stream.Collectors;
 
+import org.apache.hadoop.hdds.conf.ConfigurationSource;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ContainerInfoProto;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleState;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType;
+import org.apache.hadoop.hdds.scm.ScmConfigKeys;
+import org.apache.hadoop.hdds.scm.container.metrics.SCMContainerManagerMetrics;
+import org.apache.hadoop.hdds.scm.exceptions.SCMException;
+import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
+import org.apache.hadoop.hdds.scm.pipeline.PipelineManager;
+import org.apache.hadoop.hdds.scm.pipeline.PipelineNotFoundException;
+import org.apache.hadoop.hdds.server.ServerUtils;
+import org.apache.hadoop.hdds.utils.BatchOperation;
+import org.apache.hadoop.hdds.utils.MetadataStore;
+import org.apache.hadoop.hdds.utils.MetadataStoreBuilder;
+import org.apache.hadoop.ozone.OzoneConsts;
+
+import com.google.common.annotations.VisibleForTesting;
+import com.google.common.base.Preconditions;
+import com.google.common.primitives.Longs;
 import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_DB_CACHE_SIZE_DEFAULT;
 import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_DB_CACHE_SIZE_MB;
 import static org.apache.hadoop.ozone.OzoneConsts.SCM_CONTAINER_DB;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * ContainerManager class contains the mapping from a name to a pipeline
@@ -81,11 +81,11 @@ public class SCMContainerManager implements ContainerManager {
    * passed to LevelDB and this memory is allocated in Native code space.
    * CacheSize is specified
    * in MB.
-   * @param conf - {@link Configuration}
+   * @param conf - {@link ConfigurationSource}
    * @param pipelineManager - {@link PipelineManager}
    * @throws IOException on Failure.
    */
-  public SCMContainerManager(final Configuration conf,
+  public SCMContainerManager(final ConfigurationSource conf,
       PipelineManager pipelineManager)
       throws IOException {
 
@@ -612,7 +612,7 @@ public class SCMContainerManager implements ContainerManager {
     }
   }
 
-  protected File getContainerDBPath(Configuration conf) {
+  protected File getContainerDBPath(ConfigurationSource conf) {
     File metaDir = ServerUtils.getScmDbDir(conf);
     return new File(metaDir, SCM_CONTAINER_DB);
   }
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/ContainerPlacementPolicyFactory.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/ContainerPlacementPolicyFactory.java
index a3024df..6833765 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/ContainerPlacementPolicyFactory.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/ContainerPlacementPolicyFactory.java
@@ -16,17 +16,18 @@
  * limitations under the License.
  */
 package org.apache.hadoop.hdds.scm.container.placement.algorithms;
-import org.apache.hadoop.conf.Configuration;
+import java.lang.reflect.Constructor;
+
+import org.apache.hadoop.hdds.conf.ConfigurationSource;
 import org.apache.hadoop.hdds.scm.PlacementPolicy;
 import org.apache.hadoop.hdds.scm.ScmConfigKeys;
 import org.apache.hadoop.hdds.scm.exceptions.SCMException;
 import org.apache.hadoop.hdds.scm.net.NetworkTopology;
 import org.apache.hadoop.hdds.scm.node.NodeManager;
+
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import java.lang.reflect.Constructor;
-
 /**
  * A factory to create container placement instance based on configuration
  * property ozone.scm.container.placement.classname.
@@ -44,7 +45,7 @@ public final class ContainerPlacementPolicyFactory {
 
 
   public static PlacementPolicy getPolicy(
-      Configuration conf, final NodeManager nodeManager,
+      ConfigurationSource conf, final NodeManager nodeManager,
       NetworkTopology clusterMap, final boolean fallback,
       SCMContainerPlacementMetrics metrics) throws SCMException{
     final Class<? extends PlacementPolicy> placementClass = conf
@@ -54,7 +55,7 @@ public final class ContainerPlacementPolicyFactory {
     Constructor<? extends PlacementPolicy> constructor;
     try {
       constructor = placementClass.getDeclaredConstructor(NodeManager.class,
-          Configuration.class, NetworkTopology.class, boolean.class,
+          ConfigurationSource.class, NetworkTopology.class, boolean.class,
           SCMContainerPlacementMetrics.class);
       LOG.info("Create container placement policy of type {}",
               placementClass.getCanonicalName());
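
The factory resolves the placement policy constructor reflectively, so the parameter list must match exactly; passing ConfigurationSource.class here only works because every policy below is updated in the same patch. The lookup pattern itself, shown with a made-up Greeter type for illustration:

    import java.lang.reflect.Constructor;

    public final class ReflectiveFactorySketch {
      public static final class Greeter {
        private final String name;
        public Greeter(String name) {
          this.name = name;
        }
        public String greet() {
          return "hello " + name;
        }
      }

      public static void main(String[] args) throws Exception {
        // getDeclaredConstructor matches parameter types exactly, as in
        // ContainerPlacementPolicyFactory above.
        Constructor<Greeter> ctor =
            Greeter.class.getDeclaredConstructor(String.class);
        System.out.println(ctor.newInstance("scm").greet());
      }
    }
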
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/SCMContainerPlacementCapacity.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/SCMContainerPlacementCapacity.java
index 1909344..7d2db05 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/SCMContainerPlacementCapacity.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/SCMContainerPlacementCapacity.java
@@ -19,7 +19,7 @@ package org.apache.hadoop.hdds.scm.container.placement.algorithms;
 
 import java.util.List;
 
-import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdds.conf.ConfigurationSource;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 import org.apache.hadoop.hdds.scm.SCMCommonPlacementPolicy;
 import org.apache.hadoop.hdds.scm.container.placement.metrics.SCMNodeMetric;
@@ -80,7 +80,7 @@ public final class SCMContainerPlacementCapacity
    * @param conf Configuration
    */
   public SCMContainerPlacementCapacity(final NodeManager nodeManager,
-      final Configuration conf, final NetworkTopology networkTopology,
+      final ConfigurationSource conf, final NetworkTopology networkTopology,
       final boolean fallback, final SCMContainerPlacementMetrics metrics) {
     super(nodeManager, conf);
   }
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/SCMContainerPlacementRackAware.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/SCMContainerPlacementRackAware.java
index 8933fe9..72193ff 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/SCMContainerPlacementRackAware.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/SCMContainerPlacementRackAware.java
@@ -19,7 +19,7 @@ package org.apache.hadoop.hdds.scm.container.placement.algorithms;
 
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Preconditions;
-import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdds.conf.ConfigurationSource;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 import org.apache.hadoop.hdds.scm.SCMCommonPlacementPolicy;
 import org.apache.hadoop.hdds.scm.exceptions.SCMException;
@@ -68,7 +68,7 @@ public final class SCMContainerPlacementRackAware
    *                 for closed container placement.
    */
   public SCMContainerPlacementRackAware(final NodeManager nodeManager,
-      final Configuration conf, final NetworkTopology networkTopology,
+      final ConfigurationSource conf, final NetworkTopology networkTopology,
       final boolean fallback, final SCMContainerPlacementMetrics metrics) {
     super(nodeManager, conf);
     this.networkTopology = networkTopology;
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/SCMContainerPlacementRandom.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/SCMContainerPlacementRandom.java
index ce5d10d..4927517 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/SCMContainerPlacementRandom.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/SCMContainerPlacementRandom.java
@@ -18,7 +18,7 @@
 package org.apache.hadoop.hdds.scm.container.placement.algorithms;
 
 import com.google.common.annotations.VisibleForTesting;
-import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdds.conf.ConfigurationSource;
 import org.apache.hadoop.hdds.scm.PlacementPolicy;
 import org.apache.hadoop.hdds.scm.SCMCommonPlacementPolicy;
 import org.apache.hadoop.hdds.scm.exceptions.SCMException;
@@ -52,7 +52,7 @@ public final class SCMContainerPlacementRandom extends SCMCommonPlacementPolicy
    * @param conf Config
    */
   public SCMContainerPlacementRandom(final NodeManager nodeManager,
-      final Configuration conf, final NetworkTopology networkTopology,
+      final ConfigurationSource conf, final NetworkTopology networkTopology,
       final boolean fallback, final SCMContainerPlacementMetrics metrics) {
     super(nodeManager, conf);
   }
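
All three policies now share the (NodeManager, ConfigurationSource, NetworkTopology, boolean, SCMContainerPlacementMetrics) constructor, which is what keeps the factory's reflective lookup working for any of them. A hedged sketch of selecting one by class name, assuming OzoneConfiguration remains a writable Hadoop Configuration and using the key named in the factory's javadoc above:

    import org.apache.hadoop.hdds.conf.OzoneConfiguration;

    public final class PlacementSelectionSketch {
      public static void main(String[] args) {
        OzoneConfiguration conf = new OzoneConfiguration();
        conf.set("ozone.scm.container.placement.classname",
            "org.apache.hadoop.hdds.scm.container.placement.algorithms"
                + ".SCMContainerPlacementRandom");
        System.out.println(
            conf.get("ozone.scm.container.placement.classname"));
      }
    }
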
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NewNodeHandler.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NewNodeHandler.java
index 1dc924b..a40a63a 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NewNodeHandler.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NewNodeHandler.java
@@ -6,9 +6,9 @@
  * to you under the Apache License, Version 2.0 (the
  * "License"); you may not use this file except in compliance
  * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
  * Unless required by applicable law or agreed to in writing, software
  * distributed under the License is distributed on an "AS IS" BASIS,
  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
@@ -18,7 +18,7 @@
 
 package org.apache.hadoop.hdds.scm.node;
 
-import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdds.conf.ConfigurationSource;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 import org.apache.hadoop.hdds.scm.pipeline.PipelineManager;
 import org.apache.hadoop.hdds.server.events.EventHandler;
@@ -30,16 +30,17 @@ import org.apache.hadoop.hdds.server.events.EventPublisher;
 public class NewNodeHandler implements EventHandler<DatanodeDetails> {
 
   private final PipelineManager pipelineManager;
-  private final Configuration conf;
+  private final ConfigurationSource conf;
 
-  public NewNodeHandler(PipelineManager pipelineManager, Configuration conf) {
+  public NewNodeHandler(PipelineManager pipelineManager,
+      ConfigurationSource conf) {
     this.pipelineManager = pipelineManager;
     this.conf = conf;
   }
 
   @Override
   public void onMessage(DatanodeDetails datanodeDetails,
-                        EventPublisher publisher) {
+      EventPublisher publisher) {
     pipelineManager.triggerPipelineCreation();
   }
 }
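
NewNodeHandler keeps the EventHandler contract while switching its stored config type. The handler shape, shown with a toy String payload instead of DatanodeDetails:

    import org.apache.hadoop.hdds.server.events.EventHandler;
    import org.apache.hadoop.hdds.server.events.EventPublisher;

    // Illustrative only: same shape as NewNodeHandler, with a toy payload.
    public class LoggingHandlerSketch implements EventHandler<String> {
      @Override
      public void onMessage(String payload, EventPublisher publisher) {
        System.out.println("received event payload: " + payload);
      }
    }
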
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeStateManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeStateManager.java
index cacf077..b6248aa 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeStateManager.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeStateManager.java
@@ -18,43 +18,47 @@
 
 package org.apache.hadoop.hdds.scm.node;
 
-import com.google.common.annotations.VisibleForTesting;
-import com.google.common.base.Preconditions;
-import com.google.common.util.concurrent.ThreadFactoryBuilder;
-import org.apache.hadoop.conf.Configuration;
+import java.io.Closeable;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.UUID;
+import java.util.concurrent.ScheduledExecutorService;
+import java.util.concurrent.ScheduledFuture;
+import java.util.concurrent.TimeUnit;
+import java.util.function.Predicate;
+
+import org.apache.hadoop.hdds.conf.ConfigurationSource;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState;
-import org.apache.hadoop.hdds.utils.HddsServerUtil;
 import org.apache.hadoop.hdds.scm.container.ContainerID;
-import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
-import org.apache.hadoop.hdds.scm.pipeline.PipelineID;
 import org.apache.hadoop.hdds.scm.events.SCMEvents;
-import org.apache.hadoop.hdds.scm.node.states.*;
 import org.apache.hadoop.hdds.scm.node.states.Node2PipelineMap;
+import org.apache.hadoop.hdds.scm.node.states.NodeAlreadyExistsException;
+import org.apache.hadoop.hdds.scm.node.states.NodeNotFoundException;
+import org.apache.hadoop.hdds.scm.node.states.NodeStateMap;
+import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
+import org.apache.hadoop.hdds.scm.pipeline.PipelineID;
 import org.apache.hadoop.hdds.server.events.Event;
 import org.apache.hadoop.hdds.server.events.EventPublisher;
-import org.apache.hadoop.ozone.common.statemachine
-    .InvalidStateTransitionException;
+import org.apache.hadoop.hdds.utils.HddsServerUtil;
+import org.apache.hadoop.ozone.common.statemachine.InvalidStateTransitionException;
 import org.apache.hadoop.ozone.common.statemachine.StateMachine;
 import org.apache.hadoop.util.Time;
 import org.apache.hadoop.util.concurrent.HadoopExecutors;
+
+import com.google.common.annotations.VisibleForTesting;
+import com.google.common.base.Preconditions;
+import com.google.common.util.concurrent.ThreadFactoryBuilder;
+import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_DEADNODE_INTERVAL;
+import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL;
+import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_STALENODE_INTERVAL;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import java.io.Closeable;
-import java.util.*;
-import java.util.concurrent.ScheduledExecutorService;
-import java.util.concurrent.ScheduledFuture;
-import java.util.concurrent.TimeUnit;
-import java.util.function.Predicate;
-
-import static org.apache.hadoop.hdds.scm.ScmConfigKeys
-    .OZONE_SCM_DEADNODE_INTERVAL;
-import static org.apache.hadoop.hdds.scm.ScmConfigKeys
-    .OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL;
-import static org.apache.hadoop.hdds.scm.ScmConfigKeys
-    .OZONE_SCM_STALENODE_INTERVAL;
-
 /**
  * NodeStateManager maintains the state of all the datanodes in the cluster. All
  * the node state change should happen only via NodeStateManager. It also
@@ -144,7 +148,8 @@ public class NodeStateManager implements Runnable, Closeable {
    *
    * @param conf Configuration
    */
-  public NodeStateManager(Configuration conf, EventPublisher eventPublisher) {
+  public NodeStateManager(ConfigurationSource conf,
+      EventPublisher eventPublisher) {
     this.nodeStateMap = new NodeStateMap();
     this.node2PipelineMap = new Node2PipelineMap();
     this.eventPublisher = eventPublisher;
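
NodeStateManager's reorganized imports make its moving parts easier to see: a ScheduledExecutorService drives the periodic health checks whose intervals come from the three static-imported ScmConfigKeys. The scheduling pattern in plain JDK form, with illustrative intervals rather than the SCM defaults:

    import java.util.concurrent.Executors;
    import java.util.concurrent.ScheduledExecutorService;
    import java.util.concurrent.TimeUnit;

    public final class HealthCheckSketch {
      public static void main(String[] args) throws InterruptedException {
        ScheduledExecutorService executor =
            Executors.newSingleThreadScheduledExecutor();
        // Re-run the check a fixed delay after the previous run completes.
        executor.scheduleWithFixedDelay(
            () -> System.out.println("checking node states"),
            0, 100, TimeUnit.MILLISECONDS);
        Thread.sleep(350);
        executor.shutdown();
      }
    }
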
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NonHealthyToHealthyNodeHandler.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NonHealthyToHealthyNodeHandler.java
index 5976c17..cc32f84 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NonHealthyToHealthyNodeHandler.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NonHealthyToHealthyNodeHandler.java
@@ -17,7 +17,7 @@
  */
 package org.apache.hadoop.hdds.scm.node;
 
-import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdds.conf.ConfigurationSource;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 import org.apache.hadoop.hdds.scm.pipeline.PipelineManager;
@@ -31,7 +31,7 @@ public class NonHealthyToHealthyNodeHandler
     implements EventHandler<DatanodeDetails> {
 
   private final PipelineManager pipelineManager;
-  private final Configuration conf;
+  private final ConfigurationSource conf;
 
   public NonHealthyToHealthyNodeHandler(
       PipelineManager pipelineManager, OzoneConfiguration conf) {
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/StaleNodeHandler.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/StaleNodeHandler.java
index 26e8f5f..5530e73 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/StaleNodeHandler.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/StaleNodeHandler.java
@@ -18,7 +18,7 @@
 
 package org.apache.hadoop.hdds.scm.node;
 
-import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdds.conf.ConfigurationSource;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
@@ -41,7 +41,7 @@ public class StaleNodeHandler implements EventHandler<DatanodeDetails> {
 
   private final NodeManager nodeManager;
   private final PipelineManager pipelineManager;
-  private final Configuration conf;
+  private final ConfigurationSource conf;
 
   public StaleNodeHandler(NodeManager nodeManager,
       PipelineManager pipelineManager, OzoneConfiguration conf) {
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/BackgroundPipelineCreator.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/BackgroundPipelineCreator.java
index 001d185..f7f1d52 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/BackgroundPipelineCreator.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/BackgroundPipelineCreator.java
@@ -17,7 +17,7 @@
  */
 package org.apache.hadoop.hdds.scm.pipeline;
 
-import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdds.conf.ConfigurationSource;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
 import org.apache.hadoop.hdds.scm.ScmConfigKeys;
 import org.apache.hadoop.ozone.OzoneConfigKeys;
@@ -41,11 +41,11 @@ class BackgroundPipelineCreator {
   private final Scheduler scheduler;
   private final AtomicBoolean isPipelineCreatorRunning;
   private final PipelineManager pipelineManager;
-  private final Configuration conf;
+  private final ConfigurationSource conf;
   private ScheduledFuture<?> periodicTask;
 
   BackgroundPipelineCreator(PipelineManager pipelineManager,
-      Scheduler scheduler, Configuration conf) {
+      Scheduler scheduler, ConfigurationSource conf) {
     this.pipelineManager = pipelineManager;
     this.conf = conf;
     this.scheduler = scheduler;
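
BackgroundPipelineCreator pairs the scheduler with an AtomicBoolean so overlapping runs are skipped rather than queued. The guard pattern, illustrative only:

    import java.util.concurrent.atomic.AtomicBoolean;

    public final class RunGuardSketch {
      private static final AtomicBoolean RUNNING = new AtomicBoolean(false);

      static void runOnce() {
        // Same guard shape as isPipelineCreatorRunning above: only one
        // concurrent caller wins the compare-and-set.
        if (!RUNNING.compareAndSet(false, true)) {
          return;
        }
        try {
          System.out.println("creating pipelines");
        } finally {
          RUNNING.set(false);
        }
      }

      public static void main(String[] args) {
        runOnce();
        runOnce();
      }
    }
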
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineActionHandler.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineActionHandler.java
index ae449dc..0720694 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineActionHandler.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineActionHandler.java
@@ -17,7 +17,7 @@
 
 package org.apache.hadoop.hdds.scm.pipeline;
 
-import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdds.conf.ConfigurationSource;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ClosePipelineInfo;
@@ -44,7 +44,7 @@ public class PipelineActionHandler
       LoggerFactory.getLogger(PipelineActionHandler.class);
 
   private final PipelineManager pipelineManager;
-  private final Configuration ozoneConf;
+  private final ConfigurationSource ozoneConf;
 
   public PipelineActionHandler(PipelineManager pipelineManager,
       OzoneConfiguration conf) {
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineFactory.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineFactory.java
index 9e5353a..e1cf382 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineFactory.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineFactory.java
@@ -19,7 +19,7 @@
 package org.apache.hadoop.hdds.scm.pipeline;
 
 import com.google.common.annotations.VisibleForTesting;
-import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdds.conf.ConfigurationSource;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType;
@@ -40,7 +40,7 @@ public class PipelineFactory {
   private Map<ReplicationType, PipelineProvider> providers;
 
   PipelineFactory(NodeManager nodeManager, PipelineStateManager stateManager,
-      Configuration conf, EventPublisher eventPublisher) {
+      ConfigurationSource conf, EventPublisher eventPublisher) {
     providers = new HashMap<>();
     providers.put(ReplicationType.STAND_ALONE,
         new SimplePipelineProvider(nodeManager, stateManager));
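
PipelineFactory dispatches on ReplicationType through a provider map, so adding a pipeline kind means registering one more provider. The dispatch shape with toy types:

    import java.util.EnumMap;
    import java.util.Map;

    public final class ProviderMapSketch {
      enum ReplicationType { STAND_ALONE, RATIS }

      interface Provider {
        String create();
      }

      public static void main(String[] args) {
        // Same dispatch shape as PipelineFactory above, with toy types.
        Map<ReplicationType, Provider> providers =
            new EnumMap<>(ReplicationType.class);
        providers.put(ReplicationType.STAND_ALONE, () -> "simple pipeline");
        providers.put(ReplicationType.RATIS, () -> "ratis pipeline");
        System.out.println(providers.get(ReplicationType.RATIS).create());
      }
    }
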
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelinePlacementPolicy.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelinePlacementPolicy.java
index e96b120..b6a6858 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelinePlacementPolicy.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelinePlacementPolicy.java
@@ -20,7 +20,7 @@ package org.apache.hadoop.hdds.scm.pipeline;
 
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Preconditions;
-import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdds.conf.ConfigurationSource;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
 import org.apache.hadoop.hdds.scm.ScmConfigKeys;
@@ -54,7 +54,7 @@ public final class PipelinePlacementPolicy extends SCMCommonPlacementPolicy {
       LoggerFactory.getLogger(PipelinePlacementPolicy.class);
   private final NodeManager nodeManager;
   private final PipelineStateManager stateManager;
-  private final Configuration conf;
+  private final ConfigurationSource conf;
   private final int heavyNodeCriteria;
 
   /**
@@ -66,7 +66,7 @@ public final class PipelinePlacementPolicy extends SCMCommonPlacementPolicy {
    * @param conf        Configuration
    */
   public PipelinePlacementPolicy(final NodeManager nodeManager,
-      final PipelineStateManager stateManager, final Configuration conf) {
+      final PipelineStateManager stateManager, final ConfigurationSource conf) {
     super(nodeManager, conf);
     this.nodeManager = nodeManager;
     this.conf = conf;
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineReportHandler.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineReportHandler.java
index 1a93f22..f45b3a9 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineReportHandler.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineReportHandler.java
@@ -20,7 +20,7 @@ package org.apache.hadoop.hdds.scm.pipeline;
 
 import java.io.IOException;
 
-import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdds.conf.ConfigurationSource;
 import org.apache.hadoop.hdds.HddsConfigKeys;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
@@ -50,13 +50,13 @@ public class PipelineReportHandler implements
   private static final Logger LOGGER = LoggerFactory.getLogger(
       PipelineReportHandler.class);
   private final PipelineManager pipelineManager;
-  private final Configuration conf;
+  private final ConfigurationSource conf;
   private final SafeModeManager scmSafeModeManager;
   private final boolean pipelineAvailabilityCheck;
   private final SCMPipelineMetrics metrics;
 
   public PipelineReportHandler(SafeModeManager scmSafeModeManager,
-      PipelineManager pipelineManager, Configuration conf) {
+      PipelineManager pipelineManager, ConfigurationSource conf) {
     Preconditions.checkNotNull(pipelineManager);
     this.scmSafeModeManager = scmSafeModeManager;
     this.pipelineManager = pipelineManager;
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/RatisPipelineProvider.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/RatisPipelineProvider.java
index 9d7c996..4d91541 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/RatisPipelineProvider.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/RatisPipelineProvider.java
@@ -18,11 +18,14 @@
 
 package org.apache.hadoop.hdds.scm.pipeline;
 
-import org.apache.hadoop.conf.Configuration;
+import java.io.IOException;
+import java.util.List;
+
+import org.apache.hadoop.hdds.conf.ConfigurationSource;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType;
 import org.apache.hadoop.hdds.scm.ScmConfigKeys;
 import org.apache.hadoop.hdds.scm.events.SCMEvents;
 import org.apache.hadoop.hdds.scm.exceptions.SCMException;
@@ -32,12 +35,10 @@ import org.apache.hadoop.hdds.server.events.EventPublisher;
 import org.apache.hadoop.ozone.protocol.commands.ClosePipelineCommand;
 import org.apache.hadoop.ozone.protocol.commands.CommandForDatanode;
 import org.apache.hadoop.ozone.protocol.commands.CreatePipelineCommand;
+
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import java.io.IOException;
-import java.util.List;
-
 /**
  * Implements Api for creating ratis pipelines.
  */
@@ -46,14 +47,14 @@ public class RatisPipelineProvider extends PipelineProvider {
   private static final Logger LOG =
       LoggerFactory.getLogger(RatisPipelineProvider.class);
 
-  private final Configuration conf;
+  private final ConfigurationSource conf;
   private final EventPublisher eventPublisher;
   private final PipelinePlacementPolicy placementPolicy;
   private int pipelineNumberLimit;
   private int maxPipelinePerDatanode;
 
   RatisPipelineProvider(NodeManager nodeManager,
-      PipelineStateManager stateManager, Configuration conf,
+      PipelineStateManager stateManager, ConfigurationSource conf,
       EventPublisher eventPublisher) {
     super(nodeManager, stateManager);
     this.conf = conf;
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/RatisPipelineUtils.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/RatisPipelineUtils.java
index db9260e..5c9b202 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/RatisPipelineUtils.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/RatisPipelineUtils.java
@@ -21,11 +21,12 @@ import java.io.IOException;
 import java.util.List;
 import java.util.stream.Collectors;
 
-import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdds.conf.ConfigurationSource;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
-import org.apache.hadoop.hdds.scm.ScmConfigKeys;
 import org.apache.hadoop.hdds.ratis.RatisHelper;
+import org.apache.hadoop.hdds.scm.ScmConfigKeys;
+
 import org.apache.ratis.client.RaftClient;
 import org.apache.ratis.grpc.GrpcTlsConfig;
 import org.apache.ratis.protocol.RaftGroup;
@@ -56,7 +57,8 @@ public final class RatisPipelineUtils {
    * @param grpcTlsConfig
    * @throws IOException
    */
-  public static void destroyPipeline(Pipeline pipeline, Configuration ozoneConf,
+  public static void destroyPipeline(Pipeline pipeline,
+      ConfigurationSource ozoneConf,
       GrpcTlsConfig grpcTlsConfig) {
     final RaftGroup group = RatisHelper.newRaftGroup(pipeline);
     if (LOG.isDebugEnabled()) {
@@ -82,7 +84,8 @@ public final class RatisPipelineUtils {
    * @throws IOException
    */
   static void destroyPipeline(DatanodeDetails dn, PipelineID pipelineID,
-      Configuration ozoneConf, GrpcTlsConfig grpcTlsConfig) throws IOException {
+      ConfigurationSource ozoneConf, GrpcTlsConfig grpcTlsConfig)
+      throws IOException {
     final String rpcType = ozoneConf
         .get(ScmConfigKeys.DFS_CONTAINER_RATIS_RPC_TYPE_KEY,
             ScmConfigKeys.DFS_CONTAINER_RATIS_RPC_TYPE_DEFAULT);
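
destroyPipeline selects the Ratis RPC implementation from a plain string property with a compiled-in fallback. Echoing the lookup with literal values; the key spelling and "GRPC" as the default are assumptions about ScmConfigKeys, not quoted from this patch:

    import org.apache.hadoop.hdds.conf.ConfigurationSource;
    import org.apache.hadoop.hdds.conf.OzoneConfiguration;

    public final class RpcTypeLookupSketch {
      public static void main(String[] args) {
        ConfigurationSource conf = new OzoneConfiguration();
        // Returns the supplied default when the key is unset.
        String rpcType = conf.get("dfs.container.ratis.rpc.type", "GRPC");
        System.out.println(rpcType);
      }
    }
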
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/SCMPipelineManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/SCMPipelineManager.java
index 26908b1..200b358 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/SCMPipelineManager.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/SCMPipelineManager.java
@@ -18,10 +18,25 @@
 
 package org.apache.hadoop.hdds.scm.pipeline;
 
-import com.google.common.annotations.VisibleForTesting;
-import com.google.common.base.Preconditions;
-import org.apache.hadoop.conf.Configuration;
+import javax.management.ObjectName;
+import java.io.File;
+import java.io.IOException;
+import java.time.Duration;
+import java.time.Instant;
+import java.util.Collection;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.NavigableSet;
+import java.util.Set;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.concurrent.locks.ReadWriteLock;
+import java.util.concurrent.locks.ReentrantReadWriteLock;
+import java.util.stream.Collectors;
+
 import org.apache.hadoop.hdds.HddsConfigKeys;
+import org.apache.hadoop.hdds.conf.ConfigurationSource;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor;
@@ -33,34 +48,19 @@ import org.apache.hadoop.hdds.scm.node.NodeManager;
 import org.apache.hadoop.hdds.scm.safemode.SCMSafeModeManager;
 import org.apache.hadoop.hdds.server.ServerUtils;
 import org.apache.hadoop.hdds.server.events.EventPublisher;
-import org.apache.hadoop.metrics2.util.MBeans;
-import org.apache.hadoop.ozone.OzoneConsts;
 import org.apache.hadoop.hdds.utils.MetadataKeyFilters;
 import org.apache.hadoop.hdds.utils.MetadataStore;
 import org.apache.hadoop.hdds.utils.MetadataStoreBuilder;
 import org.apache.hadoop.hdds.utils.Scheduler;
+import org.apache.hadoop.metrics2.util.MBeans;
+import org.apache.hadoop.ozone.OzoneConsts;
 import org.apache.hadoop.util.Time;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import javax.management.ObjectName;
-import java.io.File;
-import java.io.IOException;
-import java.time.Duration;
-import java.time.Instant;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-import java.util.NavigableSet;
-import java.util.Set;
-import java.util.Collection;
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.atomic.AtomicBoolean;
-import java.util.concurrent.locks.ReadWriteLock;
-import java.util.concurrent.locks.ReentrantReadWriteLock;
-import java.util.stream.Collectors;
 
+import com.google.common.annotations.VisibleForTesting;
+import com.google.common.base.Preconditions;
 import static org.apache.hadoop.ozone.OzoneConsts.SCM_PIPELINE_DB;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * Implements api needed for management of pipelines. All the write operations
@@ -82,7 +82,7 @@ public class SCMPipelineManager implements PipelineManager {
   private final EventPublisher eventPublisher;
   private final NodeManager nodeManager;
   private final SCMPipelineMetrics metrics;
-  private final Configuration conf;
+  private final ConfigurationSource conf;
   private long pipelineWaitDefaultTimeout;
   // Pipeline Manager MXBean
   private ObjectName pmInfoBean;
@@ -92,7 +92,7 @@ public class SCMPipelineManager implements PipelineManager {
   // to prevent pipelines being created until sufficient nodes have registered.
   private final AtomicBoolean pipelineCreationAllowed;
 
-  public SCMPipelineManager(Configuration conf, NodeManager nodeManager,
+  public SCMPipelineManager(ConfigurationSource conf, NodeManager nodeManager,
       EventPublisher eventPublisher)
       throws IOException {
     this(conf, nodeManager, eventPublisher, null, null);
@@ -102,7 +102,8 @@ public class SCMPipelineManager implements PipelineManager {
     initializePipelineState();
   }
 
-  protected SCMPipelineManager(Configuration conf, NodeManager nodeManager,
+  protected SCMPipelineManager(ConfigurationSource conf,
+      NodeManager nodeManager,
                                EventPublisher eventPublisher,
                                PipelineStateManager pipelineStateManager,
                                PipelineFactory pipelineFactory)
@@ -637,7 +638,7 @@ public class SCMPipelineManager implements PipelineManager {
     pipelineFactory.shutdown();
   }
 
-  protected File getPipelineDBPath(Configuration configuration) {
+  protected File getPipelineDBPath(ConfigurationSource configuration) {
     File metaDir = ServerUtils.getScmDbDir(configuration);
     return new File(metaDir, SCM_PIPELINE_DB);
   }
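
getPipelineDBPath stays protected and now takes the interface, mirroring getContainerDBPath in SCMContainerManager above; subclasses (typically tests) can redirect the DB location without touching the rest of the manager. A sketch of the shape of such an override, without the manager's full constructor wiring:

    import java.io.File;
    import org.apache.hadoop.hdds.conf.ConfigurationSource;

    // Illustrative only: redirect the pipeline DB to a scratch directory.
    public class ScratchDirPipelineManagerSketch {
      protected File getPipelineDBPath(ConfigurationSource unused) {
        return new File(System.getProperty("java.io.tmpdir"),
            "scm-pipelines.db");
      }
    }
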
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/safemode/ContainerSafeModeRule.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/safemode/ContainerSafeModeRule.java
index 8eadeb3..2d7466e 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/safemode/ContainerSafeModeRule.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/safemode/ContainerSafeModeRule.java
@@ -24,7 +24,7 @@ import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.atomic.AtomicLong;
 
 import com.google.common.base.Preconditions;
-import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdds.conf.ConfigurationSource;
 import org.apache.hadoop.hdds.HddsConfigKeys;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
 import org.apache.hadoop.hdds.scm.container.ContainerInfo;
@@ -51,7 +51,7 @@ public class ContainerSafeModeRule extends
   private AtomicLong containerWithMinReplicas = new AtomicLong(0);
 
   public ContainerSafeModeRule(String ruleName, EventQueue eventQueue,
-      Configuration conf,
+      ConfigurationSource conf,
       List<ContainerInfo> containers, SCMSafeModeManager manager) {
     super(manager, ruleName, eventQueue);
     safeModeCutoff = conf.getDouble(
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/safemode/DataNodeSafeModeRule.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/safemode/DataNodeSafeModeRule.java
index 1029d71..0afbd27 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/safemode/DataNodeSafeModeRule.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/safemode/DataNodeSafeModeRule.java
@@ -20,7 +20,7 @@ package org.apache.hadoop.hdds.scm.safemode;
 import java.util.HashSet;
 import java.util.UUID;
 
-import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdds.conf.ConfigurationSource;
 import org.apache.hadoop.hdds.HddsConfigKeys;
 import org.apache.hadoop.hdds.scm.events.SCMEvents;
 import org.apache.hadoop.hdds.scm.server.SCMDatanodeProtocolServer.NodeRegistrationContainerReport;
@@ -42,7 +42,7 @@ public class DataNodeSafeModeRule extends
   private HashSet<UUID> registeredDnSet;
 
   public DataNodeSafeModeRule(String ruleName, EventQueue eventQueue,
-      Configuration conf,
+      ConfigurationSource conf,
       SCMSafeModeManager manager) {
     super(manager, ruleName, eventQueue);
     requiredDns = conf.getInt(
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/safemode/HealthyPipelineSafeModeRule.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/safemode/HealthyPipelineSafeModeRule.java
index 688125f..bd58a06 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/safemode/HealthyPipelineSafeModeRule.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/safemode/HealthyPipelineSafeModeRule.java
@@ -19,20 +19,21 @@ package org.apache.hadoop.hdds.scm.safemode;
 
 import java.util.HashSet;
 import java.util.Set;
-import org.apache.hadoop.conf.Configuration;
+
 import org.apache.hadoop.hdds.HddsConfigKeys;
+import org.apache.hadoop.hdds.conf.ConfigurationSource;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
 import org.apache.hadoop.hdds.scm.events.SCMEvents;
 import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
 import org.apache.hadoop.hdds.scm.pipeline.PipelineID;
 import org.apache.hadoop.hdds.scm.pipeline.PipelineManager;
-import com.google.common.base.Preconditions;
 import org.apache.hadoop.hdds.server.events.EventQueue;
 import org.apache.hadoop.hdds.server.events.TypedEvent;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
 
 import com.google.common.annotations.VisibleForTesting;
+import com.google.common.base.Preconditions;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * Class defining Safe mode exit criteria for Pipelines.
@@ -52,7 +53,7 @@ public class HealthyPipelineSafeModeRule extends SafeModeExitRule<Pipeline> {
 
   HealthyPipelineSafeModeRule(String ruleName, EventQueue eventQueue,
       PipelineManager pipelineManager,
-      SCMSafeModeManager manager, Configuration configuration) {
+      SCMSafeModeManager manager, ConfigurationSource configuration) {
     super(manager, ruleName, eventQueue);
     healthyPipelinesPercent =
         configuration.getDouble(HddsConfigKeys.
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/safemode/OneReplicaPipelineSafeModeRule.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/safemode/OneReplicaPipelineSafeModeRule.java
index 0783d02..bce4af5 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/safemode/OneReplicaPipelineSafeModeRule.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/safemode/OneReplicaPipelineSafeModeRule.java
@@ -19,7 +19,7 @@ package org.apache.hadoop.hdds.scm.safemode;
 
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Preconditions;
-import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdds.conf.ConfigurationSource;
 import org.apache.hadoop.hdds.HddsConfigKeys;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
 import org.apache.hadoop.hdds.scm.events.SCMEvents;
@@ -54,7 +54,7 @@ public class OneReplicaPipelineSafeModeRule extends
 
   public OneReplicaPipelineSafeModeRule(String ruleName, EventQueue eventQueue,
       PipelineManager pipelineManager,
-      SCMSafeModeManager safeModeManager, Configuration configuration) {
+      SCMSafeModeManager safeModeManager, ConfigurationSource configuration) {
     super(safeModeManager, ruleName, eventQueue);
 
     double percent =
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/safemode/SCMSafeModeManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/safemode/SCMSafeModeManager.java
index 8537761..c72d775 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/safemode/SCMSafeModeManager.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/safemode/SCMSafeModeManager.java
@@ -17,21 +17,22 @@
  */
 package org.apache.hadoop.hdds.scm.safemode;
 
-import com.google.common.annotations.VisibleForTesting;
-
 import java.util.HashMap;
 import java.util.HashSet;
 import java.util.List;
 import java.util.Map;
 import java.util.Set;
 import java.util.concurrent.atomic.AtomicBoolean;
-import org.apache.hadoop.conf.Configuration;
+
 import org.apache.hadoop.hdds.HddsConfigKeys;
+import org.apache.hadoop.hdds.conf.ConfigurationSource;
 import org.apache.hadoop.hdds.scm.container.ContainerInfo;
 import org.apache.hadoop.hdds.scm.events.SCMEvents;
 import org.apache.hadoop.hdds.scm.pipeline.PipelineManager;
 import org.apache.hadoop.hdds.server.events.EventPublisher;
 import org.apache.hadoop.hdds.server.events.EventQueue;
+
+import com.google.common.annotations.VisibleForTesting;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -85,7 +86,7 @@ public class SCMSafeModeManager implements SafeModeManager {
 
   private Map<String, SafeModeExitRule> exitRules = new HashMap(1);
   private Set<String> preCheckRules = new HashSet<>(1);
-  private Configuration config;
+  private ConfigurationSource config;
   private static final String CONT_EXIT_RULE = "ContainerSafeModeRule";
   private static final String DN_EXIT_RULE = "DataNodeSafeModeRule";
   private static final String HEALTHY_PIPELINE_EXIT_RULE =
@@ -101,7 +102,7 @@ public class SCMSafeModeManager implements SafeModeManager {
 
   private final SafeModeMetrics safeModeMetrics;
 
-  public SCMSafeModeManager(Configuration conf,
+  public SCMSafeModeManager(ConfigurationSource conf,
       List<ContainerInfo> allContainers, PipelineManager pipelineManager,
       EventQueue eventQueue) {
     this.config = conf;
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/safemode/SafeModeHandler.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/safemode/SafeModeHandler.java
index ef70b00..bbf8b3d 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/safemode/SafeModeHandler.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/safemode/SafeModeHandler.java
@@ -17,7 +17,7 @@
 
 package org.apache.hadoop.hdds.scm.safemode;
 
-import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdds.conf.ConfigurationSource;
 import org.apache.hadoop.hdds.HddsConfigKeys;
 import org.apache.hadoop.hdds.scm.safemode.SCMSafeModeManager.SafeModeStatus;
 import org.apache.hadoop.hdds.server.events.EventHandler;
@@ -51,7 +51,7 @@ public class SafeModeHandler implements EventHandler<SafeModeStatus> {
    * SafeModeHandler, to handle the logic once we exit safe mode.
    * @param configuration
    */
-  public SafeModeHandler(Configuration configuration) {
+  public SafeModeHandler(ConfigurationSource configuration) {
     this.waitTime = configuration.getTimeDuration(
         HddsConfigKeys.HDDS_SCM_WAIT_TIME_AFTER_SAFE_MODE_EXIT,
         HddsConfigKeys.HDDS_SCM_WAIT_TIME_AFTER_SAFE_MODE_EXIT_DEFAULT,
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/safemode/SafeModePrecheck.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/safemode/SafeModePrecheck.java
index b63d04e..6a0001c 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/safemode/SafeModePrecheck.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/safemode/SafeModePrecheck.java
@@ -19,7 +19,7 @@
 package org.apache.hadoop.hdds.scm.safemode;
 
 import java.util.concurrent.atomic.AtomicBoolean;
-import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdds.conf.ConfigurationSource;
 import org.apache.hadoop.hdds.HddsConfigKeys;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ScmOps;
 import org.apache.hadoop.hdds.scm.exceptions.SCMException;
@@ -33,7 +33,7 @@ public class SafeModePrecheck implements Precheck<ScmOps> {
   private AtomicBoolean inSafeMode;
   public static final String PRECHECK_TYPE = "SafeModePrecheck";
 
-  public SafeModePrecheck(Configuration conf) {
+  public SafeModePrecheck(ConfigurationSource conf) {
     boolean safeModeEnabled = conf.getBoolean(
         HddsConfigKeys.HDDS_SCM_SAFEMODE_ENABLED,
         HddsConfigKeys.HDDS_SCM_SAFEMODE_ENABLED_DEFAULT);
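
The StorageContainerManager changes below keep a Hadoop Configuration import alongside the new interface and pull in LegacyHadoopConfigurationSource, the adapter added earlier in this patch for code that still receives a Hadoop object. A bridging sketch; the single-argument constructor is inferred from the adapter's name and usage, not quoted from the patch:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdds.conf.ConfigurationSource;
    import org.apache.hadoop.hdds.utils.LegacyHadoopConfigurationSource;

    public final class LegacyBridgeSketch {
      public static void main(String[] args) {
        Configuration hadoopConf = new Configuration();
        hadoopConf.set("ozone.sketch.key", "value");
        // Wrap the legacy object, then read through the new interface.
        ConfigurationSource conf =
            new LegacyHadoopConfigurationSource(hadoopConf);
        System.out.println(conf.get("ozone.sketch.key"));
      }
    }
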
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java
index 383e7ce..1f2305f 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java
@@ -21,36 +21,33 @@
  */
 package org.apache.hadoop.hdds.scm.server;
 
-import com.google.common.annotations.VisibleForTesting;
-import com.google.common.cache.Cache;
-import com.google.common.cache.CacheBuilder;
-import com.google.common.cache.RemovalListener;
-import com.google.protobuf.BlockingService;
-
+import javax.management.ObjectName;
+import java.io.IOException;
+import java.net.InetAddress;
+import java.net.InetSocketAddress;
 import java.security.cert.CertificateException;
 import java.security.cert.X509Certificate;
+import java.util.Collection;
+import java.util.HashMap;
+import java.util.Map;
 import java.util.Objects;
-import org.apache.hadoop.hdds.annotation.InterfaceAudience;
+import java.util.concurrent.ConcurrentMap;
+import java.util.concurrent.TimeUnit;
+
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdds.HddsUtils;
+import org.apache.hadoop.hdds.annotation.InterfaceAudience;
+import org.apache.hadoop.hdds.conf.ConfigurationSource;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState;
-import org.apache.hadoop.hdds.utils.HddsServerUtil;
+import org.apache.hadoop.hdds.scm.PlacementPolicy;
 import org.apache.hadoop.hdds.scm.ScmConfig;
 import org.apache.hadoop.hdds.scm.ScmConfigKeys;
 import org.apache.hadoop.hdds.scm.block.BlockManager;
 import org.apache.hadoop.hdds.scm.block.BlockManagerImpl;
 import org.apache.hadoop.hdds.scm.block.DeletedBlockLogImpl;
 import org.apache.hadoop.hdds.scm.block.PendingDeleteHandler;
-import org.apache.hadoop.hdds.scm.container.ReplicationManager.ReplicationManagerConfiguration;
-import org.apache.hadoop.hdds.scm.container.placement.algorithms.ContainerPlacementPolicyFactory;
-import org.apache.hadoop.hdds.scm.container.placement.algorithms
-    .SCMContainerPlacementMetrics;
-import org.apache.hadoop.hdds.scm.net.NetworkTopology;
-import org.apache.hadoop.hdds.scm.net.NetworkTopologyImpl;
-import org.apache.hadoop.hdds.scm.safemode.SafeModeHandler;
-import org.apache.hadoop.hdds.scm.safemode.SCMSafeModeManager;
 import org.apache.hadoop.hdds.scm.command.CommandStatusReportHandler;
 import org.apache.hadoop.hdds.scm.container.CloseContainerEventHandler;
 import org.apache.hadoop.hdds.scm.container.ContainerActionsHandler;
@@ -59,27 +56,33 @@ import org.apache.hadoop.hdds.scm.container.ContainerInfo;
 import org.apache.hadoop.hdds.scm.container.ContainerManager;
 import org.apache.hadoop.hdds.scm.container.ContainerReportHandler;
 import org.apache.hadoop.hdds.scm.container.IncrementalContainerReportHandler;
+import org.apache.hadoop.hdds.scm.container.ReplicationManager;
+import org.apache.hadoop.hdds.scm.container.ReplicationManager.ReplicationManagerConfiguration;
 import org.apache.hadoop.hdds.scm.container.SCMContainerManager;
-import org.apache.hadoop.hdds.scm.PlacementPolicy;
+import org.apache.hadoop.hdds.scm.container.placement.algorithms.ContainerPlacementPolicyFactory;
+import org.apache.hadoop.hdds.scm.container.placement.algorithms.SCMContainerPlacementMetrics;
 import org.apache.hadoop.hdds.scm.container.placement.metrics.ContainerStat;
 import org.apache.hadoop.hdds.scm.container.placement.metrics.SCMMetrics;
-import org.apache.hadoop.hdds.scm.container.ReplicationManager;
 import org.apache.hadoop.hdds.scm.events.SCMEvents;
 import org.apache.hadoop.hdds.scm.exceptions.SCMException;
 import org.apache.hadoop.hdds.scm.exceptions.SCMException.ResultCodes;
 import org.apache.hadoop.hdds.scm.metadata.SCMMetadataStore;
 import org.apache.hadoop.hdds.scm.metadata.SCMMetadataStoreRDBImpl;
+import org.apache.hadoop.hdds.scm.net.NetworkTopology;
+import org.apache.hadoop.hdds.scm.net.NetworkTopologyImpl;
 import org.apache.hadoop.hdds.scm.node.DeadNodeHandler;
 import org.apache.hadoop.hdds.scm.node.NewNodeHandler;
-import org.apache.hadoop.hdds.scm.node.NonHealthyToHealthyNodeHandler;
 import org.apache.hadoop.hdds.scm.node.NodeManager;
 import org.apache.hadoop.hdds.scm.node.NodeReportHandler;
+import org.apache.hadoop.hdds.scm.node.NonHealthyToHealthyNodeHandler;
 import org.apache.hadoop.hdds.scm.node.SCMNodeManager;
 import org.apache.hadoop.hdds.scm.node.StaleNodeHandler;
 import org.apache.hadoop.hdds.scm.pipeline.PipelineActionHandler;
 import org.apache.hadoop.hdds.scm.pipeline.PipelineManager;
 import org.apache.hadoop.hdds.scm.pipeline.PipelineReportHandler;
 import org.apache.hadoop.hdds.scm.pipeline.SCMPipelineManager;
+import org.apache.hadoop.hdds.scm.safemode.SCMSafeModeManager;
+import org.apache.hadoop.hdds.scm.safemode.SafeModeHandler;
 import org.apache.hadoop.hdds.security.exception.SCMSecurityException;
 import org.apache.hadoop.hdds.security.x509.SecurityConfig;
 import org.apache.hadoop.hdds.security.x509.certificate.authority.CertificateServer;
@@ -88,6 +91,9 @@ import org.apache.hadoop.hdds.security.x509.certificate.utils.CertificateCodec;
 import org.apache.hadoop.hdds.server.ServiceRuntimeInfoImpl;
 import org.apache.hadoop.hdds.server.events.EventPublisher;
 import org.apache.hadoop.hdds.server.events.EventQueue;
+import org.apache.hadoop.hdds.utils.HddsServerUtil;
+import org.apache.hadoop.hdds.utils.HddsVersionInfo;
+import org.apache.hadoop.hdds.utils.LegacyHadoopConfigurationSource;
 import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.ipc.RPC;
@@ -104,23 +110,17 @@ import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.UserGroupInformation.AuthenticationMethod;
 import org.apache.hadoop.security.authentication.client.AuthenticationException;
 import org.apache.hadoop.util.JvmPauseMonitor;
-import org.apache.hadoop.hdds.utils.HddsVersionInfo;
+
+import com.google.common.annotations.VisibleForTesting;
+import com.google.common.cache.Cache;
+import com.google.common.cache.CacheBuilder;
+import com.google.common.cache.RemovalListener;
+import com.google.protobuf.BlockingService;
+import static org.apache.hadoop.hdds.scm.ScmConfigKeys.HDDS_SCM_WATCHER_TIMEOUT_DEFAULT;
 import org.apache.ratis.grpc.GrpcTlsConfig;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import javax.management.ObjectName;
-import java.io.IOException;
-import java.net.InetAddress;
-import java.net.InetSocketAddress;
-import java.util.Collection;
-import java.util.HashMap;
-import java.util.Map;
-import java.util.concurrent.ConcurrentMap;
-import java.util.concurrent.TimeUnit;
-
-import static org.apache.hadoop.hdds.scm.ScmConfigKeys.HDDS_SCM_WATCHER_TIMEOUT_DEFAULT;
-
 /**
  * StorageContainerManager is the main entry point for the service that
  * provides information about
@@ -505,7 +505,7 @@ public final class StorageContainerManager extends ServiceRuntimeInfoImpl
    *
    * @param conf the configuration used to load the SCM Kerberos settings
    */
-  private void loginAsSCMUser(Configuration conf)
+  private void loginAsSCMUser(ConfigurationSource conf)
       throws IOException, AuthenticationException {
     if (LOG.isDebugEnabled()) {
       ScmConfig scmConfig = configuration.getObject(ScmConfig.class);
@@ -515,18 +515,20 @@ public final class StorageContainerManager extends ServiceRuntimeInfoImpl
           scmConfig.getKerberosKeytab());
     }
 
-    if (SecurityUtil.getAuthenticationMethod(conf).equals(
+    Configuration hadoopConf =
+        LegacyHadoopConfigurationSource.asHadoopConfiguration(conf);
+    if (SecurityUtil.getAuthenticationMethod(hadoopConf).equals(
         AuthenticationMethod.KERBEROS)) {
-      UserGroupInformation.setConfiguration(conf);
+      UserGroupInformation.setConfiguration(hadoopConf);
       InetSocketAddress socAddr = HddsServerUtil
           .getScmBlockClientBindAddress(conf);
-      SecurityUtil.login(conf,
+      SecurityUtil.login(hadoopConf,
             ScmConfig.ConfigStrings.HDDS_SCM_KERBEROS_KEYTAB_FILE_KEY,
             ScmConfig.ConfigStrings.HDDS_SCM_KERBEROS_PRINCIPAL_KEY,
             socAddr.getHostName());
     } else {
       throw new AuthenticationException(SecurityUtil.getAuthenticationMethod(
-          conf) + " authentication method not supported. "
+          hadoopConf) + " authentication method not supported. "
           + "SCM user login failed.");
     }
     LOG.info("SCM login successful.");
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManagerHttpServer.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManagerHttpServer.java
index 3f963fd..80b3eb9 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManagerHttpServer.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManagerHttpServer.java
@@ -17,18 +17,18 @@
 
 package org.apache.hadoop.hdds.scm.server;
 
-import org.apache.hadoop.conf.Configuration;
+import java.io.IOException;
+
+import org.apache.hadoop.hdds.conf.ConfigurationSource;
 import org.apache.hadoop.hdds.scm.ScmConfigKeys;
 import org.apache.hadoop.hdds.server.http.BaseHttpServer;
 
-import java.io.IOException;
-
 /**
  * HttpServer2 wrapper for the Ozone Storage Container Manager.
  */
 public class StorageContainerManagerHttpServer extends BaseHttpServer {
 
-  public StorageContainerManagerHttpServer(Configuration conf)
+  public StorageContainerManagerHttpServer(ConfigurationSource conf)
       throws IOException {
     super(conf, "scm");
   }
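
With BaseHttpServer now taking a ConfigurationSource, the SCM web endpoint
can be built from any implementation of the interface; OzoneConfiguration is
the usual concrete one. A hedged usage sketch (the start/stop lifecycle
calls are assumed from the existing BaseHttpServer API, not shown in this
hunk):

import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.hdds.scm.server.StorageContainerManagerHttpServer;

/** Illustrative sketch only, not part of this commit. */
public final class ScmHttpServerSketch {
  public static void main(String[] args) throws Exception {
    OzoneConfiguration conf = new OzoneConfiguration();
    // The constructor now accepts the ConfigurationSource interface.
    StorageContainerManagerHttpServer httpServer =
        new StorageContainerManagerHttpServer(conf);
    httpServer.start();  // lifecycle methods assumed from BaseHttpServer
    httpServer.stop();
  }
}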
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/TestHddsServerUtil.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/TestHddsServerUtil.java
index 843586c..b3cb668 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/TestHddsServerUtil.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/TestHddsServerUtil.java
@@ -19,20 +19,18 @@
 
 package org.apache.hadoop.hdds.scm;
 
-import org.apache.hadoop.conf.Configuration;
+import java.net.InetSocketAddress;
+
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.utils.HddsServerUtil;
 
+import static org.hamcrest.core.Is.is;
+import static org.junit.Assert.assertThat;
 import org.junit.Rule;
 import org.junit.Test;
 import org.junit.rules.ExpectedException;
 import org.junit.rules.Timeout;
 
-import java.net.InetSocketAddress;
-
-import static org.hamcrest.core.Is.is;
-import static org.junit.Assert.assertThat;
-
 /**
  * Test the HDDS server side utilities.
  */
@@ -50,7 +48,7 @@ public class TestHddsServerUtil {
    */
   @Test
   public void testMissingScmDataNodeAddress() {
-    final Configuration conf = new OzoneConfiguration();
+    final OzoneConfiguration conf = new OzoneConfiguration();
     thrown.expect(IllegalArgumentException.class);
     HddsServerUtil.getScmAddressForDataNodes(conf);
   }
@@ -62,7 +60,7 @@ public class TestHddsServerUtil {
    */
   @Test
   public void testGetScmDataNodeAddress() {
-    final Configuration conf = new OzoneConfiguration();
+    final OzoneConfiguration conf = new OzoneConfiguration();
 
     // First try a client address with just a host name. Verify it falls
     // back to the default port.
@@ -110,7 +108,7 @@ public class TestHddsServerUtil {
    */
   @Test
   public void testScmClientBindHostDefault() {
-    final Configuration conf = new OzoneConfiguration();
+    final OzoneConfiguration conf = new OzoneConfiguration();
 
     // The bind host should be 0.0.0.0 unless OZONE_SCM_CLIENT_BIND_HOST_KEY
     // is set differently.
@@ -156,7 +154,7 @@ public class TestHddsServerUtil {
    */
   @Test
   public void testScmDataNodeBindHostDefault() {
-    final Configuration conf = new OzoneConfiguration();
+    final OzoneConfiguration conf = new OzoneConfiguration();
 
     // The bind host should be 0.0.0.0 unless OZONE_SCM_DATANODE_BIND_HOST_KEY
     // is set differently.
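
These test hunks all follow one recipe: declare the concrete
OzoneConfiguration, which implements ConfigurationSource, so a single
instance satisfies both the new interface-based signatures and any remaining
legacy ones. A minimal sketch, reusing only keys that appear in this patch:

import java.net.InetSocketAddress;

import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.hdds.utils.HddsServerUtil;

import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_DATANODE_ADDRESS_KEY;

/** Illustrative sketch only, not part of this commit. */
public final class TestConfSketch {
  public static void main(String[] args) {
    OzoneConfiguration conf = new OzoneConfiguration();
    conf.set(OZONE_SCM_DATANODE_ADDRESS_KEY, "host123:100");
    // Works with any API that takes ConfigurationSource, without a cast
    // to the Hadoop Configuration base type.
    InetSocketAddress address =
        HddsServerUtil.getScmAddressForDataNodes(conf);
    System.out.println(address);
  }
}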
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/TestHddsServerUtils.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/TestHddsServerUtils.java
index d4cc5a4..56d265a 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/TestHddsServerUtils.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/TestHddsServerUtils.java
@@ -18,7 +18,10 @@
 
 package org.apache.hadoop.hdds.scm;
 
-import org.apache.hadoop.conf.Configuration;
+import java.io.File;
+import java.net.InetSocketAddress;
+import java.util.concurrent.TimeUnit;
+
 import org.apache.hadoop.hdds.HddsConfigKeys;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.server.ServerUtils;
@@ -26,6 +29,12 @@ import org.apache.hadoop.hdds.utils.HddsServerUtil;
 import org.apache.hadoop.test.PathUtils;
 
 import org.apache.commons.io.FileUtils;
+import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_CLIENT_ADDRESS_KEY;
+import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_DATANODE_ADDRESS_KEY;
+import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_DATANODE_PORT_DEFAULT;
+import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_NAMES;
+import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_STALENODE_INTERVAL;
+import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertTrue;
 import org.junit.Rule;
 import org.junit.Test;
@@ -34,17 +43,6 @@ import org.junit.rules.Timeout;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import java.io.File;
-import java.net.InetSocketAddress;
-import java.util.concurrent.TimeUnit;
-
-import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_CLIENT_ADDRESS_KEY;
-import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_DATANODE_ADDRESS_KEY;
-import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_DATANODE_PORT_DEFAULT;
-import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_NAMES;
-import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_STALENODE_INTERVAL;
-import static org.junit.Assert.assertEquals;
-
 /**
  * Unit tests for {@link HddsServerUtil}.
  */
@@ -65,7 +63,7 @@ public class TestHddsServerUtils {
   @SuppressWarnings("StringSplitter")
   public void testGetDatanodeAddressWithPort() {
     final String scmHost = "host123:100";
-    final Configuration conf = new OzoneConfiguration();
+    final OzoneConfiguration conf = new OzoneConfiguration();
     conf.set(OZONE_SCM_DATANODE_ADDRESS_KEY, scmHost);
     final InetSocketAddress address =
         HddsServerUtil.getScmAddressForDataNodes(conf);
@@ -79,7 +77,7 @@ public class TestHddsServerUtils {
   @Test
   public void testGetDatanodeAddressWithoutPort() {
     final String scmHost = "host123";
-    final Configuration conf = new OzoneConfiguration();
+    final OzoneConfiguration conf = new OzoneConfiguration();
     conf.set(OZONE_SCM_DATANODE_ADDRESS_KEY, scmHost);
     final InetSocketAddress address =
         HddsServerUtil.getScmAddressForDataNodes(conf);
@@ -94,7 +92,7 @@ public class TestHddsServerUtils {
   @Test
   public void testDatanodeAddressFallbackToClientNoPort() {
     final String scmHost = "host123";
-    final Configuration conf = new OzoneConfiguration();
+    final OzoneConfiguration conf = new OzoneConfiguration();
     conf.set(OZONE_SCM_CLIENT_ADDRESS_KEY, scmHost);
     final InetSocketAddress address =
         HddsServerUtil.getScmAddressForDataNodes(conf);
@@ -111,7 +109,7 @@ public class TestHddsServerUtils {
   @SuppressWarnings("StringSplitter")
   public void testDatanodeAddressFallbackToClientWithPort() {
     final String scmHost = "host123:100";
-    final Configuration conf = new OzoneConfiguration();
+    final OzoneConfiguration conf = new OzoneConfiguration();
     conf.set(OZONE_SCM_CLIENT_ADDRESS_KEY, scmHost);
     final InetSocketAddress address =
         HddsServerUtil.getScmAddressForDataNodes(conf);
@@ -126,7 +124,7 @@ public class TestHddsServerUtils {
   @Test
   public void testDatanodeAddressFallbackToScmNamesNoPort() {
     final String scmHost = "host123";
-    final Configuration conf = new OzoneConfiguration();
+    final OzoneConfiguration conf = new OzoneConfiguration();
     conf.set(OZONE_SCM_NAMES, scmHost);
     final InetSocketAddress address =
         HddsServerUtil.getScmAddressForDataNodes(conf);
@@ -143,7 +141,7 @@ public class TestHddsServerUtils {
   @SuppressWarnings("StringSplitter")
   public void testDatanodeAddressFallbackToScmNamesWithPort() {
     final String scmHost = "host123:100";
-    final Configuration conf = new OzoneConfiguration();
+    final OzoneConfiguration conf = new OzoneConfiguration();
     conf.set(OZONE_SCM_NAMES, scmHost);
     final InetSocketAddress address =
         HddsServerUtil.getScmAddressForDataNodes(conf);
@@ -158,7 +156,7 @@ public class TestHddsServerUtils {
   @Test
   public void testClientFailsWithMultipleScmNames() {
     final String scmHost = "host123,host456";
-    final Configuration conf = new OzoneConfiguration();
+    final OzoneConfiguration conf = new OzoneConfiguration();
     conf.set(OZONE_SCM_NAMES, scmHost);
     thrown.expect(IllegalArgumentException.class);
     HddsServerUtil.getScmAddressForDataNodes(conf);
@@ -172,7 +170,7 @@ public class TestHddsServerUtils {
     final File testDir = PathUtils.getTestDir(TestHddsServerUtils.class);
     final File dbDir = new File(testDir, "scmDbDir");
     final File metaDir = new File(testDir, "metaDir");   // should be ignored.
-    final Configuration conf = new OzoneConfiguration();
+    final OzoneConfiguration conf = new OzoneConfiguration();
     conf.set(ScmConfigKeys.OZONE_SCM_DB_DIRS, dbDir.getPath());
     conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, metaDir.getPath());
 
@@ -192,7 +190,7 @@ public class TestHddsServerUtils {
   public void testGetScmDbDirWithFallback() {
     final File testDir = PathUtils.getTestDir(TestHddsServerUtils.class);
     final File metaDir = new File(testDir, "metaDir");
-    final Configuration conf = new OzoneConfiguration();
+    final OzoneConfiguration conf = new OzoneConfiguration();
     conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, metaDir.getPath());
     try {
       assertEquals(metaDir, ServerUtils.getScmDbDir(conf));
@@ -210,7 +208,7 @@ public class TestHddsServerUtils {
 
   @Test
   public void testGetStaleNodeInterval() {
-    final Configuration conf = new OzoneConfiguration();
+    final OzoneConfiguration conf = new OzoneConfiguration();
 
     // Reset OZONE_SCM_STALENODE_INTERVAL to 300s, which is
     // larger than the max limit value.
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/TestStorageContainerManagerHttpServer.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/TestStorageContainerManagerHttpServer.java
index 0376864..24c1449 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/TestStorageContainerManagerHttpServer.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/TestStorageContainerManagerHttpServer.java
@@ -26,8 +26,8 @@ import java.net.URLConnection;
 import java.util.Arrays;
 import java.util.Collection;
 
-import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileUtil;
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.scm.server.StorageContainerManagerHttpServer;
 import org.apache.hadoop.hdfs.web.URLConnectionFactory;
 import org.apache.hadoop.http.HttpConfig;
@@ -54,7 +54,7 @@ public class TestStorageContainerManagerHttpServer {
       .getTempPath(TestStorageContainerManagerHttpServer.class.getSimpleName());
   private static String keystoresDir;
   private static String sslConfDir;
-  private static Configuration conf;
+  private static OzoneConfiguration conf;
   private static URLConnectionFactory connectionFactory;
 
   @Parameters public static Collection<Object[]> policy() {
@@ -76,7 +76,7 @@ public class TestStorageContainerManagerHttpServer {
     File base = new File(BASEDIR);
     FileUtil.fullyDelete(base);
     base.mkdirs();
-    conf = new Configuration();
+    conf = new OzoneConfiguration();
     keystoresDir = new File(BASEDIR).getAbsolutePath();
     sslConfDir = KeyStoreTestUtil.getClasspathDir(
         TestStorageContainerManagerHttpServer.class);
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestCloseContainerEventHandler.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestCloseContainerEventHandler.java
index 8891752..f567500 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestCloseContainerEventHandler.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestCloseContainerEventHandler.java
@@ -17,11 +17,13 @@
 
 package org.apache.hadoop.hdds.scm.container;
 
-import org.apache.commons.lang3.RandomUtils;
-import org.apache.hadoop.conf.Configuration;
+import java.io.File;
+import java.io.IOException;
+
 import org.apache.hadoop.conf.StorageUnit;
 import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.hdds.HddsConfigKeys;
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
 import org.apache.hadoop.hdds.scm.ScmConfigKeys;
@@ -33,25 +35,23 @@ import org.apache.hadoop.hdds.server.events.EventQueue;
 import org.apache.hadoop.ozone.OzoneConsts;
 import org.apache.hadoop.ozone.container.common.SCMTestUtils;
 import org.apache.hadoop.test.GenericTestUtils;
-import org.junit.AfterClass;
-import org.junit.Assert;
-import org.junit.BeforeClass;
-import org.junit.Test;
-
-import java.io.File;
-import java.io.IOException;
 
-import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_CONTAINER_SIZE_DEFAULT;
+import org.apache.commons.lang3.RandomUtils;
 import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_CONTAINER_SIZE;
+import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_CONTAINER_SIZE_DEFAULT;
 import static org.apache.hadoop.hdds.scm.events.SCMEvents.CLOSE_CONTAINER;
 import static org.apache.hadoop.hdds.scm.events.SCMEvents.DATANODE_COMMAND;
+import org.junit.AfterClass;
+import org.junit.Assert;
+import org.junit.BeforeClass;
+import org.junit.Test;
 
 /**
  * Tests the closeContainerEventHandler class.
  */
 public class TestCloseContainerEventHandler {
 
-  private static Configuration configuration;
+  private static OzoneConfiguration configuration;
   private static MockNodeManager nodeManager;
   private static SCMPipelineManager pipelineManager;
   private static SCMContainerManager containerManager;
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerReportHandler.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerReportHandler.java
index 41585bc..c7ec835 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerReportHandler.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerReportHandler.java
@@ -16,7 +16,7 @@
  */
 package org.apache.hadoop.hdds.scm.container;
 
-import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdds.conf.ConfigurationSource;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
@@ -59,7 +59,7 @@ public class TestContainerReportHandler {
 
   @Before
   public void setup() throws IOException {
-    final Configuration conf = new OzoneConfiguration();
+    final ConfigurationSource conf = new OzoneConfiguration();
     this.nodeManager = new MockNodeManager(true, 10);
     this.containerManager = Mockito.mock(ContainerManager.class);
     this.containerStateManager = new ContainerStateManager(conf);
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestIncrementalContainerReportHandler.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestIncrementalContainerReportHandler.java
index 7961ba8..3c9e7b6 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestIncrementalContainerReportHandler.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestIncrementalContainerReportHandler.java
@@ -17,7 +17,7 @@
  */
 package org.apache.hadoop.hdds.scm.container;
 
-import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdds.conf.ConfigurationSource;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
@@ -55,7 +55,7 @@ public class TestIncrementalContainerReportHandler {
 
   @Before
   public void setup() throws IOException {
-    final Configuration conf = new OzoneConfiguration();
+    final ConfigurationSource conf = new OzoneConfiguration();
     this.containerManager = Mockito.mock(ContainerManager.class);
     this.nodeManager = Mockito.mock(NodeManager.class);
     this.containerStateManager = new ContainerStateManager(conf);
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestReplicationManager.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestReplicationManager.java
index 87d7655..9a48dbb 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestReplicationManager.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestReplicationManager.java
@@ -18,7 +18,7 @@
 
 package org.apache.hadoop.hdds.scm.container;
 
-import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdds.conf.ConfigurationSource;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleState;
@@ -71,7 +71,7 @@ public class TestReplicationManager {
 
   @Before
   public void setup() throws IOException, InterruptedException {
-    final Configuration conf = new OzoneConfiguration();
+    final ConfigurationSource conf = new OzoneConfiguration();
     final ContainerManager containerManager =
         Mockito.mock(ContainerManager.class);
     eventQueue = new EventQueue();
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestSCMContainerManager.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestSCMContainerManager.java
index ac4e8d1..75d2712 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestSCMContainerManager.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestSCMContainerManager.java
@@ -16,23 +16,41 @@
  */
 package org.apache.hadoop.hdds.scm.container;
 
-import org.apache.hadoop.conf.Configuration;
+import java.io.File;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Optional;
+import java.util.Random;
+import java.util.Set;
+import java.util.TreeSet;
+import java.util.UUID;
+import java.util.concurrent.CompletableFuture;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.stream.Collectors;
+import java.util.stream.IntStream;
+
 import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.hdds.HddsConfigKeys;
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.protocol.DatanodeDetails;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleEvent;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleState;
+import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ContainerReplicaProto;
 import org.apache.hadoop.hdds.scm.ScmConfigKeys;
 import org.apache.hadoop.hdds.scm.XceiverClientManager;
 import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
-import org.apache.hadoop.hdds.protocol.DatanodeDetails;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.ContainerReplicaProto;
 import org.apache.hadoop.hdds.scm.pipeline.SCMPipelineManager;
 import org.apache.hadoop.hdds.server.events.EventQueue;
 import org.apache.hadoop.ozone.OzoneConsts;
 import org.apache.hadoop.ozone.container.common.SCMTestUtils;
 import org.apache.hadoop.test.GenericTestUtils;
+
 import org.junit.AfterClass;
 import org.junit.Assert;
 import org.junit.Before;
@@ -41,25 +59,6 @@ import org.junit.Rule;
 import org.junit.Test;
 import org.junit.rules.ExpectedException;
 
-import java.io.File;
-import java.io.IOException;
-import java.util.Random;
-import java.util.Set;
-import java.util.TreeSet;
-import java.util.UUID;
-import java.util.Iterator;
-import java.util.Optional;
-import java.util.List;
-import java.util.ArrayList;
-import java.util.concurrent.ExecutorService;
-import java.util.concurrent.Executors;
-import java.util.concurrent.CompletableFuture;
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.atomic.AtomicBoolean;
-
-import java.util.stream.Collectors;
-import java.util.stream.IntStream;
-
 /**
  * Tests for Container ContainerManager.
  */
@@ -79,7 +78,7 @@ public class TestSCMContainerManager {
   public ExpectedException thrown = ExpectedException.none();
   @BeforeClass
   public static void setUp() throws Exception {
-    Configuration conf = SCMTestUtils.getConf();
+    OzoneConfiguration conf = SCMTestUtils.getConf();
 
     testDir = GenericTestUtils
         .getTestDir(TestSCMContainerManager.class.getSimpleName());
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestUnknownContainerReport.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestUnknownContainerReport.java
index 9c11caa..1c2cdd0 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestUnknownContainerReport.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestUnknownContainerReport.java
@@ -23,7 +23,7 @@ import static org.mockito.Mockito.verify;
 import java.io.IOException;
 import java.util.Iterator;
 
-import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdds.conf.ConfigurationSource;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleState;
@@ -57,7 +57,7 @@ public class TestUnknownContainerReport {
 
   @Before
   public void setup() throws IOException {
-    final Configuration conf = new OzoneConfiguration();
+    final ConfigurationSource conf = new OzoneConfiguration();
     this.nodeManager = new MockNodeManager(true, 10);
     this.containerManager = Mockito.mock(ContainerManager.class);
     this.containerStateManager = new ContainerStateManager(conf);
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestContainerPlacementFactory.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestContainerPlacementFactory.java
index a454de2..a0d5846 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestContainerPlacementFactory.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestContainerPlacementFactory.java
@@ -16,7 +16,10 @@
  */
 package org.apache.hadoop.hdds.scm.container.placement.algorithms;
 
-import org.apache.hadoop.conf.Configuration;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 import org.apache.hadoop.hdds.protocol.MockDatanodeDetails;
@@ -30,19 +33,15 @@ import org.apache.hadoop.hdds.scm.net.NetworkTopologyImpl;
 import org.apache.hadoop.hdds.scm.net.NodeSchema;
 import org.apache.hadoop.hdds.scm.net.NodeSchemaManager;
 import org.apache.hadoop.hdds.scm.node.NodeManager;
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.Test;
-import org.mockito.Mockito;
-
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.List;
 
 import static org.apache.hadoop.hdds.scm.net.NetConstants.LEAF_SCHEMA;
 import static org.apache.hadoop.hdds.scm.net.NetConstants.RACK_SCHEMA;
 import static org.apache.hadoop.hdds.scm.net.NetConstants.ROOT_SCHEMA;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Test;
 import static org.mockito.Matchers.anyObject;
+import org.mockito.Mockito;
 import static org.mockito.Mockito.when;
 
 /**
@@ -56,7 +55,7 @@ public class TestContainerPlacementFactory {
   // node storage capacity
   private final long storageCapacity = 100L;
   // configuration
-  private Configuration conf;
+  private OzoneConfiguration conf;
   // node manager
   private NodeManager nodeManager;
 
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestSCMContainerPlacementCapacity.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestSCMContainerPlacementCapacity.java
index ddca0fa..afefc9a 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestSCMContainerPlacementCapacity.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestSCMContainerPlacementCapacity.java
@@ -21,7 +21,7 @@ import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
 
-import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdds.conf.ConfigurationSource;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 import org.apache.hadoop.hdds.protocol.MockDatanodeDetails;
@@ -43,7 +43,7 @@ public class TestSCMContainerPlacementCapacity {
   @Test
   public void chooseDatanodes() throws SCMException {
     //given
-    Configuration conf = new OzoneConfiguration();
+    ConfigurationSource conf = new OzoneConfiguration();
 
     List<DatanodeDetails> datanodes = new ArrayList<>();
     for (int i = 0; i < 7; i++) {
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestSCMContainerPlacementRackAware.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestSCMContainerPlacementRackAware.java
index 992f1c5..f659b7a 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestSCMContainerPlacementRackAware.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestSCMContainerPlacementRackAware.java
@@ -21,7 +21,7 @@ import java.util.Arrays;
 import java.util.Collection;
 import java.util.List;
 
-import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdds.conf.ConfigurationSource;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 import org.apache.hadoop.hdds.protocol.MockDatanodeDetails;
@@ -60,7 +60,7 @@ import static org.mockito.Mockito.when;
 @RunWith(Parameterized.class)
 public class TestSCMContainerPlacementRackAware {
   private NetworkTopology cluster;
-  private Configuration conf;
+  private ConfigurationSource conf;
   private NodeManager nodeManager;
   private Integer datanodeCount;
   private List<DatanodeDetails> datanodes = new ArrayList<>();
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestSCMContainerPlacementRandom.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestSCMContainerPlacementRandom.java
index 91509a0..7976246 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestSCMContainerPlacementRandom.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestSCMContainerPlacementRandom.java
@@ -19,7 +19,7 @@ package org.apache.hadoop.hdds.scm.container.placement.algorithms;
 import java.util.ArrayList;
 import java.util.List;
 
-import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdds.conf.ConfigurationSource;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 import org.apache.hadoop.hdds.protocol.MockDatanodeDetails;
@@ -42,7 +42,7 @@ public class TestSCMContainerPlacementRandom {
   @Test
   public void chooseDatanodes() throws SCMException {
     //given
-    Configuration conf = new OzoneConfiguration();
+    ConfigurationSource conf = new OzoneConfiguration();
 
     List<DatanodeDetails> datanodes = new ArrayList<>();
     for (int i = 0; i < 5; i++) {
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestContainerPlacement.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestContainerPlacement.java
index 8808546..de027ed 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestContainerPlacement.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestContainerPlacement.java
@@ -18,20 +18,23 @@
 
 package org.apache.hadoop.hdds.scm.node;
 
-import org.apache.commons.io.IOUtils;
-import org.apache.hadoop.conf.Configuration;
+import java.io.File;
+import java.io.IOException;
+import java.util.List;
+import java.util.concurrent.TimeoutException;
+
 import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.hdds.HddsConfigKeys;
+import org.apache.hadoop.hdds.conf.ConfigurationSource;
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.protocol.DatanodeDetails;
+import org.apache.hadoop.hdds.scm.PlacementPolicy;
 import org.apache.hadoop.hdds.scm.ScmConfigKeys;
 import org.apache.hadoop.hdds.scm.TestUtils;
 import org.apache.hadoop.hdds.scm.XceiverClientManager;
 import org.apache.hadoop.hdds.scm.container.ContainerInfo;
 import org.apache.hadoop.hdds.scm.container.SCMContainerManager;
-import org.apache.hadoop.hdds.scm.PlacementPolicy;
-import org.apache.hadoop.hdds.scm.container.placement.algorithms
-    .SCMContainerPlacementCapacity;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdds.protocol.DatanodeDetails;
+import org.apache.hadoop.hdds.scm.container.placement.algorithms.SCMContainerPlacementCapacity;
 import org.apache.hadoop.hdds.scm.events.SCMEvents;
 import org.apache.hadoop.hdds.scm.pipeline.PipelineManager;
 import org.apache.hadoop.hdds.scm.pipeline.SCMPipelineManager;
@@ -40,25 +43,18 @@ import org.apache.hadoop.hdds.server.events.EventQueue;
 import org.apache.hadoop.ozone.OzoneConsts;
 import org.apache.hadoop.ozone.container.common.SCMTestUtils;
 import org.apache.hadoop.test.PathUtils;
+
+import org.apache.commons.io.IOUtils;
+import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState.HEALTHY;
+import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_DB_CACHE_SIZE_DEFAULT;
+import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_DB_CACHE_SIZE_MB;
+import static org.junit.Assert.assertEquals;
 import org.junit.Ignore;
 import org.junit.Rule;
 import org.junit.Test;
 import org.junit.rules.ExpectedException;
 import org.mockito.Mockito;
 
-import java.io.File;
-import java.io.IOException;
-import java.util.List;
-import java.util.concurrent.TimeoutException;
-
-import static org.apache.hadoop.hdds.scm.ScmConfigKeys
-    .OZONE_SCM_DB_CACHE_SIZE_DEFAULT;
-import static org.apache.hadoop.hdds.scm.ScmConfigKeys
-    .OZONE_SCM_DB_CACHE_SIZE_MB;
-import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState
-    .HEALTHY;
-import static org.junit.Assert.assertEquals;
-
 /**
  * Test for different container placement policy.
  */
@@ -101,7 +97,7 @@ public class TestContainerPlacement {
     return nodeManager;
   }
 
-  SCMContainerManager createContainerManager(Configuration config,
+  SCMContainerManager createContainerManager(ConfigurationSource config,
       NodeManager scmNodeManager) throws IOException {
     EventQueue eventQueue = new EventQueue();
     final int cacheSize = config.getInt(OZONE_SCM_DB_CACHE_SIZE_MB,
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/MockRatisPipelineProvider.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/MockRatisPipelineProvider.java
index 340ebf5..f9fb150 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/MockRatisPipelineProvider.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/MockRatisPipelineProvider.java
@@ -18,16 +18,16 @@
 
 package org.apache.hadoop.hdds.scm.pipeline;
 
-import org.apache.hadoop.conf.Configuration;
+import java.io.IOException;
+import java.util.List;
+
+import org.apache.hadoop.hdds.conf.ConfigurationSource;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
 import org.apache.hadoop.hdds.scm.node.NodeManager;
 import org.apache.hadoop.hdds.server.events.EventPublisher;
 import org.apache.hadoop.hdds.server.events.EventQueue;
 
-import java.io.IOException;
-import java.util.List;
-
 /**
  * Mock Ratis Pipeline Provider for Mock Nodes.
  */
@@ -37,27 +37,27 @@ public class MockRatisPipelineProvider extends RatisPipelineProvider {
   private  boolean isHealthy;
 
   public MockRatisPipelineProvider(NodeManager nodeManager,
-      PipelineStateManager stateManager, Configuration conf,
+      PipelineStateManager stateManager, ConfigurationSource conf,
       EventPublisher eventPublisher, boolean autoOpen) {
     super(nodeManager, stateManager, conf, eventPublisher);
     autoOpenPipeline = autoOpen;
   }
 
   public MockRatisPipelineProvider(NodeManager nodeManager,
-                            PipelineStateManager stateManager,
-                            Configuration conf) {
+      PipelineStateManager stateManager,
+      ConfigurationSource conf) {
     super(nodeManager, stateManager, conf, new EventQueue());
   }
 
   public MockRatisPipelineProvider(NodeManager nodeManager,
-                                   PipelineStateManager stateManager,
-                                   Configuration conf, boolean isHealthy) {
+      PipelineStateManager stateManager,
+      ConfigurationSource conf, boolean isHealthy) {
     super(nodeManager, stateManager, conf, new EventQueue());
     this.isHealthy = isHealthy;
   }
 
   public MockRatisPipelineProvider(NodeManager nodeManager,
-      PipelineStateManager stateManager, Configuration conf,
+      PipelineStateManager stateManager, ConfigurationSource conf,
       EventPublisher eventPublisher) {
     super(nodeManager, stateManager, conf, eventPublisher);
     autoOpenPipeline = true;
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelinePlacementPolicy.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelinePlacementPolicy.java
index fafc4b0..b8b8622 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelinePlacementPolicy.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelinePlacementPolicy.java
@@ -18,7 +18,13 @@
 
 package org.apache.hadoop.hdds.scm.pipeline;
 
-import org.apache.hadoop.conf.Configuration;
+import java.util.ArrayList;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Set;
+import java.util.UUID;
+import java.util.stream.Collectors;
+
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 import org.apache.hadoop.hdds.protocol.MockDatanodeDetails;
@@ -26,19 +32,22 @@ import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
 import org.apache.hadoop.hdds.scm.ScmConfigKeys;
 import org.apache.hadoop.hdds.scm.container.MockNodeManager;
 import org.apache.hadoop.hdds.scm.exceptions.SCMException;
-import org.apache.hadoop.hdds.scm.net.*;
+import org.apache.hadoop.hdds.scm.net.NetConstants;
+import org.apache.hadoop.hdds.scm.net.NetworkTopology;
+import org.apache.hadoop.hdds.scm.net.NetworkTopologyImpl;
+import org.apache.hadoop.hdds.scm.net.Node;
+import org.apache.hadoop.hdds.scm.net.NodeImpl;
+import org.apache.hadoop.hdds.scm.net.NodeSchema;
+import org.apache.hadoop.hdds.scm.net.NodeSchemaManager;
 import org.apache.hadoop.hdds.scm.node.states.Node2PipelineMap;
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.Test;
-
-import java.util.*;
-import java.util.stream.Collectors;
 
 import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_DATANODE_PIPELINE_LIMIT;
 import static org.apache.hadoop.hdds.scm.net.NetConstants.LEAF_SCHEMA;
 import static org.apache.hadoop.hdds.scm.net.NetConstants.RACK_SCHEMA;
 import static org.apache.hadoop.hdds.scm.net.NetConstants.ROOT_SCHEMA;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Test;
 
 /**
  * Test for PipelinePlacementPolicy.
@@ -190,7 +199,8 @@ public class TestPipelinePlacementPolicy {
     Assert.assertTrue(anchor.getNetworkLocation().equals(
         randomNode.getNetworkLocation()));
 
-    NetworkTopology topology = new NetworkTopologyImpl(new Configuration());
+    NetworkTopology topology =
+        new NetworkTopologyImpl(new OzoneConfiguration());
     DatanodeDetails nextNode = placementPolicy.chooseNodeBasedOnRackAwareness(
         nodesWithOutRackAwareness, new ArrayList<>(
             PIPELINE_PLACEMENT_MAX_NODES_COUNT), topology, anchor);
@@ -229,7 +239,8 @@ public class TestPipelinePlacementPolicy {
   };
 
   private NetworkTopology createNetworkTopologyOnDifRacks() {
-    NetworkTopology topology = new NetworkTopologyImpl(new Configuration());
+    NetworkTopology topology =
+        new NetworkTopologyImpl(new OzoneConfiguration());
     for (Node n : NODES) {
       topology.add(n);
     }
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestSCMPipelineManager.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestSCMPipelineManager.java
index 0ab7083..fcb1c94 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestSCMPipelineManager.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestSCMPipelineManager.java
@@ -18,13 +18,6 @@
 
 package org.apache.hadoop.hdds.scm.pipeline;
 
-import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_DATANODE_PIPELINE_LIMIT;
-import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_PIPELINE_ALLOCATED_TIMEOUT;
-import static org.apache.hadoop.test.MetricsAsserts.getLongCounter;
-import static org.apache.hadoop.test.MetricsAsserts.getMetrics;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.fail;
-
 import java.io.File;
 import java.io.IOException;
 import java.util.ArrayList;
@@ -35,25 +28,30 @@ import java.util.concurrent.TimeUnit;
 import java.util.concurrent.TimeoutException;
 import java.util.stream.Collectors;
 
-import com.google.common.base.Supplier;
-import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.hdds.HddsConfigKeys;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
 import org.apache.hadoop.hdds.scm.TestUtils;
-import org.apache.hadoop.hdds.scm.exceptions.SCMException;
-import org.apache.hadoop.hdds.scm.safemode.SCMSafeModeManager;
 import org.apache.hadoop.hdds.scm.container.ContainerID;
 import org.apache.hadoop.hdds.scm.container.MockNodeManager;
-import org.apache.hadoop.hdds.scm.server.SCMDatanodeHeartbeatDispatcher
-    .PipelineReportFromDatanode;
+import org.apache.hadoop.hdds.scm.exceptions.SCMException;
+import org.apache.hadoop.hdds.scm.safemode.SCMSafeModeManager;
+import org.apache.hadoop.hdds.scm.server.SCMDatanodeHeartbeatDispatcher.PipelineReportFromDatanode;
 import org.apache.hadoop.hdds.server.events.EventQueue;
 import org.apache.hadoop.metrics2.MetricsRecordBuilder;
 import org.apache.hadoop.test.GenericTestUtils;
+
+import com.google.common.base.Supplier;
+import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_DATANODE_PIPELINE_LIMIT;
+import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_PIPELINE_ALLOCATED_TIMEOUT;
+import static org.apache.hadoop.test.MetricsAsserts.getLongCounter;
+import static org.apache.hadoop.test.MetricsAsserts.getMetrics;
 import org.junit.After;
 import org.junit.Assert;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.fail;
 import org.junit.Before;
 import org.junit.Test;
 
@@ -63,7 +61,7 @@ import org.junit.Test;
 public class TestSCMPipelineManager {
   private static MockNodeManager nodeManager;
   private static File testDir;
-  private static Configuration conf;
+  private static OzoneConfiguration conf;
 
   @Before
   public void setUp() throws Exception {
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/safemode/TestSCMSafeModeManager.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/safemode/TestSCMSafeModeManager.java
index 8966cd8..0620883 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/safemode/TestSCMSafeModeManager.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/safemode/TestSCMSafeModeManager.java
@@ -17,10 +17,6 @@
  */
 package org.apache.hadoop.hdds.scm.safemode;
 
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
-
 import java.io.File;
 import java.util.ArrayList;
 import java.util.Collections;
@@ -29,7 +25,6 @@ import java.util.UUID;
 import java.util.concurrent.atomic.AtomicBoolean;
 import java.util.concurrent.atomic.AtomicInteger;
 
-import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.hdds.HddsConfigKeys;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
@@ -37,8 +32,8 @@ import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
 import org.apache.hadoop.hdds.scm.HddsTestUtils;
 import org.apache.hadoop.hdds.scm.container.ContainerInfo;
 import org.apache.hadoop.hdds.scm.container.MockNodeManager;
-import org.apache.hadoop.hdds.scm.pipeline.*;
 import org.apache.hadoop.hdds.scm.events.SCMEvents;
+import org.apache.hadoop.hdds.scm.pipeline.MockRatisPipelineProvider;
 import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
 import org.apache.hadoop.hdds.scm.pipeline.PipelineManager;
 import org.apache.hadoop.hdds.scm.pipeline.PipelineProvider;
@@ -47,7 +42,11 @@ import org.apache.hadoop.hdds.server.events.EventHandler;
 import org.apache.hadoop.hdds.server.events.EventPublisher;
 import org.apache.hadoop.hdds.server.events.EventQueue;
 import org.apache.hadoop.test.GenericTestUtils;
+
 import org.junit.Assert;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
 import org.junit.Before;
 import org.junit.Rule;
 import org.junit.Test;
@@ -61,7 +60,7 @@ public class TestSCMSafeModeManager {
 
   private static EventQueue queue;
   private SCMSafeModeManager scmSafeModeManager;
-  private static Configuration config;
+  private static OzoneConfiguration config;
   private List<ContainerInfo> containers = Collections.emptyList();
 
   @Rule
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/common/TestEndPoint.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/common/TestEndPoint.java
index bd89b88..663ac8c 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/common/TestEndPoint.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/common/TestEndPoint.java
@@ -22,7 +22,6 @@ import java.util.List;
 import java.util.Map;
 import java.util.UUID;
 
-import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.hdds.HddsConfigKeys;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
@@ -83,7 +82,7 @@ public class TestEndPoint {
   private static RPC.Server scmServer;
   private static ScmTestMock scmServerImpl;
   private static File testDir;
-  private static Configuration config;
+  private static OzoneConfiguration config;
 
   @AfterClass
   public static void tearDown() throws Exception {
@@ -292,7 +291,7 @@ public class TestEndPoint {
 
   private EndpointStateMachine registerTaskHelper(InetSocketAddress scmAddress,
       int rpcTimeout, boolean clearDatanodeDetails) throws Exception {
-    Configuration conf = SCMTestUtils.getConf();
+    OzoneConfiguration conf = SCMTestUtils.getConf();
     EndpointStateMachine rpcEndPoint =
         createEndpoint(conf,
             scmAddress, rpcTimeout);
@@ -453,7 +452,7 @@ public class TestEndPoint {
 
   private StateContext heartbeatTaskHelper(InetSocketAddress scmAddress,
       int rpcTimeout) throws Exception {
-    Configuration conf = SCMTestUtils.getConf();
+    OzoneConfiguration conf = SCMTestUtils.getConf();
     conf.set(DFS_DATANODE_DATA_DIR_KEY, testDir.getAbsolutePath());
     conf.set(OZONE_METADATA_DIRS, testDir.getAbsolutePath());
     // Mini Ozone cluster will not come up if the random port setting is not true, since
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/placement/TestContainerPlacement.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/placement/TestContainerPlacement.java
index f0b1cbb..a0cf957 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/placement/TestContainerPlacement.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/placement/TestContainerPlacement.java
@@ -16,27 +16,24 @@
  */
 package org.apache.hadoop.ozone.container.placement;
 
-import org.apache.commons.math3.stat.descriptive.DescriptiveStatistics;
-import org.apache.hadoop.conf.Configuration;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Random;
+
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 import org.apache.hadoop.hdds.scm.container.MockNodeManager;
-import org.apache.hadoop.hdds.scm.container.placement.algorithms
-    .SCMContainerPlacementCapacity;
-import org.apache.hadoop.hdds.scm.container.placement.algorithms
-    .SCMContainerPlacementRandom;
+import org.apache.hadoop.hdds.scm.container.placement.algorithms.SCMContainerPlacementCapacity;
+import org.apache.hadoop.hdds.scm.container.placement.algorithms.SCMContainerPlacementRandom;
 import org.apache.hadoop.hdds.scm.exceptions.SCMException;
 import org.apache.hadoop.hdds.scm.node.NodeManager;
-import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 import org.apache.hadoop.ozone.OzoneConsts;
-import org.junit.Assert;
-import org.junit.Test;
-
-import java.util.ArrayList;
-import java.util.List;
-import java.util.Random;
 
-import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState
-    .HEALTHY;
+import org.apache.commons.math3.stat.descriptive.DescriptiveStatistics;
+import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState.HEALTHY;
+import org.junit.Assert;
 import static org.junit.Assert.assertEquals;
+import org.junit.Test;
 
 /**
  * Asserts that allocation strategy works as expected.
@@ -80,10 +77,11 @@ public class TestContainerPlacement {
         .getStandardDeviation(), 0.001);
 
     SCMContainerPlacementCapacity capacityPlacer = new
-        SCMContainerPlacementCapacity(nodeManagerCapacity, new Configuration(),
+        SCMContainerPlacementCapacity(nodeManagerCapacity,
+        new OzoneConfiguration(),
         null, true, null);
     SCMContainerPlacementRandom randomPlacer = new
-        SCMContainerPlacementRandom(nodeManagerRandom, new Configuration(),
+        SCMContainerPlacementRandom(nodeManagerRandom, new OzoneConfiguration(),
         null, true, null);
 
     for (int x = 0; x < opsCount; x++) {
diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ContainerOperationClient.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ContainerOperationClient.java
index 62bb385..1607f63 100644
--- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ContainerOperationClient.java
+++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ContainerOperationClient.java
@@ -24,6 +24,7 @@ import java.util.List;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.conf.StorageUnit;
+import org.apache.hadoop.hdds.conf.ConfigurationSource;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.protocol.SCMSecurityProtocol;
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerDataProto;
@@ -42,6 +43,7 @@ import org.apache.hadoop.hdds.scm.protocolPB.StorageContainerLocationProtocolPB;
 import org.apache.hadoop.hdds.scm.storage.ContainerProtocolCalls;
 import org.apache.hadoop.hdds.security.x509.SecurityConfig;
 import org.apache.hadoop.hdds.tracing.TracingUtil;
+import org.apache.hadoop.hdds.utils.LegacyHadoopConfigurationSource;
 import org.apache.hadoop.ipc.Client;
 import org.apache.hadoop.ipc.ProtobufRpcEngine;
 import org.apache.hadoop.ipc.RPC;
@@ -51,9 +53,9 @@ import org.apache.hadoop.security.UserGroupInformation;
 
 import com.google.common.base.Preconditions;
 import static org.apache.hadoop.hdds.HddsUtils.getScmAddressForClients;
-import static org.apache.hadoop.hdds.utils.HddsServerUtil.getScmSecurityClient;
 import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_CONTAINER_SIZE;
 import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_CONTAINER_SIZE_DEFAULT;
+import static org.apache.hadoop.hdds.utils.HddsServerUtil.getScmSecurityClient;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -76,7 +78,7 @@ public class ContainerOperationClient implements ScmClient {
 
   private final XceiverClientManager xceiverClientManager;
 
-  public ContainerOperationClient(Configuration conf) throws IOException {
+  public ContainerOperationClient(OzoneConfiguration conf) throws IOException {
     storageContainerLocationClient = newContainerRpcClient(conf);
     this.xceiverClientManager = newXCeiverClientManager(conf);
     containerSizeB = (int) conf.getStorageSize(OZONE_SCM_CONTAINER_SIZE,
@@ -93,7 +95,7 @@ public class ContainerOperationClient implements ScmClient {
     }
   }
 
-  private XceiverClientManager newXCeiverClientManager(Configuration conf)
+  private XceiverClientManager newXCeiverClientManager(ConfigurationSource conf)
       throws IOException {
     XceiverClientManager manager;
     if (OzoneSecurityUtil.isSecurityEnabled(conf)) {
@@ -112,14 +114,15 @@ public class ContainerOperationClient implements ScmClient {
   }
 
   public static StorageContainerLocationProtocol newContainerRpcClient(
-      Configuration conf) throws IOException {
+      ConfigurationSource configSource) throws IOException {
 
     Class<StorageContainerLocationProtocolPB> protocol =
         StorageContainerLocationProtocolPB.class;
-
+    Configuration conf =
+        LegacyHadoopConfigurationSource.asHadoopConfiguration(configSource);
     RPC.setProtocolEngine(conf, protocol, ProtobufRpcEngine.class);
     long version = RPC.getProtocolVersion(protocol);
-    InetSocketAddress scmAddress = getScmAddressForClients(conf);
+    InetSocketAddress scmAddress = getScmAddressForClients(configSource);
     UserGroupInformation user = UserGroupInformation.getCurrentUser();
     SocketFactory socketFactory = NetUtils.getDefaultSocketFactory(conf);
     int rpcTimeOut = Client.getRpcTimeout(conf);
@@ -131,7 +134,7 @@ public class ContainerOperationClient implements ScmClient {
     StorageContainerLocationProtocolClientSideTranslatorPB client =
         new StorageContainerLocationProtocolClientSideTranslatorPB(rpcProxy);
     return TracingUtil.createProxy(
-        client, StorageContainerLocationProtocol.class, conf);
+        client, StorageContainerLocationProtocol.class, configSource);
   }
 
   @Override
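
[Editorial note] The hunk above shows the bridge idiom used throughout this patch: new code accepts the lightweight ConfigurationSource, and a full Hadoop Configuration is materialized only at the boundary of legacy APIs (RPC.setProtocolEngine, NetUtils, Client) that still require one. A minimal sketch of that idiom follows, assuming only the asHadoopConfiguration helper visible in this diff; the wrapper class and method names below are otherwise illustrative:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdds.conf.ConfigurationSource;
    import org.apache.hadoop.hdds.utils.LegacyHadoopConfigurationSource;

    public final class LegacyApiBridge {

      private LegacyApiBridge() {
      }

      // Keep the narrow interface on the public signature; convert to the
      // heavyweight Hadoop Configuration only for the legacy call.
      public static Configuration forLegacyRpc(ConfigurationSource source) {
        return LegacyHadoopConfigurationSource.asHadoopConfiguration(source);
      }
    }

Keeping the conversion local to the RPC setup, as newContainerRpcClient does above, lets the rest of the call chain stay on the narrow interface.
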
diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/ObjectStore.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/ObjectStore.java
index b5fcd18..b3efbda 100644
--- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/ObjectStore.java
+++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/ObjectStore.java
@@ -26,7 +26,7 @@ import java.util.List;
 import java.util.NoSuchElementException;
 import java.util.Objects;
 
-import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdds.conf.ConfigurationSource;
 import org.apache.hadoop.crypto.key.KeyProvider;
 import org.apache.hadoop.hdds.scm.client.HddsClientUtils;
 import org.apache.hadoop.hdds.tracing.TracingUtil;
@@ -67,7 +67,7 @@ public class ObjectStore {
    * @param conf Configuration object.
    * @param proxy ClientProtocol proxy.
    */
-  public ObjectStore(Configuration conf, ClientProtocol proxy) {
+  public ObjectStore(ConfigurationSource conf, ClientProtocol proxy) {
     this.proxy = TracingUtil.createProxy(proxy, ClientProtocol.class, conf);
     this.listCacheSize = HddsClientUtils.getListCacheSize(conf);
   }
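
[Editorial note] ObjectStore now takes a ConfigurationSource, and assuming, as the patch implies, that OzoneConfiguration implements that interface, existing call sites keep compiling unchanged. A hedged usage sketch; the proxy is a parameter here so the example stays self-contained, where in practice it would come from the Ozone client factory:

    import org.apache.hadoop.hdds.conf.OzoneConfiguration;
    import org.apache.hadoop.ozone.client.ObjectStore;
    import org.apache.hadoop.ozone.client.protocol.ClientProtocol;

    public final class ObjectStoreUsage {

      private ObjectStoreUsage() {
      }

      // Passing OzoneConfiguration still works because it satisfies the new
      // ConfigurationSource parameter type (assumption based on this patch;
      // verify against the hdds-config module).
      public static ObjectStore open(ClientProtocol proxy) {
        return new ObjectStore(new OzoneConfiguration(), proxy);
      }
    }
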
diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneBucket.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneBucket.java
index e766ebd..87710ea 100644
--- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneBucket.java
+++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneBucket.java
@@ -21,7 +21,7 @@ package org.apache.hadoop.ozone.client;
 import com.fasterxml.jackson.annotation.JsonIgnore;
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Preconditions;
-import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdds.conf.ConfigurationSource;
 import org.apache.hadoop.hdds.protocol.StorageType;
 import org.apache.hadoop.hdds.client.ReplicationFactor;
 import org.apache.hadoop.hdds.client.ReplicationType;
@@ -104,7 +104,7 @@ public class OzoneBucket extends WithMetadata {
   private OzoneObj ozoneObj;
 
 
-  private OzoneBucket(Configuration conf, String volumeName,
+  private OzoneBucket(ConfigurationSource conf, String volumeName,
       String bucketName, ReplicationFactor defaultReplication,
       ReplicationType defaultReplicationType, ClientProtocol proxy) {
     Preconditions.checkNotNull(proxy, "Client proxy is not set.");
@@ -133,7 +133,7 @@ public class OzoneBucket extends WithMetadata {
         .setStoreType(OzoneObj.StoreType.OZONE).build();
   }
   @SuppressWarnings("parameternumber")
-  public OzoneBucket(Configuration conf, ClientProtocol proxy,
+  public OzoneBucket(ConfigurationSource conf, ClientProtocol proxy,
       String volumeName, String bucketName, StorageType storageType,
       Boolean versioning, long creationTime, Map<String, String> metadata,
       String encryptionKeyName) {
... 2856 lines suppressed ...
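
[Editorial note] The suppressed lines repeat the same mechanical substitution across the rest of the tree. The payoff is that read-only consumers of configuration can depend on the small hdds-config module rather than on Hadoop's Configuration class. A sketch of such a consumer, assuming the new interface mirrors Hadoop's getInt accessor; the key name and default below are illustrative, not taken from the patch:

    import org.apache.hadoop.hdds.conf.ConfigurationSource;

    public final class ListCacheSettings {

      private ListCacheSettings() {
      }

      // Reads a single setting through the narrow interface; no dependency
      // on org.apache.hadoop.conf.Configuration is needed here.
      public static int listCacheSize(ConfigurationSource conf) {
        return conf.getInt("ozone.client.list.cache.size.example", 1000);
      }
    }
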

