Posted to commits@ozone.apache.org by xy...@apache.org on 2020/01/31 22:39:03 UTC

[hadoop-ozone] branch HDDS-2665-ofs updated: HDDS-2665. Merge master to HDDS-2665-ofs branch (#511)

This is an automated email from the ASF dual-hosted git repository.

xyao pushed a commit to branch HDDS-2665-ofs
in repository https://gitbox.apache.org/repos/asf/hadoop-ozone.git


The following commit(s) were added to refs/heads/HDDS-2665-ofs by this push:
     new 79c0c1c  HDDS-2665. Merge master to HDDS-2665-ofs branch (#511)
79c0c1c is described below

commit 79c0c1c8c2279b3b83c4b977959f09154de9a20d
Author: Siyao Meng <50...@users.noreply.github.com>
AuthorDate: Fri Jan 31 14:38:55 2020 -0800

    HDDS-2665. Merge master to HDDS-2665-ofs branch (#511)
---
 .github/buildenv/Dockerfile                        |   2 +-
 .github/workflows/post-commit.yml                  |  90 +++++++++
 .github/workflows/pr.yml                           |  90 +++++++++
 hadoop-hdds/client/pom.xml                         |   8 +
 .../apache/hadoop/hdds/conf/RatisClientConfig.java |  84 ++++++++
 .../apache/hadoop/hdds/conf/RatisGrpcConfig.java   |  63 +++---
 .../org/apache/hadoop/hdds/conf/package-info.java  |  17 +-
 .../apache/hadoop/hdds/scm/XceiverClientGrpc.java  |  25 ++-
 .../hadoop/hdds/scm/XceiverClientManager.java      |  13 --
 .../apache/hadoop/hdds/scm/XceiverClientRatis.java |  35 ++--
 .../hdds/scm/client/ContainerOperationClient.java  |   8 +-
 .../hadoop/hdds/scm/client/HddsClientUtils.java    |   4 +-
 .../hadoop/hdds/scm/storage/BlockOutputStream.java |   6 +-
 .../hadoop/hdds/scm/storage/CommitWatcher.java     |   8 +-
 .../hdds/scm/storage/DummyBlockInputStream.java    |  92 +++++++++
 .../storage/DummyBlockInputStreamWithRetry.java    |  78 ++++++++
 .../hdds/scm/storage/DummyChunkInputStream.java    |  66 +++++++
 .../hdds/scm/storage/TestBlockInputStream.java     | 106 +---------
 .../hdds/scm/storage/TestChunkInputStream.java     |  52 +----
 hadoop-hdds/common/pom.xml                         |  32 +++
 .../apache/hadoop/hdds/DFSConfigKeysLegacy.java    |  97 +++++++++
 .../java/org/apache/hadoop/hdds/HddsUtils.java     |  27 ++-
 .../java/org/apache/hadoop/hdds/StringUtils.java   |  76 +++++++
 .../hadoop/hdds/fs/AbstractSpaceUsageSource.java   |  87 ++++++++
 .../hadoop/hdds/fs/CachingSpaceUsageSource.java    | 142 ++++++++++++++
 .../main/java/org/apache/hadoop/hdds/fs/DU.java    | 136 +++++++++++++
 .../java/org/apache/hadoop/hdds/fs/DUFactory.java  |  90 +++++++++
 .../hadoop/hdds/fs/DedicatedDiskSpaceUsage.java    |  52 +++++
 .../hdds/fs/DedicatedDiskSpaceUsageFactory.java    |  86 ++++++++
 .../hadoop/hdds/fs/SaveSpaceUsageToFile.java       | 129 ++++++++++++
 .../hadoop/hdds/fs/SpaceUsageCheckFactory.java     | 145 ++++++++++++++
 .../hadoop/hdds/fs/SpaceUsageCheckParams.java      |  92 +++++++++
 .../hadoop/hdds/fs/SpaceUsagePersistence.java      |  63 ++++++
 .../apache/hadoop/hdds/fs/SpaceUsageSource.java    |  28 ++-
 .../org/apache/hadoop/hdds/fs/package-info.java    |  15 +-
 .../org/apache/hadoop/hdds/ratis/RatisHelper.java  | 129 ++++++++----
 .../apache/hadoop/hdds/recon/ReconConfigKeys.java  |   2 +
 .../org/apache/hadoop/hdds/scm/ScmConfigKeys.java  |  23 +--
 .../apache/hadoop/hdds/scm/XceiverClientSpi.java   |   3 +-
 .../apache/hadoop/hdds/scm/client/ScmClient.java   |   7 +
 .../hadoop/hdds/scm/protocol/LocatedContainer.java | 127 ------------
 .../hadoop/hdds/scm/protocol/ScmLocatedBlock.java  | 100 ----------
 .../protocol/StorageContainerLocationProtocol.java |  10 +
 ...inerLocationProtocolClientSideTranslatorPB.java |  16 ++
 .../security/x509/certificate/utils/CRLCodec.java  |  27 ++-
 .../hadoop/hdds/utils/MetadataKeyFilters.java      |  15 +-
 .../org/apache/hadoop/hdds/utils/db/DBStore.java   |   2 +-
 .../hadoop/hdds/utils/db/DBStoreBuilder.java       |   6 +-
 .../org/apache/hadoop/hdds/utils/db/RDBStore.java  |  12 +-
 .../org/apache/hadoop/hdds/utils/db/RDBTable.java  |   4 +-
 .../apache/hadoop/hdds/utils/db/StringCodec.java   |   7 +-
 .../apache/hadoop/hdds/utils/db/TableConfig.java   |   5 +-
 .../org/apache/hadoop/ozone/OzoneConfigKeys.java   |  29 +--
 .../java/org/apache/hadoop/ozone/OzoneConsts.java  |   1 -
 .../ozone/conf/DatanodeRatisServerConfig.java      | 141 +++++++++++++
 .../org/apache/hadoop/ozone/conf/package-info.java |  17 +-
 .../proto/StorageContainerLocationProtocol.proto   |  12 ++
 .../common/src/main/resources/ozone-default.xml    |  60 ++----
 .../hadoop/hdds/fs/MockSpaceUsageCheckFactory.java |  57 ++++++
 .../hadoop/hdds/fs/MockSpaceUsageCheckParams.java  |  71 +++++++
 .../hadoop/hdds/fs/MockSpaceUsagePersistence.java  |  47 +++--
 .../hadoop/hdds/fs/MockSpaceUsageSource.java       |  74 +++++++
 .../hdds/fs/TestCachingSpaceUsageSource.java       | 197 +++++++++++++++++++
 .../java/org/apache/hadoop/hdds/fs/TestDU.java     | 112 +++++++++++
 .../org/apache/hadoop/hdds/fs/TestDUFactory.java   |  57 ++++++
 .../hdds/fs/TestDedicatedDiskSpaceUsage.java       |  63 ++++++
 .../fs/TestDedicatedDiskSpaceUsageFactory.java     |  57 ++++++
 .../hadoop/hdds/fs/TestSaveSpaceUsageToFile.java   | 150 ++++++++++++++
 .../hadoop/hdds/fs/TestSpaceUsageFactory.java      | 197 +++++++++++++++++++
 .../apache/hadoop/hdds/ratis/TestRatisHelper.java  | 122 ++++++++++++
 .../token/TestOzoneBlockTokenIdentifier.java       |   5 +-
 .../x509/certificate/utils/TestCRLCodec.java       | 131 ++++++++++++-
 .../hadoop/hdds/utils/TestMetadataStore.java       |  53 +++--
 .../hadoop/hdds/utils/db/TestDBConfigFromFile.java |   7 +-
 .../apache/hadoop/hdds/utils/db/TestRDBStore.java  |  25 +--
 .../hadoop/hdds/utils/db/TestRDBTableStore.java    |   6 +-
 .../hdds/utils/db/TestTypedRDBTableStore.java      |   5 +-
 hadoop-hdds/container-service/pom.xml              |  14 ++
 .../apache/hadoop/ozone/HddsDatanodeService.java   |  44 ++---
 .../container/common/helpers/ContainerMetrics.java |   4 +-
 .../common/statemachine/StateContext.java          | 114 +++++++----
 .../common/states/datanode/InitDatanodeState.java  |   2 +
 .../states/endpoint/HeartbeatEndpointTask.java     |   9 +-
 .../server/ratis/ContainerStateMachine.java        |   9 +-
 .../transport/server/ratis/XceiverServerRatis.java |  58 ++----
 .../ozone/container/common/volume/HddsVolume.java  |  58 +++---
 .../container/common/volume/HddsVolumeChecker.java | 101 +++++-----
 .../ozone/container/common/volume/VolumeInfo.java  |  44 +++--
 .../ozone/container/common/volume/VolumeSet.java   |  11 +-
 .../ozone/container/common/volume/VolumeUsage.java | 169 ++--------------
 .../StorageContainerDatanodeProtocolPB.java        |   8 +-
 .../hadoop/ozone/TestHddsDatanodeService.java      |  10 +-
 .../hadoop/ozone/TestHddsSecureDatanodeInit.java   |  28 +--
 .../ozone/container/common/SCMTestUtils.java       |   5 +
 .../common/statemachine/TestStateContext.java      | 117 +++++++++++
 .../states/endpoint/TestHeartbeatEndpointTask.java |   8 +
 .../container/common/volume/TestHddsVolume.java    |  59 +++---
 .../common/volume/TestHddsVolumeChecker.java       |   7 +-
 .../volume/TestRoundRobinVolumeChoosingPolicy.java |  90 ++++-----
 .../container/common/volume/TestVolumeSet.java     |   4 +-
 .../common/volume/TestVolumeSetDiskChecks.java     |  10 +-
 .../src/test/resources/ozone-site.xml              |  30 +++
 .../docs/content/{shell => interface}/_index.zh.md |   8 +-
 hadoop-hdds/docs/content/shell/BucketCommands.md   |   1 +
 .../docs/content/shell/BucketCommands.zh.md        |  98 +++++++++
 hadoop-hdds/docs/content/shell/Format.md           |   4 +-
 hadoop-hdds/docs/content/shell/Format.zh.md        |  65 ++++++
 hadoop-hdds/docs/content/shell/KeyCommands.md      |   4 +-
 hadoop-hdds/docs/content/shell/KeyCommands.zh.md   | 138 +++++++++++++
 .../docs/content/shell/VolumeCommands.zh.md        | 107 ++++++++++
 hadoop-hdds/docs/content/shell/_index.zh.md        |   7 +-
 hadoop-hdds/docs/content/start/FromSource.zh.md    |  31 ++-
 hadoop-hdds/docs/content/start/Kubernetes.zh.md    |  19 +-
 hadoop-hdds/docs/content/start/Minikube.zh.md      |  24 +--
 hadoop-hdds/docs/content/start/OnPrem.zh.md        | 123 +++++-------
 .../docs/content/start/RunningViaDocker.zh.md      |  25 +--
 .../docs/content/start/StartFromDockerHub.zh.md    |  61 +++---
 hadoop-hdds/docs/content/start/_index.zh.md        |   3 +-
 hadoop-hdds/docs/dev-support/bin/generate-site.sh  |   2 +
 hadoop-hdds/framework/pom.xml                      |  25 +++
 .../apache/hadoop/hdds/server/BaseHttpServer.java  |  40 ++--
 .../hadoop/hdds/server/events/EventQueue.java      |  13 +-
 hadoop-hdds/pom.xml                                |  50 -----
 hadoop-hdds/server-scm/pom.xml                     |  43 +++-
 .../hdds/scm/container/SCMContainerManager.java    |   8 +-
 .../hadoop/hdds/scm/node/SCMNodeManager.java       |  20 +-
 .../hadoop/hdds/scm/pipeline/PipelineFactory.java  |  15 +-
 .../hadoop/hdds/scm/pipeline/PipelineManager.java  |   2 +
 .../hdds/scm/pipeline/PipelineReportHandler.java   |  38 ++--
 .../hdds/scm/pipeline/PipelineStateManager.java    |  11 +-
 .../hdds/scm/pipeline/SCMPipelineManager.java      |  68 +++++--
 ...inerLocationProtocolServerSideTranslatorPB.java |  18 ++
 .../hdds/scm/safemode/SCMSafeModeManager.java      |   2 +-
 .../hadoop/hdds/scm/safemode/SafeModeManager.java  |  14 +-
 .../hdds/scm/server/SCMClientProtocolServer.java   |   9 +-
 .../scm/TestStorageContainerManagerHttpServer.java |  25 +--
 .../hadoop/hdds/scm/node/TestSCMNodeManager.java   |  73 ++++---
 .../ozone/container/common/TestEndPoint.java       |  83 +++-----
 .../ozone/client/io/BlockOutputStreamEntry.java    |   2 +-
 .../hadoop/ozone/client/io/KeyOutputStream.java    |   5 -
 .../apache/hadoop/ozone/client/rpc/RpcClient.java  |  10 -
 .../ozone/om/exceptions/OMReplayException.java}    |  23 ++-
 .../hadoop/ozone/om/helpers/OmBucketInfo.java      |  84 +++++++-
 .../apache/hadoop/ozone/om/helpers/OmKeyInfo.java  | 100 +++++++++-
 .../ozone/om/helpers/OmMultipartKeyInfo.java       | 110 ++++++++++-
 .../hadoop/ozone/om/helpers/OzoneFileStatus.java   |  14 +-
 .../src/main/proto/OzoneManagerProtocol.proto      |   7 +-
 .../om/codec/TestOmMultipartKeyInfoCodec.java      |  12 +-
 .../ozone/om/helpers/TestOmMultipartKeyInfo.java   |  11 +-
 hadoop-ozone/dev-support/checks/integration.sh     |  10 +-
 hadoop-ozone/dev-support/checks/unit.sh            |   5 +-
 .../src/main/compose/ozone/docker-compose.yaml     |   1 -
 hadoop-ozone/dist/src/main/compose/ozone/test.sh   |   4 +
 .../compose/ozonesecure-mr/docker-compose.yaml     |   9 +
 .../src/main/compose/ozonesecure-mr/docker-config  |   3 +-
 .../main/compose/ozonesecure/docker-compose.yaml   |  10 +-
 .../src/main/compose/ozonesecure/docker-config     |   3 +-
 .../dist/src/main/compose/ozonesecure/test.sh      |   2 +
 hadoop-ozone/dist/src/main/compose/testlib.sh      |  22 ++-
 .../main/smoketest/om-ratis/testOMAdminCmd.robot   |  21 +-
 .../dist/src/main/smoketest/recon/recon-api.robot  |  25 +--
 hadoop-ozone/integration-test/pom.xml              | 132 +++++++++++--
 .../hadoop/fs/ozone/TestOzoneFSInputStream.java    |  18 +-
 .../hadoop/fs/ozone/TestOzoneFileInterfaces.java   |  21 --
 .../hadoop/fs/ozone/TestOzoneFileSystem.java       | 121 +++++++-----
 .../hadoop/fs/ozone/TestOzoneFsRenameDir.java      | 110 -----------
 .../TestContainerStateManagerIntegration.java      |   1 +
 .../hadoop/hdds/scm/pipeline/TestNodeFailure.java  |  16 +-
 .../TestRatisPipelineCreateAndDestroy.java         |   2 +
 .../scm/pipeline/TestRatisPipelineProvider.java    | 207 -------------------
 .../safemode/TestSCMSafeModeWithPipelineRules.java |   2 +
 .../apache/hadoop/ozone/MiniOzoneClusterImpl.java  | 122 +++++-------
 .../org/apache/hadoop/ozone/RatisTestHelper.java   |   9 +-
 .../apache/hadoop/ozone/TestMiniOzoneCluster.java  |   7 +-
 .../hadoop/ozone/TestOzoneConfigurationFields.java |   5 +-
 .../hadoop/ozone/TestStorageContainerManager.java  |   2 +
 .../ozone/client/rpc/Test2WayCommitInRatis.java    |   6 +-
 .../ozone/client/rpc/TestBlockOutputStream.java    |   1 -
 .../rpc/TestBlockOutputStreamWithFailures.java     |   3 +-
 .../rpc/TestCloseContainerHandlingByClient.java    |   2 +-
 .../hadoop/ozone/client/rpc/TestCommitWatcher.java |   7 +-
 .../rpc/TestContainerReplicationEndToEnd.java      |  12 +-
 .../client/rpc/TestContainerStateMachine.java      |   3 +-
 .../rpc/TestContainerStateMachineFailures.java     |   2 +
 .../client/rpc/TestDeleteWithSlowFollower.java     |  11 +-
 .../client/rpc/TestFailureHandlingByClient.java    |   4 +-
 .../ozone/client/rpc/TestKeyInputStream.java       |   1 -
 .../rpc/TestMultiBlockWritesWithDnFailures.java    |   4 +-
 .../client/rpc/TestOzoneAtRestEncryption.java      |  10 +-
 .../rpc/TestOzoneClientRetriesOnException.java     |   3 +-
 .../client/rpc/TestOzoneRpcClientAbstract.java     |  32 +--
 .../client/rpc/TestOzoneRpcClientWithRatis.java    |   2 +
 .../hadoop/ozone/client/rpc/TestReadRetries.java   |   2 +-
 .../ozone/client/rpc/TestSecureOzoneRpcClient.java |   4 +-
 .../ozone/client/rpc/TestWatchForCommit.java       |  62 +-----
 .../ozone/container/TestContainerReplication.java  |   2 +
 .../commandhandler/TestBlockDeletion.java          |   2 +
 .../TestCloseContainerByPipeline.java              |   2 +
 .../transport/server/ratis/TestCSMMetrics.java     |   2 +
 .../container/metrics/TestContainerMetrics.java    |   4 +-
 .../container/ozoneimpl/TestOzoneContainer.java    |   2 +
 .../server/TestSecureContainerServer.java          |   2 +
 .../hadoop/ozone/dn/scrubber/TestDataScrubber.java |   4 +-
 .../hadoop/ozone/freon/TestDataValidate.java       |   2 -
 .../ozone/freon/TestFreonWithPipelineDestroy.java  |   2 +
 .../ozone/freon/TestOzoneClientKeyGenerator.java   |   2 -
 .../hadoop/ozone/freon/TestRandomKeyGenerator.java |   2 -
 .../apache/hadoop/ozone/om/TestKeyManagerImpl.java | 175 +++++++++--------
 .../org/apache/hadoop/ozone/om/TestKeyPurging.java |   3 +-
 .../hadoop/ozone/om/TestOMDbCheckpointServlet.java |   7 +-
 .../hadoop/ozone/om/TestOMRatisSnapshots.java      |   2 +
 .../apache/hadoop/ozone/om/TestOzoneManagerHA.java |   4 +-
 .../hadoop/ozone/om/TestOzoneManagerRestart.java   |   2 +
 .../ozone/om/TestOzoneManagerRocksDBLogging.java   |  46 ++---
 .../apache/hadoop/ozone/om/TestScmSafeMode.java    |   2 +
 .../org/apache/hadoop/ozone/recon/TestRecon.java   | 126 +++++-------
 .../scm/TestGetCommittedBlockLengthAndPutKey.java  |   4 +-
 .../TestSCMContainerPlacementPolicyMetrics.java    |   5 +-
 .../hadoop/ozone/scm/node/TestQueryNode.java       |   2 +
 .../ozone/scm/pipeline/TestSCMPipelineMetrics.java |   2 +
 .../src/test/resources/mapred-site.xml             |  24 +++
 .../src/test/resources/ozone-site.xml              |  29 +++
 .../src/test/resources/yarn-site.xml               |  24 +++
 .../org/apache/hadoop/ozone/om/KeyManagerImpl.java |  73 ++++---
 .../hadoop/ozone/om/OMDBCheckpointServlet.java     |   3 +-
 .../java/org/apache/hadoop/ozone/om/OMMetrics.java |  15 ++
 .../org/apache/hadoop/ozone/om/OzoneManager.java   |   4 +-
 .../om/request/bucket/OMBucketCreateRequest.java   |  45 +++--
 .../om/request/bucket/OMBucketDeleteRequest.java   |  39 ++--
 .../request/bucket/OMBucketSetPropertyRequest.java |  67 ++++---
 .../om/request/bucket/acl/OMBucketAclRequest.java  |  18 ++
 .../request/bucket/acl/OMBucketAddAclRequest.java  |   5 +-
 .../bucket/acl/OMBucketRemoveAclRequest.java       |   5 +-
 .../request/bucket/acl/OMBucketSetAclRequest.java  |   5 +-
 .../om/request/file/OMDirectoryCreateRequest.java  |   6 +-
 .../ozone/om/request/file/OMFileCreateRequest.java | 156 +++++++++++----
 .../om/request/key/OMAllocateBlockRequest.java     |   5 +-
 .../ozone/om/request/key/OMKeyCommitRequest.java   |   5 +-
 .../ozone/om/request/key/OMKeyCreateRequest.java   | 141 ++++++++++---
 .../ozone/om/request/key/OMKeyDeleteRequest.java   |  86 +++++---
 .../ozone/om/request/key/OMKeyRenameRequest.java   | 188 +++++++++++++-----
 .../hadoop/ozone/om/request/key/OMKeyRequest.java  | 188 ++++--------------
 .../request/s3/bucket/S3BucketCreateRequest.java   | 156 ++++++++-------
 .../request/s3/bucket/S3BucketDeleteRequest.java   |  42 +++-
 .../S3InitiateMultipartUploadRequest.java          |  17 +-
 .../multipart/S3MultipartUploadAbortRequest.java   |   1 +
 .../S3MultipartUploadCommitPartRequest.java        |   5 +
 .../S3MultipartUploadCompleteRequest.java          |  19 +-
 .../om/response/bucket/OMBucketCreateResponse.java |  29 +--
 .../om/response/bucket/OMBucketDeleteResponse.java |  29 +--
 .../bucket/OMBucketSetPropertyResponse.java        |  29 +--
 .../response/bucket/acl/OMBucketAclResponse.java   |  21 +-
 .../om/response/file/OMFileCreateResponse.java     |  13 +-
 .../ozone/om/response/key/OMKeyCreateResponse.java |  30 +--
 .../ozone/om/response/key/OMKeyDeleteResponse.java |  13 +-
 .../ozone/om/response/key/OMKeyRenameResponse.java |  81 ++++++--
 .../response/s3/bucket/S3BucketCreateResponse.java |  41 ++--
 .../response/s3/bucket/S3BucketDeleteResponse.java |  26 ++-
 .../protocolPB/OzoneManagerRequestHandler.java     |   2 -
 .../hadoop/ozone/web/ozShell/OzoneAddress.java     |  37 ++++
 .../ozone/web/ozShell/s3/GetS3SecretHandler.java   |  10 +-
 .../apache/hadoop/ozone/om/TestKeyManagerUnit.java |  11 +-
 .../ozone/om/TestOzoneManagerHttpServer.java       |   8 +-
 ...TestOzoneManagerDoubleBufferWithOMResponse.java |  11 +-
 .../ozone/om/request/TestOMRequestUtils.java       |  75 +++++--
 .../ozone/om/request/bucket/TestBucketRequest.java |   1 +
 .../request/bucket/TestOMBucketCreateRequest.java  |  48 ++++-
 .../request/bucket/TestOMBucketDeleteRequest.java  |  46 ++++-
 .../bucket/TestOMBucketSetPropertyRequest.java     |  34 +++-
 .../om/request/file/TestOMFileCreateRequest.java   |  42 ++--
 .../om/request/key/TestOMKeyCreateRequest.java     |  48 ++++-
 .../om/request/key/TestOMKeyDeleteRequest.java     |  44 ++++-
 .../om/request/key/TestOMKeyRenameRequest.java     | 106 +++++++++-
 .../ozone/om/request/key/TestOMKeyRequest.java     |   1 +
 .../s3/bucket/TestS3BucketCreateRequest.java       |  28 ++-
 .../s3/bucket/TestS3BucketDeleteRequest.java       |  36 ++++
 .../om/request/s3/bucket/TestS3BucketRequest.java  |   2 +-
 .../ozone/om/response/TestOMResponseUtils.java     |   6 +-
 .../bucket/TestOMBucketCreateResponse.java         |   5 +-
 .../bucket/TestOMBucketDeleteResponse.java         |  17 +-
 .../bucket/TestOMBucketSetPropertyResponse.java    |   5 +-
 .../om/response/key/TestOMKeyCreateResponse.java   |   6 +-
 .../om/response/key/TestOMKeyDeleteResponse.java   |  12 +-
 .../om/response/key/TestOMKeyRenameResponse.java   |  14 +-
 .../s3/bucket/TestS3BucketDeleteResponse.java      |   2 +-
 .../s3/multipart/TestS3MultipartResponse.java      |  11 +-
 .../fs/ozone/BasicOzoneClientAdapterImpl.java      |  67 +------
 .../hadoop/fs/ozone/BasicOzoneFileSystem.java      |  31 +--
 .../ozone/BasicRootedOzoneClientAdapterImpl.java   |  66 +------
 .../fs/ozone/BasicRootedOzoneFileSystem.java       |  21 +-
 .../fs/ozone/CapableOzoneFSInputStream.java}       |  35 ++--
 .../apache/hadoop/fs/ozone/FileStatusAdapter.java  |  12 +-
 .../hadoop/fs/ozone/FilteredClassLoader.java       |   1 -
 .../apache/hadoop/fs/ozone/OzoneFSInputStream.java |   2 +-
 .../apache/hadoop/fs/ozone/OzoneFileSystem.java    |   6 +
 .../hadoop/fs/ozone/OzoneStreamCapabilities.java}  |  21 +-
 .../hadoop/fs/ozone/TestOzoneFSInputStream.java    |  12 ++
 .../apache/hadoop/ozone/recon/ReconConstants.java  |  11 +-
 .../hadoop/ozone/recon/ReconControllerModule.java  |  22 ++-
 .../apache/hadoop/ozone/recon/ReconHttpServer.java |   4 +-
 .../org/apache/hadoop/ozone/recon/ReconServer.java |  82 +++++---
 .../ozone/recon/api/ContainerKeyService.java       |   8 +-
 .../recon/recovery/ReconOmMetadataManagerImpl.java |   2 +-
 .../ozone/recon/scm/ReconContainerManager.java     |  58 ++++++
 .../recon/scm/ReconDatanodeProtocolServer.java     |  32 ---
 .../ozone/recon/scm/ReconPipelineFactory.java      |  72 +++++++
 .../ozone/recon/scm/ReconPipelineManager.java      | 139 +++++++++++++
 .../recon/scm/ReconPipelineReportHandler.java      |  90 +++++++++
 .../ReconSafeModeManager.java}                     |  16 +-
 ...ava => ReconStorageContainerManagerFacade.java} | 107 +++++++++-
 .../recon/spi/ContainerDBServiceProvider.java      |   7 +-
 .../recon/spi/StorageContainerServiceProvider.java |  21 ++
 .../spi/impl/ContainerDBServiceProviderImpl.java   |  31 ++-
 .../spi/impl/OzoneManagerServiceProviderImpl.java  |  13 +-
 .../recon/spi/impl/ReconContainerDBProvider.java   |  22 ++-
 .../impl/StorageContainerServiceProviderImpl.java  |  57 ++++++
 .../ozone/recon/tasks/ReconTaskController.java     |   7 +-
 .../ozone/recon/tasks/ReconTaskControllerImpl.java |  11 +-
 .../webapps/recon/ozone-recon-web/LICENSE          |   2 +-
 .../webapps/recon/ozone-recon-web/api/db.json      | 129 ++++++++++++
 .../webapps/recon/ozone-recon-web/api/routes.json  |   3 +-
 .../webapps/recon/ozone-recon-web/package.json     |   1 +
 .../webapps/recon/ozone-recon-web/src/App.less     |  19 ++
 .../src/components/OverviewCard/OverviewCard.less  |   4 +
 .../src/components/OverviewCard/OverviewCard.tsx   |   9 +-
 .../src/constants/breadcrumbs.constants.tsx        |   3 +-
 .../webapps/recon/ozone-recon-web/src/routes.tsx   |  10 +
 .../src/views/Datanodes/Datanodes.less             |  17 --
 .../src/views/Datanodes/Datanodes.tsx              |  13 +-
 .../MissingContainers.less}                        |  10 -
 .../views/MissingContainers/MissingContainers.tsx  | 218 +++++++++++++++++++++
 .../src/views/Overview/Overview.less               |   3 +
 .../src/views/Overview/Overview.tsx                |  35 +++-
 .../Overview.less => Pipelines/Pipelines.less}     |  12 +-
 .../src/views/Pipelines/Pipelines.tsx              | 158 +++++++++++++++
 .../webapps/recon/ozone-recon-web/yarn.lock        | 204 +++----------------
 .../ozone/recon/AbstractOMMetadataManagerTest.java |   5 +-
 .../ozone/recon/scm/TestReconPipelineManager.java  | 173 ++++++++++++++++
 .../recon/scm/TestReconPipelineReportHandler.java  | 101 ++++++++++
 .../TestStorageContainerServiceProviderImpl.java   |  93 +++++++++
 .../recon/tasks/TestReconTaskControllerImpl.java   |   1 +
 .../ozone/admin/om/GetServiceRolesSubcommand.java  |   9 +-
 .../org/apache/hadoop/ozone/admin/om/OMAdmin.java  |  17 +-
 .../hadoop/ozone/freon/DatanodeChunkGenerator.java |   2 +-
 .../org/apache/hadoop/ozone/freon/ProgressBar.java |  44 ++++-
 .../apache/hadoop/ozone/freon/TestProgressBar.java |   3 +-
 pom.xml                                            |   2 +-
 347 files changed, 9378 insertions(+), 3831 deletions(-)

diff --git a/.github/buildenv/Dockerfile b/.github/buildenv/Dockerfile
index a8f96cf..0c9cd77 100644
--- a/.github/buildenv/Dockerfile
+++ b/.github/buildenv/Dockerfile
@@ -12,7 +12,7 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-FROM elek/ozone-build:20191106-1
+FROM apache/ozone-build
 USER root
 ADD entrypoint.sh /entrypoint.sh
 RUN chmod +x /entrypoint.sh
diff --git a/.github/workflows/post-commit.yml b/.github/workflows/post-commit.yml
index e90c782..d17a828 100644
--- a/.github/workflows/post-commit.yml
+++ b/.github/workflows/post-commit.yml
@@ -113,3 +113,93 @@ jobs:
           with:
             name: acceptance
             path: target/acceptance
+  it-freon:
+    name: it-freon
+    runs-on: ubuntu-18.04
+    needs:
+        - build
+    steps:
+        - uses: actions/checkout@master
+        - uses: ./.github/buildenv
+          with:
+             args: ./hadoop-ozone/dev-support/checks/integration.sh -Pfreon
+        - uses: actions/upload-artifact@master
+          if: always()
+          with:
+            name: it-freon
+            path: target/integration
+  it-filesystem:
+    name: it-filesystem
+    runs-on: ubuntu-18.04
+    needs:
+        - build
+    steps:
+        - uses: actions/checkout@master
+        - uses: ./.github/buildenv
+          with:
+             args: ./hadoop-ozone/dev-support/checks/integration.sh -Pfilesystem
+        - uses: actions/upload-artifact@master
+          if: always()
+          with:
+            name: it-filesystem
+            path: target/integration
+  it-filesystem-contract:
+    name: it-filesystem-contract
+    runs-on: ubuntu-18.04
+    needs:
+        - build
+    steps:
+        - uses: actions/checkout@master
+        - uses: ./.github/buildenv
+          with:
+             args: ./hadoop-ozone/dev-support/checks/integration.sh -Pfilesystem-contract
+        - uses: actions/upload-artifact@master
+          if: always()
+          with:
+            name: it-filesystem-contract
+            path: target/integration
+  it-client-and-hdds:
+    name: it-client-and-hdds
+    runs-on: ubuntu-18.04
+    needs:
+        - build
+    steps:
+        - uses: actions/checkout@master
+        - uses: ./.github/buildenv
+          with:
+             args: ./hadoop-ozone/dev-support/checks/integration.sh -Pclient-and-hdds
+        - uses: actions/upload-artifact@master
+          if: always()
+          with:
+            name: it-client-and-hdds
+            path: target/integration
+  it-om:
+    name: it-om
+    runs-on: ubuntu-18.04
+    needs:
+        - build
+    steps:
+        - uses: actions/checkout@master
+        - uses: ./.github/buildenv
+          with:
+             args: ./hadoop-ozone/dev-support/checks/integration.sh -Pom
+        - uses: actions/upload-artifact@master
+          if: always()
+          with:
+            name: it-om
+            path: target/integration
+  it-ozone:
+    name: it-ozone
+    runs-on: ubuntu-18.04
+    needs:
+        - build
+    steps:
+        - uses: actions/checkout@master
+        - uses: ./.github/buildenv
+          with:
+             args: ./hadoop-ozone/dev-support/checks/integration.sh -Pozone
+        - uses: actions/upload-artifact@master
+          if: always()
+          with:
+            name: it-ozone
+            path: target/integration
diff --git a/.github/workflows/pr.yml b/.github/workflows/pr.yml
index 046601e..c32c959 100644
--- a/.github/workflows/pr.yml
+++ b/.github/workflows/pr.yml
@@ -106,3 +106,93 @@ jobs:
           with:
             name: acceptance
             path: target/acceptance
+  it-freon:
+    name: it-freon
+    runs-on: ubuntu-18.04
+    needs:
+        - build
+    steps:
+        - uses: actions/checkout@master
+        - uses: ./.github/buildenv
+          with:
+             args: ./hadoop-ozone/dev-support/checks/integration.sh -Pfreon
+        - uses: actions/upload-artifact@master
+          if: always()
+          with:
+            name: it-freon
+            path: target/integration
+  it-filesystem:
+    name: it-filesystem
+    runs-on: ubuntu-18.04
+    needs:
+        - build
+    steps:
+        - uses: actions/checkout@master
+        - uses: ./.github/buildenv
+          with:
+             args: ./hadoop-ozone/dev-support/checks/integration.sh -Pfilesystem
+        - uses: actions/upload-artifact@master
+          if: always()
+          with:
+            name: it-filesystem
+            path: target/integration
+  it-filesystem-contract:
+    name: it-filesystem-contract
+    runs-on: ubuntu-18.04
+    needs:
+        - build
+    steps:
+        - uses: actions/checkout@master
+        - uses: ./.github/buildenv
+          with:
+             args: ./hadoop-ozone/dev-support/checks/integration.sh -Pfilesystem-contract
+        - uses: actions/upload-artifact@master
+          if: always()
+          with:
+            name: it-filesystem-contract
+            path: target/integration
+  it-client-and-hdds:
+    name: it-client-and-hdds
+    runs-on: ubuntu-18.04
+    needs:
+        - build
+    steps:
+        - uses: actions/checkout@master
+        - uses: ./.github/buildenv
+          with:
+             args: ./hadoop-ozone/dev-support/checks/integration.sh -Pclient-and-hdds
+        - uses: actions/upload-artifact@master
+          if: always()
+          with:
+            name: it-client-and-hdds
+            path: target/integration
+  it-om:
+    name: it-om
+    runs-on: ubuntu-18.04
+    needs:
+        - build
+    steps:
+        - uses: actions/checkout@master
+        - uses: ./.github/buildenv
+          with:
+             args: ./hadoop-ozone/dev-support/checks/integration.sh -Pom
+        - uses: actions/upload-artifact@master
+          if: always()
+          with:
+            name: it-om
+            path: target/integration
+  it-ozone:
+    name: it-ozone
+    runs-on: ubuntu-18.04
+    needs:
+        - build
+    steps:
+        - uses: actions/checkout@master
+        - uses: ./.github/buildenv
+          with:
+             args: ./hadoop-ozone/dev-support/checks/integration.sh -Pozone
+        - uses: actions/upload-artifact@master
+          if: always()
+          with:
+            name: it-ozone
+            path: target/integration
diff --git a/hadoop-hdds/client/pom.xml b/hadoop-hdds/client/pom.xml
index 673af41..7b744d3 100644
--- a/hadoop-hdds/client/pom.xml
+++ b/hadoop-hdds/client/pom.xml
@@ -40,5 +40,13 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd">
       <artifactId>netty-all</artifactId>
     </dependency>
 
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-common</artifactId>
+      <version>${hadoop.version}</version>
+      <scope>test</scope>
+      <type>test-jar</type>
+    </dependency>
+
   </dependencies>
 </project>
diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/conf/RatisClientConfig.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/conf/RatisClientConfig.java
new file mode 100644
index 0000000..0bc1aab
--- /dev/null
+++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/conf/RatisClientConfig.java
@@ -0,0 +1,84 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdds.conf;
+
+import static org.apache.hadoop.hdds.conf.ConfigTag.CLIENT;
+import static org.apache.hadoop.hdds.conf.ConfigTag.OZONE;
+import static org.apache.hadoop.hdds.conf.ConfigTag.PERFORMANCE;
+
+/**
+ * Configuration for the Ratis client. This is the config used when
+ * creating a RaftClient.
+ */
+@ConfigGroup(prefix = "raft.client")
+public class RatisClientConfig {
+  @Config(key = "async.outstanding-requests.max",
+      defaultValue = "32",
+      type = ConfigType.INT,
+      tags = {OZONE, CLIENT, PERFORMANCE},
+      description =
+          "Controls the maximum number of outstanding async requests that can"
+              + " be handled by the Standalone as well as Ratis client."
+  )
+  private int maxOutstandingRequests;
+
+  public int getMaxOutstandingRequests() {
+    return maxOutstandingRequests;
+  }
+
+  public void setMaxOutstandingRequests(int maxOutstandingRequests) {
+    this.maxOutstandingRequests = maxOutstandingRequests;
+  }
+
+  @Config(key = "rpc.request.timeout",
+      defaultValue = "60s",
+      type = ConfigType.TIME,
+      tags = {OZONE, CLIENT, PERFORMANCE},
+      description = "The timeout duration for ratis client request (except " +
+          "for watch request). It should be set greater than leader " +
+          "election timeout in Ratis."
+  )
+  private long requestTimeOut = 60 * 1000;
+
+  public long getRequestTimeOut() {
+    return requestTimeOut;
+  }
+
+  public void setRequestTimeOut(long requestTimeOut) {
+    this.requestTimeOut = requestTimeOut;
+  }
+
+  @Config(key = "watch.request.timeout",
+      defaultValue = "180s",
+      type = ConfigType.TIME,
+      tags = {OZONE, CLIENT, PERFORMANCE},
+      description = "The timeout duration for ratis client watch request. " +
+          "Timeout for the watch API in Ratis client to acknowledge a " +
+          "particular request getting replayed to all servers."
+  )
+  private long watchRequestTimeOut = 180 * 1000;
+
+  public long getWatchRequestTimeOut() {
+    return watchRequestTimeOut;
+  }
+
+  public void setWatchRequestTimeOut(long watchRequestTimeOut) {
+    this.watchRequestTimeOut = watchRequestTimeOut;
+  }
+}
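
The class above is consumed through the annotation-driven config framework; later in this patch, HddsClientUtils switches to exactly this lookup. Below is a minimal, hypothetical sketch of that usage (the class name is illustrative and not part of the patch), assuming only the OzoneConfiguration.getObject call already used in HddsClientUtils. Keys resolve as <ConfigGroup prefix>.<Config key>, so these settings live under the raft.client prefix.

import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.hdds.conf.RatisClientConfig;

public final class RatisClientConfigUsageSketch {
  public static void main(String[] args) {
    // Picks up ozone-site.xml from the classpath when available.
    OzoneConfiguration conf = new OzoneConfiguration();
    // Binds raft.client.async.outstanding-requests.max,
    // raft.client.rpc.request.timeout and raft.client.watch.request.timeout
    // onto the annotated fields of RatisClientConfig.
    RatisClientConfig ratisClientConfig =
        conf.getObject(RatisClientConfig.class);
    System.out.println("max outstanding async requests: "
        + ratisClientConfig.getMaxOutstandingRequests());
    System.out.println("rpc request timeout: "
        + ratisClientConfig.getRequestTimeOut());
  }
}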
diff --git a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/App.less b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/conf/RatisGrpcConfig.java
similarity index 50%
copy from hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/App.less
copy to hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/conf/RatisGrpcConfig.java
index 227f634..b0bb140 100644
--- a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/App.less
+++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/conf/RatisGrpcConfig.java
@@ -1,4 +1,4 @@
-/**
+/*
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements.  See the NOTICE file
  * distributed with this work for additional information
@@ -16,46 +16,31 @@
  * limitations under the License.
  */
 
-@import "./components/NavBar/NavBar.less";
+package org.apache.hadoop.hdds.conf;
 
-.ant-layout-header {
-  padding: 0 20px;
-  height: 50px;
-  line-height: 50px;
-  background: #FFF;
-}
+import static org.apache.hadoop.hdds.conf.ConfigTag.CLIENT;
+import static org.apache.hadoop.hdds.conf.ConfigTag.OZONE;
+import static org.apache.hadoop.hdds.conf.ConfigTag.PERFORMANCE;
 
-.content-layout {
-  margin-left: 200px;
-  &.sidebar-collapsed {
-    margin-left: @sidebar-collapsed-width;
+/**
+ * Ratis Grpc Config Keys.
+ */
+@ConfigGroup(prefix = "raft.grpc")
+public class RatisGrpcConfig {
+  @Config(key = "message.size.max",
+      defaultValue = "32MB",
+      type = ConfigType.INT,
+      tags = {OZONE, CLIENT, PERFORMANCE},
+      description = "Maximum message size allowed to be recieved by Grpc " +
+          "Channel (Server)."
+  )
+  private int maximumMessageSize = 32 * 1024 * 1024;
+
+  public int getMaximumMessageSize() {
+    return maximumMessageSize;
   }
-}
-
-.page-header {
-  padding: 10px 0;
-  font-size: 20px;
-  font-weight: 500;
-}
-
-.content-div {
-  padding: 24px;
-  background-color: #FFF;
-  min-height: 80vh;
-}
-
-body {
-  font-family: 'Roboto', sans-serif;
-}
-
-.icon-warning {
-  color: #e49f00;
-}
-
-.icon-success {
-  color: #1da57a;
-}
 
-.icon-failure {
-  color: #f83437;
+  public void setMaximumMessageSize(int maximumMessageSize) {
+    this.maximumMessageSize = maximumMessageSize;
+  }
 }
diff --git a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/views/Overview/Overview.less b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/conf/package-info.java
similarity index 87%
copy from hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/views/Overview/Overview.less
copy to hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/conf/package-info.java
index c8e74d2..b28bbed 100644
--- a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/views/Overview/Overview.less
+++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/conf/package-info.java
@@ -1,4 +1,4 @@
-/**
+/*
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements.  See the NOTICE file
  * distributed with this work for additional information
@@ -16,12 +16,9 @@
  * limitations under the License.
  */
 
-.overview-content {
-  margin: 20px 5px;
-  .icon-small {
-    font-size: 16px;
-  }
-  .meta {
-    font-size: 12px;
-  }
-}
+/**
+ This package contains configuration related classes.
+ */
+
+package org.apache.hadoop.hdds.conf;
+
diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientGrpc.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientGrpc.java
index 9a4da38..668fdaa 100644
--- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientGrpc.java
+++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientGrpc.java
@@ -53,6 +53,7 @@ import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 import java.io.IOException;
+import java.io.InterruptedIOException;
 import java.security.cert.X509Certificate;
 import java.util.Collections;
 import java.util.HashMap;
@@ -231,8 +232,14 @@ public class XceiverClientGrpc extends XceiverClientSpi {
     try {
       return sendCommandWithTraceIDAndRetry(request, null).
           getResponse().get();
-    } catch (ExecutionException | InterruptedException e) {
+    } catch (ExecutionException e) {
       throw new IOException("Failed to execute command " + request, e);
+    } catch (InterruptedException e) {
+      LOG.error("Command execution was interrupted.");
+      Thread.currentThread().interrupt();
+      throw (IOException) new InterruptedIOException(
+          "Command " + request + " was interrupted.")
+          .initCause(e);
     }
   }
 
@@ -244,8 +251,14 @@ public class XceiverClientGrpc extends XceiverClientSpi {
       XceiverClientReply reply;
       reply = sendCommandWithTraceIDAndRetry(request, validators);
       return reply.getResponse().get();
-    } catch (ExecutionException | InterruptedException e) {
+    } catch (ExecutionException e) {
       throw new IOException("Failed to execute command " + request, e);
+    } catch (InterruptedException e) {
+      LOG.error("Command execution was interrupted.");
+      Thread.currentThread().interrupt();
+      throw (IOException) new InterruptedIOException(
+          "Command " + request + " was interrupted.")
+          .initCause(e);
     }
   }
 
@@ -327,7 +340,7 @@ public class XceiverClientGrpc extends XceiverClientSpi {
       } catch (IOException e) {
         ioException = e;
         responseProto = null;
-      } catch (ExecutionException | InterruptedException e) {
+      } catch (ExecutionException e) {
         LOG.debug("Failed to execute command {} on datanode {}",
             request, dn.getUuid(), e);
         if (Status.fromThrowable(e.getCause()).getCode()
@@ -338,6 +351,10 @@ public class XceiverClientGrpc extends XceiverClientSpi {
 
         ioException = new IOException(e);
         responseProto = null;
+      } catch (InterruptedException e) {
+        LOG.error("Command execution was interrupted ", e);
+        Thread.currentThread().interrupt();
+        responseProto = null;
       }
     }
 
@@ -473,7 +490,7 @@ public class XceiverClientGrpc extends XceiverClientSpi {
   }
 
   @Override
-  public XceiverClientReply watchForCommit(long index, long timeout)
+  public XceiverClientReply watchForCommit(long index)
       throws InterruptedException, ExecutionException, TimeoutException,
       IOException {
     // there is no notion of watch for commit index in standalone pipeline
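
The revised handling above no longer lumps InterruptedException in with ExecutionException: the interrupt status is restored and the condition is surfaced as an InterruptedIOException. A small self-contained sketch of that pattern (the helper below is illustrative, not code from the patch):

import java.io.IOException;
import java.io.InterruptedIOException;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ExecutionException;

public final class InterruptHandlingSketch {
  // Waits on an async reply, translating failures the same way the change
  // above does: ExecutionException becomes a plain IOException, while an
  // interrupt restores the thread's interrupt flag and becomes an
  // InterruptedIOException so callers can distinguish the two cases.
  static <T> T awaitOrRethrow(CompletableFuture<T> future, String what)
      throws IOException {
    try {
      return future.get();
    } catch (ExecutionException e) {
      throw new IOException("Failed to execute " + what, e);
    } catch (InterruptedException e) {
      Thread.currentThread().interrupt();
      throw (IOException) new InterruptedIOException(what + " was interrupted.")
          .initCause(e);
    }
  }
}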
diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientManager.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientManager.java
index b889103..4ceff0b 100644
--- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientManager.java
+++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientManager.java
@@ -322,15 +322,6 @@ public class XceiverClientManager implements Closeable {
     )
     private long staleThreshold;
 
-    @Config(key = "max.outstanding.requests",
-        defaultValue = "100",
-        tags = {OZONE, PERFORMANCE},
-        description =
-            "Controls the maximum number of outstanding async requests that can"
-                + " be handled by the Standalone as well as Ratis client."
-    )
-    private int maxOutstandingRequests;
-
     public long getStaleThreshold(TimeUnit unit) {
       return unit.convert(staleThreshold, MILLISECONDS);
     }
@@ -345,10 +336,6 @@ public class XceiverClientManager implements Closeable {
       this.maxSize = maxSize;
     }
 
-    public int getMaxOutstandingRequests() {
-      return maxOutstandingRequests;
-    }
-
   }
 
 }
diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientRatis.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientRatis.java
index 70c7882..0d12355 100644
--- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientRatis.java
+++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientRatis.java
@@ -30,7 +30,6 @@ import java.util.concurrent.CompletionException;
 import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.ConcurrentMap;
 import java.util.concurrent.ExecutionException;
-import java.util.concurrent.TimeUnit;
 import java.util.concurrent.TimeoutException;
 import java.util.concurrent.atomic.AtomicReference;
 import java.util.stream.Collectors;
@@ -59,7 +58,6 @@ import org.apache.ratis.retry.RetryPolicy;
 import org.apache.ratis.rpc.RpcType;
 import org.apache.ratis.rpc.SupportedRpcType;
 import org.apache.ratis.thirdparty.com.google.protobuf.InvalidProtocolBufferException;
-import org.apache.ratis.util.TimeDuration;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -89,25 +87,20 @@ public final class XceiverClientRatis extends XceiverClientSpi {
     final String rpcType = ozoneConf
         .get(ScmConfigKeys.DFS_CONTAINER_RATIS_RPC_TYPE_KEY,
             ScmConfigKeys.DFS_CONTAINER_RATIS_RPC_TYPE_DEFAULT);
-    final TimeDuration clientRequestTimeout =
-        RatisHelper.getClientRequestTimeout(ozoneConf);
-    final int maxOutstandingRequests =
-        HddsClientUtils.getMaxOutstandingRequests(ozoneConf);
     final RetryPolicy retryPolicy = RatisHelper.createRetryPolicy(ozoneConf);
     final GrpcTlsConfig tlsConfig = RatisHelper.createTlsClientConfig(new
         SecurityConfig(ozoneConf), caCert);
     return new XceiverClientRatis(pipeline,
-        SupportedRpcType.valueOfIgnoreCase(rpcType), maxOutstandingRequests,
-        retryPolicy, tlsConfig, clientRequestTimeout);
+        SupportedRpcType.valueOfIgnoreCase(rpcType),
+        retryPolicy, tlsConfig, ozoneConf);
   }
 
   private final Pipeline pipeline;
   private final RpcType rpcType;
   private final AtomicReference<RaftClient> client = new AtomicReference<>();
-  private final int maxOutstandingRequests;
   private final RetryPolicy retryPolicy;
   private final GrpcTlsConfig tlsConfig;
-  private final TimeDuration clientRequestTimeout;
+  private final Configuration ozoneConfiguration;
 
   // Map to track commit index at every server
   private final ConcurrentHashMap<UUID, Long> commitInfoMap;
@@ -118,17 +111,16 @@ public final class XceiverClientRatis extends XceiverClientSpi {
    * Constructs a client.
    */
   private XceiverClientRatis(Pipeline pipeline, RpcType rpcType,
-      int maxOutStandingChunks, RetryPolicy retryPolicy,
-      GrpcTlsConfig tlsConfig, TimeDuration timeout) {
+      RetryPolicy retryPolicy, GrpcTlsConfig tlsConfig,
+      Configuration configuration) {
     super();
     this.pipeline = pipeline;
     this.rpcType = rpcType;
-    this.maxOutstandingRequests = maxOutStandingChunks;
     this.retryPolicy = retryPolicy;
     commitInfoMap = new ConcurrentHashMap<>();
     this.tlsConfig = tlsConfig;
-    this.clientRequestTimeout = timeout;
     metrics = XceiverClientManager.getXceiverClientMetrics();
+    this.ozoneConfiguration = configuration;
   }
 
   private void updateCommitInfosMap(
@@ -175,12 +167,10 @@ public final class XceiverClientRatis extends XceiverClientSpi {
       LOG.debug("Connecting to pipeline:{} datanode:{}", getPipeline().getId(),
           RatisHelper.toRaftPeerId(pipeline.getFirstNode()));
     }
-    // TODO : XceiverClient ratis should pass the config value of
-    // maxOutstandingRequests so as to set the upper bound on max no of async
-    // requests to be handled by raft client
+
     if (!client.compareAndSet(null,
         RatisHelper.newRaftClient(rpcType, getPipeline(), retryPolicy,
-            maxOutstandingRequests, tlsConfig, clientRequestTimeout))) {
+            tlsConfig, ozoneConfiguration))) {
       throw new IllegalStateException("Client is already connected.");
     }
   }
@@ -254,7 +244,7 @@ public final class XceiverClientRatis extends XceiverClientSpi {
   }
 
   @Override
-  public XceiverClientReply watchForCommit(long index, long timeout)
+  public XceiverClientReply watchForCommit(long index)
       throws InterruptedException, ExecutionException, TimeoutException,
       IOException {
     long commitIndex = getReplicatedMinCommitIndex();
@@ -265,14 +255,11 @@ public final class XceiverClientRatis extends XceiverClientSpi {
       clientReply.setLogIndex(commitIndex);
       return clientReply;
     }
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("commit index : {} watch timeout : {}", index, timeout);
-    }
     RaftClientReply reply;
     try {
       CompletableFuture<RaftClientReply> replyFuture = getClient()
           .sendWatchAsync(index, RaftProtos.ReplicationLevel.ALL_COMMITTED);
-      replyFuture.get(timeout, TimeUnit.MILLISECONDS);
+      replyFuture.get();
     } catch (Exception e) {
       Throwable t = HddsClientUtils.checkForException(e);
       LOG.warn("3 way commit failed on pipeline {}", pipeline, e);
@@ -281,7 +268,7 @@ public final class XceiverClientRatis extends XceiverClientSpi {
       }
       reply = getClient()
           .sendWatchAsync(index, RaftProtos.ReplicationLevel.MAJORITY_COMMITTED)
-          .get(timeout, TimeUnit.MILLISECONDS);
+          .get();
       List<RaftProtos.CommitInfoProto> commitInfoProtoList =
           reply.getCommitInfos().stream()
               .filter(i -> i.getCommitIndex() < index)
diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/client/ContainerOperationClient.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/client/ContainerOperationClient.java
index 6e2fd59..1447608 100644
--- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/client/ContainerOperationClient.java
+++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/client/ContainerOperationClient.java
@@ -107,7 +107,7 @@ public class ContainerOperationClient implements ScmClient {
     return manager;
   }
 
-  private StorageContainerLocationProtocol newContainerRpcClient(
+  public static StorageContainerLocationProtocol newContainerRpcClient(
       Configuration conf) throws IOException {
 
     Class<StorageContainerLocationProtocolPB> protocol =
@@ -283,6 +283,12 @@ public class ContainerOperationClient implements ScmClient {
   }
 
   @Override
+  public Pipeline getPipeline(HddsProtos.PipelineID pipelineID)
+      throws IOException {
+    return storageContainerLocationClient.getPipeline(pipelineID);
+  }
+
+  @Override
   public void activatePipeline(HddsProtos.PipelineID pipelineID)
       throws IOException {
     storageContainerLocationClient.activatePipeline(pipelineID);
diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/client/HddsClientUtils.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/client/HddsClientUtils.java
index ee8a208..6a88b38 100644
--- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/client/HddsClientUtils.java
+++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/client/HddsClientUtils.java
@@ -29,7 +29,7 @@ import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.protocol.SCMSecurityProtocol;
 import org.apache.hadoop.hdds.protocolPB.SCMSecurityProtocolClientSideTranslatorPB;
 import org.apache.hadoop.hdds.protocolPB.SCMSecurityProtocolPB;
-import org.apache.hadoop.hdds.scm.XceiverClientManager.ScmClientConfig;
+import org.apache.hadoop.hdds.conf.RatisClientConfig;
 import org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException;
 import org.apache.hadoop.hdds.scm.protocol.ScmBlockLocationProtocol;
 import org.apache.hadoop.hdds.scm.protocolPB.ScmBlockLocationProtocolPB;
@@ -280,7 +280,7 @@ public final class HddsClientUtils {
    */
   public static int getMaxOutstandingRequests(Configuration config) {
     return OzoneConfiguration.of(config)
-        .getObject(ScmClientConfig.class)
+        .getObject(RatisClientConfig.class)
         .getMaxOutstandingRequests();
   }
 
diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockOutputStream.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockOutputStream.java
index 15aebe1..9131f5c 100644
--- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockOutputStream.java
+++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockOutputStream.java
@@ -125,7 +125,6 @@ public class BlockOutputStream extends OutputStream {
    * @param bufferPool           pool of buffers
    * @param streamBufferFlushSize flush size
    * @param streamBufferMaxSize   max size of the currentBuffer
-   * @param watchTimeout          watch timeout
    * @param checksumType          checksum type
    * @param bytesPerChecksum      Bytes per checksum
    */
@@ -133,8 +132,7 @@ public class BlockOutputStream extends OutputStream {
   public BlockOutputStream(BlockID blockID,
       XceiverClientManager xceiverClientManager, Pipeline pipeline,
       int chunkSize, long streamBufferFlushSize, long streamBufferMaxSize,
-      long watchTimeout, BufferPool bufferPool, ChecksumType checksumType,
-      int bytesPerChecksum)
+      BufferPool bufferPool, ChecksumType checksumType, int bytesPerChecksum)
       throws IOException {
     this.blockID = new AtomicReference<>(blockID);
     this.chunkSize = chunkSize;
@@ -154,7 +152,7 @@ public class BlockOutputStream extends OutputStream {
 
     // A single thread executor handle the responses of async requests
     responseExecutor = Executors.newSingleThreadExecutor();
-    commitWatcher = new CommitWatcher(bufferPool, xceiverClient, watchTimeout);
+    commitWatcher = new CommitWatcher(bufferPool, xceiverClient);
     bufferList = null;
     totalDataFlushedLength = 0;
     writtenDataLength = 0;
diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/CommitWatcher.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/CommitWatcher.java
index ebcc6dc..34d0d7c 100644
--- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/CommitWatcher.java
+++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/CommitWatcher.java
@@ -71,17 +71,13 @@ public class CommitWatcher {
 
   private XceiverClientSpi xceiverClient;
 
-  private final long watchTimeout;
-
   // total data which has been successfully flushed and acknowledged
   // by all servers
   private long totalAckDataLength;
 
-  public CommitWatcher(BufferPool bufferPool, XceiverClientSpi xceiverClient,
-      long watchTimeout) {
+  public CommitWatcher(BufferPool bufferPool, XceiverClientSpi xceiverClient) {
     this.bufferPool = bufferPool;
     this.xceiverClient = xceiverClient;
-    this.watchTimeout = watchTimeout;
     commitIndex2flushedDataMap = new ConcurrentSkipListMap<>();
     totalAckDataLength = 0;
     futureMap = new ConcurrentHashMap<>();
@@ -191,7 +187,7 @@ public class CommitWatcher {
     long index;
     try {
       XceiverClientReply reply =
-          xceiverClient.watchForCommit(commitIndex, watchTimeout);
+          xceiverClient.watchForCommit(commitIndex);
       if (reply == null) {
         index = 0;
       } else {
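
Since watchForCommit no longer takes an explicit timeout, callers such as CommitWatcher block on the reply and let the Ratis-level raft.client.watch.request.timeout bound the wait. A rough sketch of the resulting call pattern, assuming a connected XceiverClientSpi and that XceiverClientReply exposes a getLogIndex() accessor matching the setLogIndex() seen elsewhere in this patch:

import java.io.IOException;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.TimeoutException;

import org.apache.hadoop.hdds.scm.XceiverClientReply;
import org.apache.hadoop.hdds.scm.XceiverClientSpi;

final class WatchForCommitSketch {
  // Returns the log index acknowledged by the watch, or 0 when no reply is
  // available, mirroring the null handling in CommitWatcher above.
  static long waitForCommit(XceiverClientSpi client, long commitIndex)
      throws IOException, InterruptedException, ExecutionException,
      TimeoutException {
    XceiverClientReply reply = client.watchForCommit(commitIndex);
    return reply == null ? 0 : reply.getLogIndex();
  }
}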
diff --git a/hadoop-hdds/client/src/test/java/org/apache/hadoop/hdds/scm/storage/DummyBlockInputStream.java b/hadoop-hdds/client/src/test/java/org/apache/hadoop/hdds/scm/storage/DummyBlockInputStream.java
new file mode 100644
index 0000000..5db722a
--- /dev/null
+++ b/hadoop-hdds/client/src/test/java/org/apache/hadoop/hdds/scm/storage/DummyBlockInputStream.java
@@ -0,0 +1,92 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ *  with the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ */
+package org.apache.hadoop.hdds.scm.storage;
+
+import java.io.IOException;
+import java.util.List;
+import java.util.Map;
+import java.util.function.Function;
+
+import org.apache.hadoop.hdds.client.BlockID;
+import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ChunkInfo;
+import org.apache.hadoop.hdds.scm.XceiverClientManager;
+import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
+import org.apache.hadoop.hdds.security.token.OzoneBlockTokenIdentifier;
+import org.apache.hadoop.security.token.Token;
+
+/**
+ * A dummy BlockInputStream to mock read block call to DN.
+ */
+class DummyBlockInputStream extends BlockInputStream {
+
+  private List<ChunkInfo> chunks;
+
+  private Map<String, byte[]> chunkDataMap;
+
+  @SuppressWarnings("parameternumber")
+  DummyBlockInputStream(
+      BlockID blockId,
+      long blockLen,
+      Pipeline pipeline,
+      Token<OzoneBlockTokenIdentifier> token,
+      boolean verifyChecksum,
+      XceiverClientManager xceiverClientManager,
+      List<ChunkInfo> chunkList,
+      Map<String, byte[]> chunkMap) {
+    super(blockId, blockLen, pipeline, token, verifyChecksum,
+        xceiverClientManager);
+    this.chunks = chunkList;
+    this.chunkDataMap = chunkMap;
+  }
+
+  @SuppressWarnings("parameternumber")
+  DummyBlockInputStream(
+      BlockID blockId,
+      long blockLen,
+      Pipeline pipeline,
+      Token<OzoneBlockTokenIdentifier> token,
+      boolean verifyChecksum,
+      XceiverClientManager xceiverClientManager,
+      Function<BlockID, Pipeline> refreshFunction,
+      List<ChunkInfo> chunkList,
+      Map<String, byte[]> chunks) {
+    super(blockId, blockLen, pipeline, token, verifyChecksum,
+        xceiverClientManager, refreshFunction);
+    this.chunkDataMap = chunks;
+    this.chunks = chunkList;
+
+  }
+
+  @Override
+  protected List<ChunkInfo> getChunkInfos() throws IOException {
+    return chunks;
+  }
+
+  @Override
+  protected void addStream(ChunkInfo chunkInfo) {
+    TestChunkInputStream testChunkInputStream = new TestChunkInputStream();
+    getChunkStreams().add(new DummyChunkInputStream(testChunkInputStream,
+        chunkInfo, null, null, false,
+        chunkDataMap.get(chunkInfo.getChunkName()).clone()));
+  }
+
+  @Override
+  protected synchronized void checkOpen() throws IOException {
+    // No action needed
+  }
+}
diff --git a/hadoop-hdds/client/src/test/java/org/apache/hadoop/hdds/scm/storage/DummyBlockInputStreamWithRetry.java b/hadoop-hdds/client/src/test/java/org/apache/hadoop/hdds/scm/storage/DummyBlockInputStreamWithRetry.java
new file mode 100644
index 0000000..1686ed4
--- /dev/null
+++ b/hadoop-hdds/client/src/test/java/org/apache/hadoop/hdds/scm/storage/DummyBlockInputStreamWithRetry.java
@@ -0,0 +1,78 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ *  with the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ */
+package org.apache.hadoop.hdds.scm.storage;
+
+import java.io.IOException;
+import java.util.Collections;
+import java.util.List;
+import java.util.Map;
+import java.util.concurrent.atomic.AtomicBoolean;
+
+import org.apache.hadoop.hdds.client.BlockID;
+import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ChunkInfo;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
+import org.apache.hadoop.hdds.scm.XceiverClientManager;
+import org.apache.hadoop.hdds.scm.container.ContainerNotFoundException;
+import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
+import org.apache.hadoop.hdds.scm.pipeline.PipelineID;
+import org.apache.hadoop.hdds.security.token.OzoneBlockTokenIdentifier;
+import org.apache.hadoop.security.token.Token;
+
+/**
+ * A dummy BlockInputStream with pipeline refresh function to mock read
+ * block call to DN.
+ */
+final class DummyBlockInputStreamWithRetry
+    extends DummyBlockInputStream {
+
+  private int getChunkInfoCount = 0;
+
+  @SuppressWarnings("parameternumber")
+  DummyBlockInputStreamWithRetry(
+      BlockID blockId,
+      long blockLen,
+      Pipeline pipeline,
+      Token<OzoneBlockTokenIdentifier> token,
+      boolean verifyChecksum,
+      XceiverClientManager xceiverClientManager,
+      List<ChunkInfo> chunkList,
+      Map<String, byte[]> chunkMap,
+      AtomicBoolean isRefreshed) {
+    super(blockId, blockLen, pipeline, token, verifyChecksum,
+        xceiverClientManager, blockID -> {
+          isRefreshed.set(true);
+          return Pipeline.newBuilder()
+              .setState(Pipeline.PipelineState.OPEN)
+              .setId(PipelineID.randomId())
+              .setType(HddsProtos.ReplicationType.STAND_ALONE)
+              .setFactor(HddsProtos.ReplicationFactor.ONE)
+              .setNodes(Collections.emptyList())
+              .build();
+        }, chunkList, chunkMap);
+  }
+
+  @Override
+  protected List<ChunkInfo> getChunkInfos() throws IOException {
+    if (getChunkInfoCount == 0) {
+      getChunkInfoCount++;
+      throw new ContainerNotFoundException("Exception encountered");
+    } else {
+      return super.getChunkInfos();
+    }
+  }
+}
diff --git a/hadoop-hdds/client/src/test/java/org/apache/hadoop/hdds/scm/storage/DummyChunkInputStream.java b/hadoop-hdds/client/src/test/java/org/apache/hadoop/hdds/scm/storage/DummyChunkInputStream.java
new file mode 100644
index 0000000..8405f43
--- /dev/null
+++ b/hadoop-hdds/client/src/test/java/org/apache/hadoop/hdds/scm/storage/DummyChunkInputStream.java
@@ -0,0 +1,66 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ *  with the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ */
+package org.apache.hadoop.hdds.scm.storage;
+
+import java.util.ArrayList;
+import java.util.List;
+
+import org.apache.hadoop.hdds.client.BlockID;
+import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ChunkInfo;
+import org.apache.hadoop.hdds.scm.XceiverClientSpi;
+
+import org.apache.ratis.thirdparty.com.google.protobuf.ByteString;
+
+/**
+ * A dummy ChunkInputStream to mock read chunk calls to DN.
+ */
+public class DummyChunkInputStream extends ChunkInputStream {
+
+  private byte[] chunkData;
+
+  // Stores the read chunk data in each readChunk call
+  private List<ByteString> readByteBuffers = new ArrayList<>();
+
+  public DummyChunkInputStream(TestChunkInputStream testChunkInputStream,
+      ChunkInfo chunkInfo,
+      BlockID blockId,
+      XceiverClientSpi xceiverClient,
+      boolean verifyChecksum,
+      byte[] data) {
+    super(chunkInfo, blockId, xceiverClient, verifyChecksum);
+    this.chunkData = data;
+  }
+
+  @Override
+  protected ByteString readChunk(ChunkInfo readChunkInfo) {
+    ByteString byteString = ByteString.copyFrom(chunkData,
+        (int) readChunkInfo.getOffset(),
+        (int) readChunkInfo.getLen());
+    getReadByteBuffers().add(byteString);
+    return byteString;
+  }
+
+  @Override
+  protected void checkOpen() {
+    // No action needed
+  }
+
+  public List<ByteString> getReadByteBuffers() {
+    return readByteBuffers;
+  }
+}
diff --git a/hadoop-hdds/client/src/test/java/org/apache/hadoop/hdds/scm/storage/TestBlockInputStream.java b/hadoop-hdds/client/src/test/java/org/apache/hadoop/hdds/scm/storage/TestBlockInputStream.java
index 6c47ef6..3f5e12a 100644
--- a/hadoop-hdds/client/src/test/java/org/apache/hadoop/hdds/scm/storage/TestBlockInputStream.java
+++ b/hadoop-hdds/client/src/test/java/org/apache/hadoop/hdds/scm/storage/TestBlockInputStream.java
@@ -23,27 +23,19 @@ import org.apache.hadoop.hdds.client.BlockID;
 import org.apache.hadoop.hdds.client.ContainerBlockID;
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ChecksumType;
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ChunkInfo;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
-import org.apache.hadoop.hdds.scm.XceiverClientManager;
-import org.apache.hadoop.hdds.scm.container.ContainerNotFoundException;
-import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
-import org.apache.hadoop.hdds.scm.pipeline.PipelineID;
-import org.apache.hadoop.hdds.security.token.OzoneBlockTokenIdentifier;
 import org.apache.hadoop.ozone.common.Checksum;
-import org.apache.hadoop.security.token.Token;
+
 import org.junit.Assert;
 import org.junit.Before;
 import org.junit.Test;
 
 import java.io.EOFException;
-import java.io.IOException;
 import java.util.ArrayList;
-import java.util.Collections;
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
 import java.util.Random;
-import java.util.function.Function;
+import java.util.concurrent.atomic.AtomicBoolean;
 
 import static org.apache.hadoop.hdds.scm.storage.TestChunkInputStream.generateRandomData;
 
@@ -60,7 +52,7 @@ public class TestBlockInputStream {
   private int blockSize;
   private List<ChunkInfo> chunks;
   private Map<String, byte[]> chunkDataMap;
-  private boolean refreshFunctionFlag = false;
+  private AtomicBoolean isRefreshed = new AtomicBoolean();
 
   @Before
   public void setup() throws Exception {
@@ -69,7 +61,7 @@ public class TestBlockInputStream {
     createChunkList(5);
 
     blockStream = new DummyBlockInputStream(blockID, blockSize, null, null,
-        false, null);
+        false, null, chunks, chunkDataMap);
   }
 
   /**
@@ -109,51 +101,6 @@ public class TestBlockInputStream {
     }
   }
 
-  /**
-   * A dummy BlockInputStream to mock read block call to DN.
-   */
-  private class DummyBlockInputStream extends BlockInputStream {
-
-    DummyBlockInputStream(BlockID blockId,
-        long blockLen,
-        Pipeline pipeline,
-        Token<OzoneBlockTokenIdentifier> token,
-        boolean verifyChecksum,
-        XceiverClientManager xceiverClientManager) {
-      super(blockId, blockLen, pipeline, token, verifyChecksum,
-          xceiverClientManager);
-    }
-
-    DummyBlockInputStream(BlockID blockId,
-                          long blockLen,
-                          Pipeline pipeline,
-                          Token<OzoneBlockTokenIdentifier> token,
-                          boolean verifyChecksum,
-                          XceiverClientManager xceiverClientManager,
-                          Function<BlockID, Pipeline> refreshFunction) {
-      super(blockId, blockLen, pipeline, token, verifyChecksum,
-          xceiverClientManager, refreshFunction);
-    }
-
-    @Override
-    protected List<ChunkInfo> getChunkInfos() throws IOException {
-      return chunks;
-    }
-
-    @Override
-    protected void addStream(ChunkInfo chunkInfo) {
-      TestChunkInputStream testChunkInputStream = new TestChunkInputStream();
-      getChunkStreams().add(testChunkInputStream.new DummyChunkInputStream(
-          chunkInfo, null, null, false,
-          chunkDataMap.get(chunkInfo.getChunkName()).clone()));
-    }
-
-    @Override
-    protected synchronized void checkOpen() throws IOException {
-      // No action needed
-    }
-  }
-
   private void seekAndVerify(int pos) throws Exception {
     blockStream.seek(pos);
     Assert.assertEquals("Current position of buffer does not match with the " +
@@ -249,57 +196,18 @@ public class TestBlockInputStream {
     matchWithInputData(b2, 150, 100);
   }
 
-  /**
-   * A dummy BlockInputStream with pipeline refresh function to mock read
-   * block call to DN.
-   */
-  private final class DummyBlockInputStreamWithRetry
-      extends DummyBlockInputStream {
-
-    private int getChunkInfoCount = 0;
-
-    private DummyBlockInputStreamWithRetry(BlockID blockId,
-                                   long blockLen,
-                                   Pipeline pipeline,
-                                   Token<OzoneBlockTokenIdentifier> token,
-                                   boolean verifyChecksum,
-                                   XceiverClientManager xceiverClientManager) {
-      super(blockId, blockLen, pipeline, token, verifyChecksum,
-          xceiverClientManager, blockID -> {
-            refreshFunctionFlag = true;
-            return Pipeline.newBuilder()
-                .setState(Pipeline.PipelineState.OPEN)
-                .setId(PipelineID.randomId())
-                .setType(HddsProtos.ReplicationType.STAND_ALONE)
-                .setFactor(HddsProtos.ReplicationFactor.ONE)
-                .setNodes(Collections.emptyList())
-                .build();
-          });
-    }
-
-    @Override
-    protected List<ChunkInfo> getChunkInfos() throws IOException {
-      if (getChunkInfoCount == 0) {
-        getChunkInfoCount++;
-        throw new ContainerNotFoundException("Exception encountered");
-      } else {
-        return super.getChunkInfos();
-      }
-    }
-  }
-
   @Test
   public void testRefreshPipelineFunction() throws Exception {
     BlockID blockID = new BlockID(new ContainerBlockID(1, 1));
     createChunkList(5);
     BlockInputStream blockInputStreamWithRetry =
         new DummyBlockInputStreamWithRetry(blockID, blockSize, null, null,
-        false, null);
+            false, null, chunks, chunkDataMap, isRefreshed);
 
-    Assert.assertFalse(refreshFunctionFlag);
+    Assert.assertFalse(isRefreshed.get());
     seekAndVerify(50);
     byte[] b = new byte[200];
     blockInputStreamWithRetry.read(b, 0, 200);
-    Assert.assertTrue(refreshFunctionFlag);
+    Assert.assertTrue(isRefreshed.get());
   }
 }
diff --git a/hadoop-hdds/client/src/test/java/org/apache/hadoop/hdds/scm/storage/TestChunkInputStream.java b/hadoop-hdds/client/src/test/java/org/apache/hadoop/hdds/scm/storage/TestChunkInputStream.java
index a5fe26b..94ec157 100644
--- a/hadoop-hdds/client/src/test/java/org/apache/hadoop/hdds/scm/storage/TestChunkInputStream.java
+++ b/hadoop-hdds/client/src/test/java/org/apache/hadoop/hdds/scm/storage/TestChunkInputStream.java
@@ -18,21 +18,17 @@
 
 package org.apache.hadoop.hdds.scm.storage;
 
-import org.apache.hadoop.hdds.client.BlockID;
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ChecksumType;
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ChunkInfo;
-import org.apache.hadoop.hdds.scm.XceiverClientSpi;
 import org.apache.hadoop.ozone.OzoneConfigKeys;
 import org.apache.hadoop.ozone.common.Checksum;
 import org.apache.hadoop.test.GenericTestUtils;
-import org.apache.ratis.thirdparty.com.google.protobuf.ByteString;
+
 import org.junit.Assert;
 import org.junit.Before;
 import org.junit.Test;
 
 import java.io.EOFException;
-import java.util.ArrayList;
-import java.util.List;
 import java.util.Random;
 
 /**
@@ -66,7 +62,8 @@ public class TestChunkInputStream {
             chunkData, 0, CHUNK_SIZE).getProtoBufMessage())
         .build();
 
-    chunkStream = new DummyChunkInputStream(chunkInfo, null, null, true);
+    chunkStream =
+        new DummyChunkInputStream(this, chunkInfo, null, null, true, chunkData);
   }
 
   static byte[] generateRandomData(int length) {
@@ -76,45 +73,6 @@ public class TestChunkInputStream {
   }
 
   /**
-   * A dummy ChunkInputStream to mock read chunk calls to DN.
-   */
-  public class DummyChunkInputStream extends ChunkInputStream {
-
-    // Stores the read chunk data in each readChunk call
-    private List<ByteString> readByteBuffers = new ArrayList<>();
-
-    DummyChunkInputStream(ChunkInfo chunkInfo,
-        BlockID blockId,
-        XceiverClientSpi xceiverClient,
-        boolean verifyChecksum) {
-      super(chunkInfo, blockId, xceiverClient, verifyChecksum);
-    }
-
-    public DummyChunkInputStream(ChunkInfo chunkInfo,
-        BlockID blockId,
-        XceiverClientSpi xceiverClient,
-        boolean verifyChecksum,
-        byte[] data) {
-      super(chunkInfo, blockId, xceiverClient, verifyChecksum);
-      chunkData = data;
-    }
-
-    @Override
-    protected ByteString readChunk(ChunkInfo readChunkInfo) {
-      ByteString byteString = ByteString.copyFrom(chunkData,
-          (int) readChunkInfo.getOffset(),
-          (int) readChunkInfo.getLen());
-      readByteBuffers.add(byteString);
-      return byteString;
-    }
-
-    @Override
-    protected void checkOpen() {
-      // No action needed
-    }
-  }
-
-  /**
    * Match readData with the chunkData byte-wise.
    * @param readData Data read through ChunkInputStream
    * @param inputDataStartIndex first index (inclusive) in chunkData to compare
@@ -159,7 +117,7 @@ public class TestChunkInputStream {
     // chunk from offset 0 to 60 as the checksum boundary is at every 20
     // bytes. Verify that 60 bytes of chunk data are read and stored in the
     // buffers.
-    matchWithInputData(chunkStream.readByteBuffers.get(0).toByteArray(),
+    matchWithInputData(chunkStream.getReadByteBuffers().get(0).toByteArray(),
         0, 60);
 
   }
@@ -187,7 +145,7 @@ public class TestChunkInputStream {
     byte[] b = new byte[30];
     chunkStream.read(b, 0, 30);
     matchWithInputData(b, 25, 30);
-    matchWithInputData(chunkStream.readByteBuffers.get(0).toByteArray(),
+    matchWithInputData(chunkStream.getReadByteBuffers().get(0).toByteArray(),
         20, 40);
 
     // After read, the position of the chunkStream is evaluated from the
diff --git a/hadoop-hdds/common/pom.xml b/hadoop-hdds/common/pom.xml
index 5b68679..07dea52 100644
--- a/hadoop-hdds/common/pom.xml
+++ b/hadoop-hdds/common/pom.xml
@@ -38,6 +38,32 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd">
   <dependencies>
     <dependency>
       <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-common</artifactId>
+      <version>${hadoop.version}</version>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-hdfs</artifactId>
+      <version>${hadoop.version}</version>
+    </dependency>
+    <dependency>
+      <groupId>info.picocli</groupId>
+      <artifactId>picocli</artifactId>
+      <version>3.9.6</version>
+    </dependency>
+    <dependency>
+      <groupId>com.google.protobuf</groupId>
+      <artifactId>protobuf-java</artifactId>
+      <scope>compile</scope>
+    </dependency>
+    <dependency>
+      <groupId>com.google.guava</groupId>
+      <artifactId>guava</artifactId>
+      <scope>compile</scope>
+    </dependency>
+
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
       <artifactId>hadoop-hdds-config</artifactId>
     </dependency>
 
@@ -148,10 +174,16 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd">
       <version>1.16</version>
     </dependency>
     <dependency>
+      <groupId>org.mockito</groupId>
+      <artifactId>mockito-core</artifactId>
+      <scope>test</scope>
+    </dependency>
+    <dependency>
       <groupId>com.fasterxml.jackson.datatype</groupId>
       <artifactId>jackson-datatype-jsr310</artifactId>
       <version>${jackson2.version}</version>
     </dependency>
+
   </dependencies>
 
   <build>
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/DFSConfigKeysLegacy.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/DFSConfigKeysLegacy.java
new file mode 100644
index 0000000..f65d4db
--- /dev/null
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/DFSConfigKeysLegacy.java
@@ -0,0 +1,97 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership.  The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+package org.apache.hadoop.hdds;
+
+/**
+ * Legacy HDFS keys used by ozone.
+ *
+ * They are HDFS-specific config keys. It would be better to use explicit,
+ * Ozone-specific configuration keys to make them more visible.
+ */
+@Deprecated
+public final class DFSConfigKeysLegacy {
+
+  private DFSConfigKeysLegacy() {
+  }
+
+  public static final String DFS_DATANODE_DNS_INTERFACE_KEY =
+      "dfs.datanode.dns.interface";
+  public static final String DFS_DATANODE_DNS_NAMESERVER_KEY =
+      "dfs.datanode.dns.nameserver";
+
+  public static final String DFS_DATANODE_HOST_NAME_KEY =
+      "dfs.datanode.hostname";
+
+  public static final String DFS_DATANODE_DATA_DIR_KEY =
+      "dfs.datanode.data.dir";
+
+  public static final String DFS_DATANODE_USE_DN_HOSTNAME =
+      "dfs.datanode.use.datanode.hostname";
+
+  public static final boolean DFS_DATANODE_USE_DN_HOSTNAME_DEFAULT = false;
+
+  public static final String DFS_XFRAME_OPTION_ENABLED = "dfs.xframe.enabled";
+
+  public static final boolean DFS_XFRAME_OPTION_ENABLED_DEFAULT = true;
+
+  public static final String DFS_XFRAME_OPTION_VALUE = "dfs.xframe.value";
+
+  public static final String DFS_XFRAME_OPTION_VALUE_DEFAULT = "SAMEORIGIN";
+
+  public static final String DFS_METRICS_SESSION_ID_KEY =
+      "dfs.metrics.session-id";
+
+  public static final String NET_TOPOLOGY_NODE_SWITCH_MAPPING_IMPL_KEY =
+      "net.topology.node.switch.mapping.impl";
+
+  public static final String DFS_CLIENT_HTTPS_KEYSTORE_RESOURCE_KEY =
+      "dfs.client.https.keystore.resource";
+
+  public static final String DFS_SERVER_HTTPS_KEYSTORE_RESOURCE_KEY =
+      "dfs.https.server.keystore.resource";
+
+  public static final String DFS_HTTP_POLICY_KEY = "dfs.http.policy";
+
+  public static final String DFS_DATANODE_KERBEROS_PRINCIPAL_KEY =
+      "dfs.datanode.kerberos.principal";
+
+  public static final String DFS_DATANODE_KEYTAB_FILE_KEY =
+      "dfs.datanode.keytab.file";
+
+  public static final String DFS_DATANODE_DISK_CHECK_MIN_GAP_KEY =
+      "dfs.datanode.disk.check.min.gap";
+
+  public static final String DFS_DATANODE_DISK_CHECK_MIN_GAP_DEFAULT =
+      "15m";
+
+  public static final String DFS_DATANODE_DISK_CHECK_TIMEOUT_KEY =
+      "dfs.datanode.disk.check.timeout";
+
+  public static final String DFS_DATANODE_DISK_CHECK_TIMEOUT_DEFAULT =
+      "10m";
+
+  public static final String DFS_DATANODE_FAILED_VOLUMES_TOLERATED_KEY =
+      "dfs.datanode.failed.volumes.tolerated";
+
+  public static final int DFS_DATANODE_FAILED_VOLUMES_TOLERATED_DEFAULT = 0;
+
+  public static final String DFS_METRICS_PERCENTILES_INTERVALS_KEY =
+      "dfs.metrics.percentiles.intervals";
+
+}
+
+
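For illustration only (not part of this patch): the intended usage is to read an Ozone-specific key first and fall back to the legacy HDFS key, as HddsUtils does below for the DNS settings. A minimal sketch, assuming "hdds.datanode.dir" as the preferred key:

    import org.apache.hadoop.hdds.DFSConfigKeysLegacy;
    import org.apache.hadoop.hdds.conf.OzoneConfiguration;

    public class LegacyKeyFallback {
      public static void main(String[] args) {
        OzoneConfiguration conf = new OzoneConfiguration();
        // Prefer the Ozone-specific key; "hdds.datanode.dir" is assumed here
        // purely to illustrate the fallback pattern.
        String dataDirs = conf.get("hdds.datanode.dir");
        if (dataDirs == null) {
          // Fall back to the deprecated HDFS key.
          dataDirs = conf.get(DFSConfigKeysLegacy.DFS_DATANODE_DATA_DIR_KEY);
        }
        System.out.println("datanode dirs: " + dataDirs);
      }
    }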
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsUtils.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsUtils.java
index 01d0e21..b1bc70e 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsUtils.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsUtils.java
@@ -34,24 +34,20 @@ import java.util.OptionalInt;
 import java.util.TimeZone;
 import java.util.concurrent.TimeUnit;
 
-import com.google.common.base.Preconditions;
-
-import org.apache.commons.lang3.StringUtils;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
 import org.apache.hadoop.hdds.client.BlockID;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
-import org.apache.hadoop.hdds.protocolPB.SCMSecurityProtocolClientSideTranslatorPB;
-import org.apache.hadoop.hdds.protocolPB.SCMSecurityProtocolPB;
-import org.apache.hadoop.hdds.scm.ScmConfigKeys;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.protocol.SCMSecurityProtocol;
+import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerCommandRequestProto;
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.WriteChunkRequestProto;
+import org.apache.hadoop.hdds.protocolPB.SCMSecurityProtocolClientSideTranslatorPB;
+import org.apache.hadoop.hdds.protocolPB.SCMSecurityProtocolPB;
+import org.apache.hadoop.hdds.scm.ScmConfigKeys;
 import org.apache.hadoop.hdds.scm.protocolPB.ScmBlockLocationProtocolPB;
-import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.io.retry.RetryPolicies;
 import org.apache.hadoop.io.retry.RetryPolicy;
 import org.apache.hadoop.ipc.Client;
@@ -64,16 +60,16 @@ import org.apache.hadoop.metrics2.source.JvmMetrics;
 import org.apache.hadoop.metrics2.util.MBeans;
 import org.apache.hadoop.net.DNS;
 import org.apache.hadoop.net.NetUtils;
+import org.apache.hadoop.security.UserGroupInformation;
 
+import com.google.common.base.Preconditions;
 import com.google.common.net.HostAndPort;
-
+import org.apache.commons.lang3.StringUtils;
+import static org.apache.hadoop.hdds.DFSConfigKeysLegacy.DFS_DATANODE_DNS_INTERFACE_KEY;
+import static org.apache.hadoop.hdds.DFSConfigKeysLegacy.DFS_DATANODE_DNS_NAMESERVER_KEY;
+import static org.apache.hadoop.hdds.DFSConfigKeysLegacy.DFS_DATANODE_HOST_NAME_KEY;
 import static org.apache.hadoop.hdds.recon.ReconConfigKeys.OZONE_RECON_ADDRESS_KEY;
 import static org.apache.hadoop.hdds.recon.ReconConfigKeys.OZONE_RECON_DATANODE_PORT_DEFAULT;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_DNS_INTERFACE_KEY;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_DNS_NAMESERVER_KEY;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_HOST_NAME_KEY;
-
-import org.apache.hadoop.security.UserGroupInformation;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -352,6 +348,7 @@ public final class HddsUtils {
       if (dnsInterface == null) {
         // Try the legacy configuration keys.
         dnsInterface = conf.get(DFS_DATANODE_DNS_INTERFACE_KEY);
+        dnsInterface = conf.get(DFS_DATANODE_DNS_INTERFACE_KEY);
         nameServer = conf.get(DFS_DATANODE_DNS_NAMESERVER_KEY);
       } else {
         // If HADOOP_SECURITY_DNS_* is set then also attempt hosts file
@@ -552,7 +549,7 @@ public final class HddsUtils {
     MetricsSystem metricsSystem = DefaultMetricsSystem.initialize(serverName);
     try {
       JvmMetrics.create(serverName,
-          configuration.get(DFSConfigKeys.DFS_METRICS_SESSION_ID_KEY),
+          configuration.get(DFSConfigKeysLegacy.DFS_METRICS_SESSION_ID_KEY),
           DefaultMetricsSystem.instance());
     } catch (MetricsException e) {
       LOG.info("Metrics source JvmMetrics already added to DataNode.");
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/StringUtils.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/StringUtils.java
new file mode 100644
index 0000000..a696a0b
--- /dev/null
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/StringUtils.java
@@ -0,0 +1,76 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdds;
+
+import java.io.UnsupportedEncodingException;
+import java.nio.charset.StandardCharsets;
+
+/**
+ * Simple utility class collecting String/byte[] conversion methods.
+ */
+public final class StringUtils {
+
+  private StringUtils() {
+  }
+
+  // Using the charset canonical name for String/byte[] conversions is much
+  // more efficient due to use of cached encoders/decoders.
+  private static final String UTF8_CSN = StandardCharsets.UTF_8.name();
+
+  /**
+   * Decode a specific range of bytes of the given byte array to a string
+   * using UTF8.
+   *
+   * @param bytes  The bytes to be decoded into characters
+   * @param offset The index of the first byte to decode
+   * @param length The number of bytes to decode
+   * @return The decoded string
+   */
+  public static String bytes2String(byte[] bytes, int offset, int length) {
+    try {
+      return new String(bytes, offset, length, UTF8_CSN);
+    } catch (UnsupportedEncodingException e) {
+      // should never happen!
+      throw new IllegalArgumentException("UTF8 encoding is not supported", e);
+    }
+  }
+
+  /**
+   * Decode a specific range of bytes of the given byte array to a string
+   * using UTF8.
+   *
+   * @param bytes The bytes to be decoded into characters
+   * @return The decoded string
+   */
+  public static String bytes2String(byte[] bytes) {
+    return bytes2String(bytes, 0, bytes.length);
+  }
+
+  /**
+   * Converts a string to a byte array using UTF8 encoding.
+   */
+  public static byte[] string2Bytes(String str) {
+    try {
+      return str.getBytes(UTF8_CSN);
+    } catch (UnsupportedEncodingException e) {
+      // should never happen!
+      throw new IllegalArgumentException("UTF8 decoding is not supported", e);
+    }
+  }
+
+}
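A quick round-trip sketch of the new helpers (illustrative only, not part of the patch):

    import org.apache.hadoop.hdds.StringUtils;

    public class StringUtilsRoundTrip {
      public static void main(String[] args) {
        // Encode with UTF-8, then decode the same bytes back.
        byte[] raw = StringUtils.string2Bytes("ozone");
        String full = StringUtils.bytes2String(raw);
        // Decode only the first three bytes ("ozo").
        String prefix = StringUtils.bytes2String(raw, 0, 3);
        System.out.println(full + " / " + prefix);
      }
    }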
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/fs/AbstractSpaceUsageSource.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/fs/AbstractSpaceUsageSource.java
new file mode 100644
index 0000000..2de8da7
--- /dev/null
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/fs/AbstractSpaceUsageSource.java
@@ -0,0 +1,87 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdds.fs;
+
+import org.apache.hadoop.util.Time;
+import org.slf4j.Logger;
+
+import java.io.File;
+import java.io.IOException;
+import java.io.UncheckedIOException;
+import java.util.function.LongSupplier;
+
+/**
+ * Convenience parent class for {@code SpaceUsageSource} implementations.
+ */
+public abstract class AbstractSpaceUsageSource implements SpaceUsageSource {
+
+  private final File file;
+  private final String path;
+
+  /**
+   * @param file the path to check disk usage in
+   */
+  protected AbstractSpaceUsageSource(File file) {
+    this.file = file;
+    try {
+      this.path = file.getCanonicalPath();
+    } catch (IOException e) {
+      throw new UncheckedIOException(e);
+    }
+  }
+
+  /**
+   * Measures execution time of {@code supplier#getAsLong} and logs it via the
+   * given {@code logger}.
+   * @return the same value as returned by {@code supplier#getAsLong}
+   */
+  protected static long time(LongSupplier supplier, Logger logger) {
+    long start = Time.monotonicNow();
+
+    long result = supplier.getAsLong();
+
+    long end = Time.monotonicNow();
+    long elapsed = end - start;
+    logger.debug("Completed check in {} ms, result: {}", elapsed, result);
+
+    return result;
+  }
+
+  protected String getPath() {
+    return path;
+  }
+
+  protected File getFile() {
+    return file;
+  }
+
+  @Override
+  public String toString() {
+    return path;
+  }
+
+  @Override
+  public long getAvailable() {
+    return file.getFreeSpace();
+  }
+
+  @Override
+  public long getCapacity() {
+    return file.getTotalSpace();
+  }
+}
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/fs/CachingSpaceUsageSource.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/fs/CachingSpaceUsageSource.java
new file mode 100644
index 0000000..e915583
--- /dev/null
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/fs/CachingSpaceUsageSource.java
@@ -0,0 +1,142 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdds.fs;
+
+import com.google.common.annotations.VisibleForTesting;
+import com.google.common.base.Preconditions;
+import com.google.common.util.concurrent.ThreadFactoryBuilder;
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import javax.annotation.Nullable;
+import java.time.Duration;
+import java.util.OptionalLong;
+import java.util.concurrent.Executors;
+import java.util.concurrent.ScheduledExecutorService;
+import java.util.concurrent.ScheduledFuture;
+import java.util.concurrent.atomic.AtomicLong;
+
+import static java.util.concurrent.TimeUnit.MILLISECONDS;
+
+/**
+ * Stores space usage and refreshes it periodically.
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Evolving
+public class CachingSpaceUsageSource implements SpaceUsageSource {
+
+  private static final Logger LOG =
+      LoggerFactory.getLogger(CachingSpaceUsageSource.class);
+
+  private final ScheduledExecutorService executor;
+  private final AtomicLong cachedValue = new AtomicLong();
+  private final Duration refresh;
+  private final SpaceUsageSource source;
+  private final SpaceUsagePersistence persistence;
+  private boolean running;
+  private ScheduledFuture<?> scheduledFuture;
+
+  public CachingSpaceUsageSource(SpaceUsageCheckParams params) {
+    this(params, createExecutor(params));
+  }
+
+  @VisibleForTesting
+  CachingSpaceUsageSource(SpaceUsageCheckParams params,
+      ScheduledExecutorService executor) {
+    Preconditions.checkArgument(params != null, "params == null");
+
+    refresh = params.getRefresh();
+    source = params.getSource();
+    persistence = params.getPersistence();
+    this.executor = executor;
+
+    Preconditions.checkArgument(refresh.isZero() == (executor == null),
+        "executor should be provided if and only if refresh is requested");
+
+    loadInitialValue();
+  }
+
+  @Override
+  public long getCapacity() {
+    return source.getCapacity();
+  }
+
+  @Override
+  public long getAvailable() {
+    return source.getAvailable();
+  }
+
+  @Override
+  public long getUsedSpace() {
+    return cachedValue.get();
+  }
+
+  public void start() {
+    if (executor != null) {
+      long initialDelay = cachedValue.get() > 0 ? refresh.toMillis() : 0;
+      if (!running) {
+        scheduledFuture = executor.scheduleWithFixedDelay(
+            this::refresh, initialDelay, refresh.toMillis(), MILLISECONDS);
+        running = true;
+      }
+    } else {
+      refresh();
+    }
+  }
+
+  public void shutdown() {
+    persistence.save(this); // save cached value
+
+    if (executor != null) {
+      if (running && scheduledFuture != null) {
+        scheduledFuture.cancel(true);
+      }
+      running = false;
+
+      executor.shutdown();
+    }
+  }
+
+  private void loadInitialValue() {
+    final OptionalLong initialValue = persistence.load();
+    initialValue.ifPresent(cachedValue::set);
+  }
+
+  private void refresh() {
+    try {
+      cachedValue.set(source.getUsedSpace());
+    } catch (RuntimeException e) {
+      LOG.warn("Error refreshing space usage for {}", source, e);
+    }
+  }
+
+  private static @Nullable ScheduledExecutorService createExecutor(
+      SpaceUsageCheckParams params) {
+
+    if (params.getRefresh().isZero()) {
+      return null;
+    }
+
+    return Executors.newScheduledThreadPool(1,
+        new ThreadFactoryBuilder().setDaemon(true)
+            .setNameFormat("DiskUsage-" + params.getPath() + "-%n")
+            .build());
+  }
+}
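To make the lifecycle concrete, a minimal wiring sketch (illustrative only; the /data/hdds path is hypothetical). It sits in the same package so it can reuse the package-level types directly:

    package org.apache.hadoop.hdds.fs;

    import java.io.File;
    import java.time.Duration;

    public final class CachingUsageExample {
      public static void main(String[] args) {
        File dir = new File("/data/hdds");          // hypothetical volume root
        SpaceUsageCheckParams params = new SpaceUsageCheckParams(
            dir,
            new DU(dir, "*.tmp.*"),                 // slow source, refreshed in background
            Duration.ofMinutes(10),                 // refresh period
            SpaceUsagePersistence.None.INSTANCE);   // do not persist the cached value

        CachingSpaceUsageSource usage = new CachingSpaceUsageSource(params);
        usage.start();                              // schedules the periodic refresh
        try {
          // Returns the cached value; never blocks on a running 'du'.
          System.out.println("used bytes: " + usage.getUsedSpace());
        } finally {
          usage.shutdown();                         // persists (no-op here) and stops the executor
        }
      }
    }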
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/fs/DU.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/fs/DU.java
new file mode 100644
index 0000000..b81d4cf
--- /dev/null
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/fs/DU.java
@@ -0,0 +1,136 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdds.fs;
+
+import org.apache.hadoop.ozone.OzoneConsts;
+import org.apache.hadoop.util.Shell;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.BufferedReader;
+import java.io.File;
+import java.io.IOException;
+import java.io.UncheckedIOException;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.concurrent.atomic.AtomicLong;
+
+/**
+ * Uses the unix 'du' program to calculate space usage.  Can be slow if there
+ * are many files.
+ *
+ * @see SpaceUsageSource
+ */
+public class DU extends AbstractSpaceUsageSource {
+
+  private static final Logger LOG = LoggerFactory.getLogger(DU.class);
+
+  private final DUShell duShell;
+  private final String[] command;
+  private final String commandString;
+  private final String excludePattern;
+
+  public DU(File path) {
+    this(path, null);
+  }
+
+  public DU(File path, String excludePattern) {
+    super(path);
+
+    this.excludePattern = excludePattern;
+    command = constructCommand();
+    commandString = String.join(" ", command);
+    duShell = new DUShell();
+  }
+
+  @Override
+  public long getUsedSpace() {
+    return time(duShell::getUsed, LOG);
+  }
+
+  private String[] constructCommand() {
+    List<String> parts = new LinkedList<>();
+    parts.add("du");
+    parts.add("-sk");
+    if (excludePattern != null) {
+      if (Shell.MAC) {
+        parts.add("-I");
+      } else {
+        parts.add("--exclude");
+      }
+      parts.add(excludePattern);
+    }
+    parts.add(getPath());
+    return parts.toArray(new String[0]);
+  }
+
+  private final class DUShell extends Shell {
+
+    private final AtomicLong value = new AtomicLong();
+
+    /**
+     * @throws UncheckedIOException if shell command exited with error code
+     */
+    public long getUsed() {
+      try {
+        super.run();
+        return value.get();
+      } catch (IOException e) {
+        throw new UncheckedIOException(e);
+      }
+    }
+
+    @Override
+    public String toString() {
+      return commandString + "\n" + value.get() + "\t" + getPath();
+    }
+
+    @Override
+    protected String[] getExecString() {
+      return command;
+    }
+
+    @Override
+    protected void parseExecResult(BufferedReader lines) throws IOException {
+      String line = lines.readLine();
+      if (line == null) {
+        throw new IOException("Expecting a line not the end of stream");
+      }
+
+      String[] tokens = line.split("\t");
+      if (tokens.length == 0) {
+        throw new IOException("Illegal du output");
+      }
+
+      long kilobytes = Long.parseLong(tokens[0]);
+      value.set(kilobytes * OzoneConsts.KB);
+    }
+  }
+
+  @SuppressWarnings("squid:S106") // command-line program, output to stdout
+  public static void main(String[] args) {
+    String path = ".";
+    if (args.length > 0) {
+      path = args[0];
+    }
+
+    DU du = new DU(new File(path));
+    du.duShell.getUsed();
+    System.out.println(du.duShell);
+  }
+}
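For reference, with the exclude pattern used by DUFactory the assembled command is 'du -sk --exclude *.tmp.* <path>' on Linux ('-I' replaces '--exclude' on macOS), and the single '<kilobytes>\t<path>' output line is converted to bytes. A minimal programmatic sketch (path is hypothetical):

    import java.io.File;
    import org.apache.hadoop.hdds.fs.DU;

    public class DUExample {
      public static void main(String[] args) {
        // Exclude temporary chunk files from the calculation.
        DU du = new DU(new File("/data/hdds"), "*.tmp.*");
        // Runs 'du -sk' and converts the reported kilobytes to bytes;
        // throws UncheckedIOException if the command fails.
        long usedBytes = du.getUsedSpace();
        System.out.println(usedBytes + " bytes used under " + du);
      }
    }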
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/fs/DUFactory.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/fs/DUFactory.java
new file mode 100644
index 0000000..118da1f
--- /dev/null
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/fs/DUFactory.java
@@ -0,0 +1,90 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdds.fs;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdds.conf.Config;
+import org.apache.hadoop.hdds.conf.ConfigGroup;
+import org.apache.hadoop.hdds.conf.ConfigTag;
+import org.apache.hadoop.hdds.conf.ConfigType;
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+
+import java.io.File;
+import java.time.Duration;
+
+/**
+ * Uses DU for all volumes.  Saves used value in cache file.
+ */
+public class DUFactory implements SpaceUsageCheckFactory {
+
+  private static final String DU_CACHE_FILE = "scmUsed";
+  private static final String EXCLUDE_PATTERN = "*.tmp.*";
+
+  private static final String CONFIG_PREFIX = "hdds.datanode.du";
+
+  private Conf conf;
+
+  @Override
+  public SpaceUsageCheckFactory setConfiguration(Configuration configuration) {
+    conf = OzoneConfiguration.of(configuration).getObject(Conf.class);
+    return this;
+  }
+
+  @Override
+  public SpaceUsageCheckParams paramsFor(File dir) {
+    Duration refreshPeriod = conf.getRefreshPeriod();
+
+    SpaceUsageSource source = new DU(dir, EXCLUDE_PATTERN);
+
+    SpaceUsagePersistence persistence = new SaveSpaceUsageToFile(
+        new File(dir, DU_CACHE_FILE), refreshPeriod);
+
+    return new SpaceUsageCheckParams(dir, source, refreshPeriod, persistence);
+  }
+
+  /**
+   * Configuration for {@link DUFactory}.
+   */
+  @ConfigGroup(prefix = CONFIG_PREFIX)
+  public static class Conf {
+
+    private static final String REFRESH_PERIOD = "refresh.period";
+
+    @Config(
+        key = REFRESH_PERIOD,
+        defaultValue = "1h",
+        type = ConfigType.TIME,
+        tags = { ConfigTag.DATANODE },
+        description = "Disk space usage information will be refreshed with the"
+            + "specified period following the completion of the last check."
+    )
+    private long refreshPeriod;
+
+    public void setRefreshPeriod(long millis) {
+      refreshPeriod = millis;
+    }
+
+    public Duration getRefreshPeriod() {
+      return Duration.ofMillis(refreshPeriod);
+    }
+
+    static String configKeyForRefreshPeriod() {
+      return CONFIG_PREFIX + "." + REFRESH_PERIOD;
+    }
+  }
+}
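A short usage sketch (illustrative only; the directory is hypothetical and the refresh value overrides the 1h default):

    import java.io.File;
    import org.apache.hadoop.hdds.conf.OzoneConfiguration;
    import org.apache.hadoop.hdds.fs.DUFactory;
    import org.apache.hadoop.hdds.fs.SpaceUsageCheckParams;

    public class DUFactoryExample {
      public static void main(String[] args) {
        OzoneConfiguration conf = new OzoneConfiguration();
        // Key formed as CONFIG_PREFIX + "." + REFRESH_PERIOD.
        conf.set("hdds.datanode.du.refresh.period", "30m");

        SpaceUsageCheckParams params = new DUFactory()
            .setConfiguration(conf)
            .paramsFor(new File("/data/hdds"));   // hypothetical volume root

        System.out.println("refresh every " + params.getRefresh());
      }
    }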
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/fs/DedicatedDiskSpaceUsage.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/fs/DedicatedDiskSpaceUsage.java
new file mode 100644
index 0000000..1d651cc
--- /dev/null
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/fs/DedicatedDiskSpaceUsage.java
@@ -0,0 +1,52 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdds.fs;
+
+import org.apache.hadoop.fs.DF;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.File;
+
+/**
+ * Fast but inaccurate class to tell how much space a directory is using.
+ * This implementation makes the assumption that the entire mount is used for
+ * the directory.  This is similar to {@link DF}, which (despite the name) also
+ * uses {@code java.io.File} to get filesystem space usage information.
+ *
+ * @see SpaceUsageSource
+ */
+public class DedicatedDiskSpaceUsage extends AbstractSpaceUsageSource {
+
+  private static final Logger LOG =
+      LoggerFactory.getLogger(DedicatedDiskSpaceUsage.class);
+
+  public DedicatedDiskSpaceUsage(File path) {
+    super(path);
+  }
+
+  @Override
+  public long getUsedSpace() {
+    return time(this::calculateUsedSpace, LOG);
+  }
+
+  private long calculateUsedSpace() {
+    return getCapacity() - getAvailable();
+  }
+
+}
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/fs/DedicatedDiskSpaceUsageFactory.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/fs/DedicatedDiskSpaceUsageFactory.java
new file mode 100644
index 0000000..37953d9
--- /dev/null
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/fs/DedicatedDiskSpaceUsageFactory.java
@@ -0,0 +1,86 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdds.fs;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdds.conf.Config;
+import org.apache.hadoop.hdds.conf.ConfigGroup;
+import org.apache.hadoop.hdds.conf.ConfigTag;
+import org.apache.hadoop.hdds.conf.ConfigType;
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+
+import java.io.File;
+import java.time.Duration;
+
+/**
+ * Uses DedicatedDiskSpaceUsage for all volumes.  Does not save results since
+ * the information is relatively cheap to obtain.
+ */
+public class DedicatedDiskSpaceUsageFactory implements SpaceUsageCheckFactory {
+
+  private static final String CONFIG_PREFIX = "hdds.datanode.df";
+
+  private Conf conf;
+
+  @Override
+  public SpaceUsageCheckFactory setConfiguration(Configuration configuration) {
+    conf = OzoneConfiguration.of(configuration).getObject(Conf.class);
+    return this;
+  }
+
+  @Override
+  public SpaceUsageCheckParams paramsFor(File dir) {
+    Duration refreshPeriod = conf.getRefreshPeriod();
+
+    SpaceUsageSource source = new DedicatedDiskSpaceUsage(dir);
+
+    return new SpaceUsageCheckParams(dir, source, refreshPeriod,
+        SpaceUsagePersistence.None.INSTANCE);
+  }
+
+  /**
+   * Configuration for {@link DedicatedDiskSpaceUsageFactory}.
+   */
+  @ConfigGroup(prefix = CONFIG_PREFIX)
+  public static class Conf {
+
+    private static final String REFRESH_PERIOD = "refresh.period";
+
+    @Config(
+        key = REFRESH_PERIOD,
+        defaultValue = "5m",
+        type = ConfigType.TIME,
+        tags = { ConfigTag.DATANODE },
+        description = "Disk space usage information will be refreshed with the"
+            + "specified period following the completion of the last check."
+    )
+    private long refreshPeriod;
+
+    public void setRefreshPeriod(long millis) {
+      refreshPeriod = millis;
+    }
+
+    public Duration getRefreshPeriod() {
+      return Duration.ofMillis(refreshPeriod);
+    }
+
+    static String configKeyForRefreshPeriod() {
+      return CONFIG_PREFIX + "." + REFRESH_PERIOD;
+    }
+  }
+}
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/fs/SaveSpaceUsageToFile.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/fs/SaveSpaceUsageToFile.java
new file mode 100644
index 0000000..cd9a545
--- /dev/null
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/fs/SaveSpaceUsageToFile.java
@@ -0,0 +1,129 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdds.fs;
+
+import com.google.common.base.Preconditions;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.File;
+import java.io.FileNotFoundException;
+import java.io.FileOutputStream;
+import java.io.IOException;
+import java.io.OutputStream;
+import java.io.OutputStreamWriter;
+import java.io.Writer;
+import java.nio.file.Files;
+import java.time.Duration;
+import java.time.Instant;
+import java.util.OptionalLong;
+import java.util.Scanner;
+
+import static java.nio.charset.StandardCharsets.UTF_8;
+
+/**
+ * Saves and loads space usage information to/from a file.
+ */
+public class SaveSpaceUsageToFile implements SpaceUsagePersistence {
+
+  private static final Logger LOG =
+      LoggerFactory.getLogger(SaveSpaceUsageToFile.class);
+
+  private final File file;
+  private final Duration expiry;
+
+  public SaveSpaceUsageToFile(File file, Duration expiry) {
+    this.file = file;
+    this.expiry = expiry;
+
+    Preconditions.checkArgument(file != null, "file == null");
+    Preconditions.checkArgument(expiry != null, "expiry == null");
+    Preconditions.checkArgument(!expiry.isNegative() && !expiry.isZero(),
+        "invalid expiry: %s", expiry);
+  }
+
+  /**
+   * Read in the cached DU value and return it if it is not older than the
+   * given expiry. Slight imprecision is not critical
+   * and skipping DU can significantly shorten the startup time.
+   * If the cached value is not available or is too old, an empty
+   * {@code OptionalLong} is returned.
+   */
+  @Override
+  public OptionalLong load() {
+    try (Scanner sc = new Scanner(file, UTF_8.name())) {
+      // Get the recorded value from the file.
+      if (!sc.hasNextLong()) {
+        LOG.info("Cached usage info in {} has no value", file);
+        return OptionalLong.empty();
+      }
+      long cachedValue = sc.nextLong();
+
+      // Get the recorded time from the file.
+      if (!sc.hasNextLong()) {
+        LOG.info("Cached usage info in {} has no time", file);
+        return OptionalLong.empty();
+      }
+
+      Instant time = Instant.ofEpochMilli(sc.nextLong());
+      if (isExpired(time)) {
+        LOG.info("Cached usage info in {} is expired: {} ", file, time);
+        return OptionalLong.empty();
+      }
+
+      LOG.info("Cached usage info found in {}: {} at {}", file, cachedValue,
+          time);
+
+      return OptionalLong.of(cachedValue);
+    } catch (FileNotFoundException e) {
+      LOG.info("Cached usage info file {} not found", file);
+      return OptionalLong.empty();
+    }
+  }
+
+  /**
+   * Write the current usage info to the cache file.
+   */
+  @Override
+  public void save(SpaceUsageSource source) {
+    try {
+      Files.deleteIfExists(file.toPath());
+    } catch (IOException e) {
+      LOG.warn("Failed to delete old usage file {}: {}.", file, e.getMessage());
+    }
+
+    long used = source.getUsedSpace();
+    if (used > 0) {
+      Instant now = Instant.now();
+      try (OutputStream fileOutput = new FileOutputStream(file);
+           Writer out = new OutputStreamWriter(fileOutput, UTF_8)) {
+        // time is written last, so that truncated writes won't be valid.
+        out.write(used + " " + now.toEpochMilli());
+        out.flush();
+      } catch (IOException e) {
+        // If write failed, the volume might be bad. Since the cache file is
+        // not critical, log the error and continue.
+        LOG.warn("Failed to write usage to {}", file, e);
+      }
+    }
+  }
+
+  private boolean isExpired(Instant time) {
+    return time.plus(expiry).isBefore(Instant.now());
+  }
+}
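The cache file holds a single '<usedBytes> <epochMillis>' line, with the timestamp written last so a truncated write is rejected on load. A minimal save/load sketch (illustrative; paths are hypothetical):

    import java.io.File;
    import java.time.Duration;
    import java.util.OptionalLong;
    import org.apache.hadoop.hdds.fs.DedicatedDiskSpaceUsage;
    import org.apache.hadoop.hdds.fs.SaveSpaceUsageToFile;

    public class SaveUsageExample {
      public static void main(String[] args) {
        File cacheFile = new File("/data/hdds/scmUsed");
        SaveSpaceUsageToFile persistence =
            new SaveSpaceUsageToFile(cacheFile, Duration.ofHours(1));

        // Persist the current value: writes "<usedBytes> <epochMillis>".
        persistence.save(new DedicatedDiskSpaceUsage(new File("/data/hdds")));

        // Later (e.g. on restart): use the cached value only if younger than 1h.
        OptionalLong cached = persistence.load();
        System.out.println(cached.isPresent()
            ? "cached used bytes: " + cached.getAsLong()
            : "no fresh cached value, full scan needed");
      }
    }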
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/fs/SpaceUsageCheckFactory.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/fs/SpaceUsageCheckFactory.java
new file mode 100644
index 0000000..60e24f2
--- /dev/null
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/fs/SpaceUsageCheckFactory.java
@@ -0,0 +1,145 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdds.fs;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdds.conf.Config;
+import org.apache.hadoop.hdds.conf.ConfigGroup;
+import org.apache.hadoop.hdds.conf.ConfigTag;
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.File;
+import java.io.UncheckedIOException;
+import java.lang.reflect.Constructor;
+import java.lang.reflect.InvocationTargetException;
+
+/**
+ * Configures disk space checks (du, df, etc.) for HDDS volumes, allowing
+ * different implementations and parameters for different volumes.
+ * E.g. if a volume has a dedicated disk, it can use the faster
+ * df-based implementation.
+ *
+ * {@code SpaceUsageCheckFactory} implementations should have
+ * a no-arg constructor for config-based instantiation.
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Evolving
+public interface SpaceUsageCheckFactory {
+
+  /**
+   * Creates configuration for the HDDS volume rooted at {@code dir}.
+   *
+   * @throws UncheckedIOException if canonical path for {@code dir} cannot be
+   * resolved
+   */
+  SpaceUsageCheckParams paramsFor(File dir);
+
+  /**
+   * Updates the factory with global configuration.
+   * @return factory configured with {@code conf}
+   */
+  default SpaceUsageCheckFactory setConfiguration(Configuration conf) {
+    // override if configurable
+    return this;
+  }
+
+  /**
+   * Creates a "global" implementation based on the class specified for
+   * {@link Conf#setClassName(String)} in {@code conf}.
+   * Defaults to {@link DUFactory} if no class is configured or it cannot be
+   * instantiated.
+   */
+  static SpaceUsageCheckFactory create(Configuration config) {
+    Conf conf = OzoneConfiguration.of(config).getObject(Conf.class);
+    Class<? extends SpaceUsageCheckFactory> aClass = null;
+    String className = conf.getClassName();
+    if (className != null && !className.isEmpty()) {
+      try {
+        aClass = config.getClassByName(className)
+            .asSubclass(SpaceUsageCheckFactory.class);
+      } catch (ClassNotFoundException | RuntimeException e) {
+        Logger log = LoggerFactory.getLogger(SpaceUsageCheckFactory.class);
+        log.warn("Error trying to create SpaceUsageCheckFactory: '{}'",
+            className, e);
+      }
+    }
+
+    SpaceUsageCheckFactory instance = null;
+
+    if (aClass != null) {
+      try {
+        Constructor<? extends SpaceUsageCheckFactory> constructor =
+            aClass.getConstructor();
+        instance = constructor.newInstance();
+      } catch (IllegalAccessException | InstantiationException |
+          InvocationTargetException | NoSuchMethodException e) {
+
+        Logger log = LoggerFactory.getLogger(SpaceUsageCheckFactory.class);
+        log.warn("Error trying to create {}", aClass, e);
+      }
+    }
+
+    if (instance == null) {
+      instance = defaultImplementation();
+    }
+
+    return instance.setConfiguration(config);
+  }
+
+  static DUFactory defaultImplementation() {
+    return new DUFactory();
+  }
+
+  String CONFIG_PREFIX = "hdds.datanode.du.factory";
+
+  /**
+   * Configuration for {@link SpaceUsageCheckFactory}.
+   */
+  @ConfigGroup(prefix = CONFIG_PREFIX)
+  class Conf {
+
+    private static final String CLASSNAME_KEY = "classname";
+
+    @Config(
+        key = CLASSNAME_KEY,
+        defaultValue = "",
+        tags = { ConfigTag.DATANODE },
+        description = "The fully qualified name of the factory class that "
+            + "creates objects for providing disk space usage information.  It "
+            + "should implement the SpaceUsageCheckFactory interface."
+    )
+    private String className;
+
+    public void setClassName(String className) {
+      this.className = className;
+    }
+
+    public String getClassName() {
+      return className;
+    }
+
+    public static String configKeyForClassName() {
+      return CONFIG_PREFIX + "." + CLASSNAME_KEY;
+    }
+  }
+
+}
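
The nested Conf class above means the implementation is chosen through the
"hdds.datanode.du.factory.classname" key; if the key is empty or the class
cannot be instantiated, create() falls back to DUFactory. A minimal sketch of
selecting a custom factory through that key (the class name and the volume
path below are placeholders, not part of this patch):

    import java.io.File;

    import org.apache.hadoop.hdds.conf.OzoneConfiguration;
    import org.apache.hadoop.hdds.fs.SpaceUsageCheckFactory;
    import org.apache.hadoop.hdds.fs.SpaceUsageCheckParams;

    public final class SpaceUsageFactorySelection {
      private SpaceUsageFactorySelection() { }

      public static void main(String[] args) {
        OzoneConfiguration conf = new OzoneConfiguration();
        // Key resolves to "hdds.datanode.du.factory.classname"; create()
        // falls back to DUFactory if this (hypothetical) class is missing.
        conf.set(SpaceUsageCheckFactory.Conf.configKeyForClassName(),
            "org.example.MySpaceUsageCheckFactory");

        SpaceUsageCheckFactory factory = SpaceUsageCheckFactory.create(conf);
        SpaceUsageCheckParams params =
            factory.paramsFor(new File("/data/hdds"));
        System.out.println(params.getPath() + " refresh=" + params.getRefresh());
      }
    }
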
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/fs/SpaceUsageCheckParams.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/fs/SpaceUsageCheckParams.java
new file mode 100644
index 0000000..a16d5c1
--- /dev/null
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/fs/SpaceUsageCheckParams.java
@@ -0,0 +1,92 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdds.fs;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+
+import java.io.File;
+import java.io.IOException;
+import java.io.UncheckedIOException;
+import java.time.Duration;
+
+import static com.google.common.base.Preconditions.checkArgument;
+
+/**
+ * Parameters for performing disk space usage checks.  Bundles the source of the
+ * information, the refresh duration, and a way to save/load the last value.
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Evolving
+public class SpaceUsageCheckParams {
+
+  private final SpaceUsageSource source;
+  private final Duration refresh;
+  private final SpaceUsagePersistence persistence;
+  private final String path;
+  private final File dir;
+
+  /**
+   * @param refresh The period of refreshing space usage information from
+   *                {@code source}.  May be {@link Duration#ZERO} to skip
+   *                periodic refresh, but cannot be negative.
+   * @throws UncheckedIOException if canonical path for {@code dir} cannot be
+   * resolved
+   */
+  public SpaceUsageCheckParams(File dir, SpaceUsageSource source,
+      Duration refresh, SpaceUsagePersistence persistence) {
+
+    checkArgument(dir != null, "dir == null");
+    checkArgument(source != null, "source == null");
+    checkArgument(refresh != null, "refresh == null");
+    checkArgument(persistence != null, "persistence == null");
+    checkArgument(!refresh.isNegative(), "refresh is negative");
+
+    this.dir = dir;
+    this.source = source;
+    this.refresh = refresh;
+    this.persistence = persistence;
+
+    try {
+      path = dir.getCanonicalPath();
+    } catch (IOException e) {
+      throw new UncheckedIOException(e);
+    }
+  }
+
+  public File getDir() {
+    return dir;
+  }
+
+  public String getPath() {
+    return path;
+  }
+
+  public SpaceUsageSource getSource() {
+    return source;
+  }
+
+  public Duration getRefresh() {
+    return refresh;
+  }
+
+  public SpaceUsagePersistence getPersistence() {
+    return persistence;
+  }
+
+}
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/fs/SpaceUsagePersistence.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/fs/SpaceUsagePersistence.java
new file mode 100644
index 0000000..fca90be
--- /dev/null
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/fs/SpaceUsagePersistence.java
@@ -0,0 +1,63 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdds.fs;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+
+import java.util.OptionalLong;
+
+/**
+ * Interface for saving and loading space usage information.
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Evolving
+public interface SpaceUsagePersistence {
+
+  /**
+   * @return an {@link OptionalLong} with the value if loaded successfully,
+   * otherwise an empty one
+   */
+  OptionalLong load();
+
+  /**
+   * Save the space usage information obtained from
+   * {@link SpaceUsageSource#getUsedSpace()}.
+   */
+  void save(SpaceUsageSource source);
+
+  /**
+   * Does not persist space usage information at all.  Use for sources that are
+   * relatively cheap to use (and also for testing).
+   */
+  class None implements SpaceUsagePersistence {
+
+    public static final SpaceUsagePersistence INSTANCE = new None();
+
+    @Override
+    public OptionalLong load() {
+      return OptionalLong.empty();
+    }
+
+    @Override
+    public void save(SpaceUsageSource source) {
+      // no-op
+    }
+  }
+
+}
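
Any other implementation only has to honour the load/save contract above. A
minimal in-memory variant (purely illustrative, not part of this patch) could
look like:

    import java.util.OptionalLong;

    import org.apache.hadoop.hdds.fs.SpaceUsagePersistence;
    import org.apache.hadoop.hdds.fs.SpaceUsageSource;

    /** Keeps the last reported value in memory; loses it on restart. */
    class InMemorySpaceUsagePersistence implements SpaceUsagePersistence {

      private volatile Long lastValue;

      @Override
      public OptionalLong load() {
        Long value = lastValue;
        return value == null ? OptionalLong.empty() : OptionalLong.of(value);
      }

      @Override
      public void save(SpaceUsageSource source) {
        lastValue = source.getUsedSpace();
      }
    }
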
diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/StorageContainerServiceProvider.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/fs/SpaceUsageSource.java
similarity index 58%
copy from hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/StorageContainerServiceProvider.java
copy to hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/fs/SpaceUsageSource.java
index db052a7..59ba320 100644
--- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/StorageContainerServiceProvider.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/fs/SpaceUsageSource.java
@@ -1,6 +1,4 @@
-package org.apache.hadoop.ozone.recon.spi;
-
-/**
+/*
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements.  See the NOTICE file
  * distributed with this work for additional information
@@ -17,9 +15,29 @@ package org.apache.hadoop.ozone.recon.spi;
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
+package org.apache.hadoop.hdds.fs;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+
+import java.io.UncheckedIOException;
 
 /**
- * Interface to access SCM endpoints.
+ * Interface for implementations that can tell how much space
+ * is used in a directory.
  */
-public interface StorageContainerServiceProvider {
+@InterfaceAudience.Private
+@InterfaceStability.Evolving
+public interface SpaceUsageSource {
+
+  /**
+   * @return space usage in bytes
+   * @throws UncheckedIOException if an I/O exception occurs while
+   * calculating space usage
+   */
+  long getUsedSpace();
+
+  long getCapacity();
+
+  long getAvailable();
 }
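
A df-style source can be built directly on java.io.File space queries and
plugged into the SpaceUsageCheckParams introduced earlier in this change. The
sketch below is illustrative only; the refresh period is an arbitrary example:

    import java.io.File;
    import java.time.Duration;

    import org.apache.hadoop.hdds.fs.SpaceUsageCheckParams;
    import org.apache.hadoop.hdds.fs.SpaceUsagePersistence;
    import org.apache.hadoop.hdds.fs.SpaceUsageSource;

    /** Reports whole-partition usage for a directory, similar to df. */
    class FileBackedSpaceUsageSource implements SpaceUsageSource {

      private final File dir;

      FileBackedSpaceUsageSource(File dir) {
        this.dir = dir;
      }

      @Override
      public long getCapacity() {
        return dir.getTotalSpace();
      }

      @Override
      public long getAvailable() {
        return dir.getUsableSpace();
      }

      @Override
      public long getUsedSpace() {
        return getCapacity() - getAvailable();
      }

      /** Bundles this source with a 5-minute refresh and no persistence. */
      static SpaceUsageCheckParams paramsFor(File dir) {
        return new SpaceUsageCheckParams(dir,
            new FileBackedSpaceUsageSource(dir), Duration.ofMinutes(5),
            SpaceUsagePersistence.None.INSTANCE);
      }
    }
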
diff --git a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/views/Overview/Overview.less b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/fs/package-info.java
similarity index 87%
copy from hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/views/Overview/Overview.less
copy to hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/fs/package-info.java
index c8e74d2..a7f473e 100644
--- a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/views/Overview/Overview.less
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/fs/package-info.java
@@ -1,4 +1,4 @@
-/**
+/*
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements.  See the NOTICE file
  * distributed with this work for additional information
@@ -16,12 +16,7 @@
  * limitations under the License.
  */
 
-.overview-content {
-  margin: 20px 5px;
-  .icon-small {
-    font-size: 16px;
-  }
-  .meta {
-    font-size: 12px;
-  }
-}
+/**
+ * Filesystem-related utilities.
+ */
+package org.apache.hadoop.hdds.fs;
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/ratis/RatisHelper.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/ratis/RatisHelper.java
index 27970a5..9dc33c3 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/ratis/RatisHelper.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/ratis/RatisHelper.java
@@ -24,6 +24,7 @@ import java.security.cert.X509Certificate;
 import java.util.Collection;
 import java.util.Collections;
 import java.util.List;
+import java.util.Map;
 import java.util.UUID;
 import java.util.concurrent.TimeUnit;
 import java.util.stream.Collectors;
@@ -38,13 +39,10 @@ import org.apache.hadoop.hdds.security.x509.certificate.authority.CertificateSer
 import org.apache.hadoop.hdds.security.x509.certificate.client.CertificateClient;
 import org.apache.hadoop.hdds.security.x509.certificate.utils.CertificateCodec;
 import org.apache.hadoop.ozone.OzoneConfigKeys;
-import org.apache.hadoop.ozone.OzoneConsts;
 
 import org.apache.ratis.RaftConfigKeys;
 import org.apache.ratis.client.RaftClient;
-import org.apache.ratis.client.RaftClientConfigKeys;
 import org.apache.ratis.conf.RaftProperties;
-import org.apache.ratis.grpc.GrpcConfigKeys;
 import org.apache.ratis.grpc.GrpcFactory;
 import org.apache.ratis.grpc.GrpcTlsConfig;
 import org.apache.ratis.proto.RaftProtos;
@@ -57,17 +55,30 @@ import org.apache.ratis.retry.RetryPolicy;
 import org.apache.ratis.rpc.RpcType;
 import org.apache.ratis.rpc.SupportedRpcType;
 import org.apache.ratis.thirdparty.com.google.protobuf.ByteString;
-import org.apache.ratis.util.SizeInBytes;
 import org.apache.ratis.util.TimeDuration;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
+import static org.apache.hadoop.ozone.conf.DatanodeRatisServerConfig.DATANODE_RATIS_SERVER_CONFIG_PREFIX;
+
 /**
  * Ratis helper methods.
  */
 public interface RatisHelper {
   Logger LOG = LoggerFactory.getLogger(RatisHelper.class);
 
+  // Ratis Client and Grpc header regex filters.
+  String RATIS_CLIENT_HEADER_REGEX = "raft\\.client\\.([a-z\\.]+)";
+  String RATIS_GRPC_CLIENT_HEADER_REGEX = "raft\\.grpc\\.(?!server|tls)" +
+      "([a-z\\.]+)";
+
+  // Ratis Server and Server Grpc header regex filters.
+  String RATIS_SERVER_HEADER_REGEX = "datanode\\.ratis\\.raft\\.server\\" +
+      ".([a-z\\.]+)";
+  String RATIS_SERVER_GRPC_HEADER_REGEX = "datanode\\.ratis\\.raft\\.grpc\\" +
+      ".([a-z\\.]+)";
+
+
   static String toRaftPeerIdString(DatanodeDetails id) {
     return id.getUuidString();
   }
@@ -140,25 +151,12 @@ public interface RatisHelper {
   }
 
   static RaftClient newRaftClient(RpcType rpcType, Pipeline pipeline,
-      RetryPolicy retryPolicy, int maxOutStandingRequest,
-      GrpcTlsConfig tlsConfig, TimeDuration timeout) throws IOException {
+      RetryPolicy retryPolicy, GrpcTlsConfig tlsConfig,
+      Configuration ozoneConfiguration) throws IOException {
     return newRaftClient(rpcType,
         toRaftPeerId(pipeline.getLeaderNode()),
         newRaftGroup(RaftGroupId.valueOf(pipeline.getId().getId()),
-            pipeline.getNodes()), retryPolicy, maxOutStandingRequest, tlsConfig,
-        timeout);
-  }
-
-  static TimeDuration getClientRequestTimeout(Configuration conf) {
-    // Set the client requestTimeout
-    final TimeUnit timeUnit =
-        OzoneConfigKeys.DFS_RATIS_CLIENT_REQUEST_TIMEOUT_DURATION_DEFAULT
-            .getUnit();
-    final long duration = conf.getTimeDuration(
-        OzoneConfigKeys.DFS_RATIS_CLIENT_REQUEST_TIMEOUT_DURATION_KEY,
-        OzoneConfigKeys.DFS_RATIS_CLIENT_REQUEST_TIMEOUT_DURATION_DEFAULT
-            .getDuration(), timeUnit);
-    return TimeDuration.valueOf(duration, timeUnit);
+            pipeline.getNodes()), retryPolicy, tlsConfig, ozoneConfiguration);
   }
 
   static RpcType getRpcType(Configuration conf) {
@@ -168,43 +166,43 @@ public interface RatisHelper {
   }
 
   static RaftClient newRaftClient(RaftPeer leader, Configuration conf) {
-    return newRaftClient(getRpcType(conf), leader, RetryPolicies.noRetry(),
-        GrpcConfigKeys.OutputStream.OUTSTANDING_APPENDS_MAX_DEFAULT,
-        getClientRequestTimeout(conf));
+    return newRaftClient(getRpcType(conf), leader,
+        RatisHelper.createRetryPolicy(conf), conf);
   }
 
   static RaftClient newRaftClient(RpcType rpcType, RaftPeer leader,
-      RetryPolicy retryPolicy, int maxOutstandingRequests,
-      GrpcTlsConfig tlsConfig, TimeDuration clientRequestTimeout) {
+      RetryPolicy retryPolicy, GrpcTlsConfig tlsConfig,
+      Configuration configuration) {
     return newRaftClient(rpcType, leader.getId(),
         newRaftGroup(Collections.singletonList(leader)), retryPolicy,
-        maxOutstandingRequests, tlsConfig, clientRequestTimeout);
+        tlsConfig, configuration);
   }
 
   static RaftClient newRaftClient(RpcType rpcType, RaftPeer leader,
-      RetryPolicy retryPolicy, int maxOutstandingRequests,
-      TimeDuration clientRequestTimeout) {
+      RetryPolicy retryPolicy,
+      Configuration ozoneConfiguration) {
     return newRaftClient(rpcType, leader.getId(),
-        newRaftGroup(Collections.singletonList(leader)), retryPolicy,
-        maxOutstandingRequests, null, clientRequestTimeout);
+        newRaftGroup(Collections.singletonList(leader)), retryPolicy, null,
+        ozoneConfiguration);
   }
 
+  @SuppressWarnings("checkstyle:ParameterNumber")
   static RaftClient newRaftClient(RpcType rpcType, RaftPeerId leader,
-      RaftGroup group, RetryPolicy retryPolicy, int maxOutStandingRequest,
-      GrpcTlsConfig tlsConfig, TimeDuration clientRequestTimeout) {
+      RaftGroup group, RetryPolicy retryPolicy,
+      GrpcTlsConfig tlsConfig, Configuration ozoneConfiguration) {
     if (LOG.isTraceEnabled()) {
       LOG.trace("newRaftClient: {}, leader={}, group={}",
           rpcType, leader, group);
     }
     final RaftProperties properties = new RaftProperties();
+
     RaftConfigKeys.Rpc.setType(properties, rpcType);
-    RaftClientConfigKeys.Rpc
-        .setRequestTimeout(properties, clientRequestTimeout);
 
-    GrpcConfigKeys.setMessageSizeMax(properties,
-        SizeInBytes.valueOf(OzoneConsts.OZONE_SCM_CHUNK_MAX_SIZE));
-    GrpcConfigKeys.OutputStream.setOutstandingAppendsMax(properties,
-        maxOutStandingRequest);
+    // Set the ratis client headers which are matching with regex.
+    createRaftClientProperties(ozoneConfiguration, properties);
+
+    // Set the ratis grpc client headers which are matching with regex.
+    createRaftGrpcProperties(ozoneConfiguration, properties);
 
     RaftClient.Builder builder =  RaftClient.newBuilder()
         .setRaftGroup(group)
@@ -219,6 +217,61 @@ public interface RatisHelper {
     return builder.build();
   }
 
+  /**
+   * Copies all properties matching the regex RATIS_CLIENT_HEADER_REGEX from
+   * the Ozone configuration into the given RaftProperties.
+   * @param ozoneConf Ozone configuration to read from
+   * @param raftProperties Ratis properties to update
+   */
+  static void createRaftClientProperties(Configuration ozoneConf,
+      RaftProperties raftProperties) {
+    Map<String, String> ratisClientConf =
+        ozoneConf.getValByRegex(RATIS_CLIENT_HEADER_REGEX);
+    ratisClientConf.forEach((key, val) -> raftProperties.set(key, val));
+  }
+
+  /**
+   * Copies all properties matching the regex
+   * {@link RatisHelper#RATIS_GRPC_CLIENT_HEADER_REGEX} from the Ozone
+   * configuration into the given RaftProperties.
+   * @param ozoneConf Ozone configuration to read from
+   * @param raftProperties Ratis properties to update
+   */
+  static void createRaftGrpcProperties(Configuration ozoneConf,
+      RaftProperties raftProperties) {
+    Map<String, String> ratisClientConf =
+        ozoneConf.getValByRegex(RATIS_GRPC_CLIENT_HEADER_REGEX);
+    ratisClientConf.forEach((key, val) -> raftProperties.set(key, val));
+  }
+
+  static void createRaftServerGrpcProperties(Configuration ozoneConf,
+      RaftProperties raftProperties) {
+    Map<String, String> ratisClientConf =
+        ozoneConf.getValByRegex(RATIS_SERVER_GRPC_HEADER_REGEX);
+    ratisClientConf.forEach((key, val) -> raftProperties.set(
+        removeDatanodePrefix(key), val));
+  }
+
+
+  /**
+   * Copies all properties matching the regex
+   * {@link RatisHelper#RATIS_SERVER_HEADER_REGEX} from the Ozone
+   * configuration into the given RaftProperties (datanode prefix removed).
+   * @param ozoneConf Ozone configuration to read from
+   * @param raftProperties Ratis properties to update
+   */
+  static void createRaftServerProperties(Configuration ozoneConf,
+       RaftProperties raftProperties) {
+    Map<String, String> ratisServerConf =
+        ozoneConf.getValByRegex(RATIS_SERVER_HEADER_REGEX);
+    ratisServerConf.forEach((key, val) -> raftProperties.set(
+        removeDatanodePrefix(key), val));
+  }
+
+  static String removeDatanodePrefix(String key) {
+    return key.replaceFirst(DATANODE_RATIS_SERVER_CONFIG_PREFIX, "");
+  }
+
   // For External gRPC client to server with gRPC TLS.
   // No mTLS for external client as SCM CA does not issued certificates for them
   static GrpcTlsConfig createTlsClientConfig(SecurityConfig conf,
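
The regex-driven helpers above copy matching Ozone configuration keys straight
into Ratis: raft.client.* and raft.grpc.* keys (except raft.grpc.server and
raft.grpc.tls) are forwarded verbatim, while datanode.ratis.raft.server.* and
datanode.ratis.raft.grpc.* keys are forwarded with the "datanode.ratis."
prefix stripped. A small sketch of that flow; the key names and values are
examples of the pattern, not values this patch prescribes:

    import org.apache.hadoop.hdds.conf.OzoneConfiguration;
    import org.apache.hadoop.hdds.ratis.RatisHelper;
    import org.apache.ratis.conf.RaftProperties;

    final class RatisPropertyForwarding {
      private RatisPropertyForwarding() { }

      public static void main(String[] args) {
        OzoneConfiguration conf = new OzoneConfiguration();
        // Matches RATIS_CLIENT_HEADER_REGEX, so it is copied as-is.
        conf.set("raft.client.rpc.request.timeout", "3s");
        // Matches RATIS_SERVER_HEADER_REGEX; "datanode.ratis." is stripped,
        // so Ratis sees "raft.server.rpc.request.timeout".
        conf.set("datanode.ratis.raft.server.rpc.request.timeout", "60s");

        RaftProperties properties = new RaftProperties();
        RatisHelper.createRaftClientProperties(conf, properties);
        RatisHelper.createRaftServerProperties(conf, properties);
        // properties now carries both keys for the RaftClient/RaftServer.
      }
    }
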
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/recon/ReconConfigKeys.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/recon/ReconConfigKeys.java
index 2568ab8..c0b1d5d 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/recon/ReconConfigKeys.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/recon/ReconConfigKeys.java
@@ -30,6 +30,8 @@ public final class ReconConfigKeys {
   private ReconConfigKeys() {
   }
 
+  public static final String RECON_SCM_CONFIG_PREFIX = "ozone.recon.scmconfig";
+
   public static final String OZONE_RECON_DATANODE_ADDRESS_KEY =
       "ozone.recon.datanode.address";
   public static final String OZONE_RECON_ADDRESS_KEY =
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java
index 737add0..ee90798 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java
@@ -105,21 +105,11 @@ public final class ScmConfigKeys {
   // TODO: Set to 1024 once RATIS issue around purge is fixed.
   public static final int DFS_CONTAINER_RATIS_LOG_PURGE_GAP_DEFAULT =
       1000000;
-
-  public static final String DFS_CONTAINER_RATIS_LEADER_NUM_PENDING_REQUESTS =
-      "dfs.container.ratis.leader.num.pending.requests";
-  public static final int
-      DFS_CONTAINER_RATIS_LEADER_NUM_PENDING_REQUESTS_DEFAULT = 4096;
   public static final String DFS_CONTAINER_RATIS_LEADER_PENDING_BYTES_LIMIT =
       "dfs.container.ratis.leader.pending.bytes.limit";
   public static final String
       DFS_CONTAINER_RATIS_LEADER_PENDING_BYTES_LIMIT_DEFAULT = "1GB";
-
-  public static final String DFS_RATIS_CLIENT_REQUEST_TIMEOUT_DURATION_KEY =
-      "dfs.ratis.client.request.timeout.duration";
-  public static final TimeDuration
-      DFS_RATIS_CLIENT_REQUEST_TIMEOUT_DURATION_DEFAULT =
-      TimeDuration.valueOf(3000, TimeUnit.MILLISECONDS);
+  
   public static final String DFS_RATIS_CLIENT_REQUEST_MAX_RETRIES_KEY =
       "dfs.ratis.client.request.max.retries";
   public static final int DFS_RATIS_CLIENT_REQUEST_MAX_RETRIES_DEFAULT = 180;
@@ -133,11 +123,6 @@ public final class ScmConfigKeys {
   public static final TimeDuration
       DFS_RATIS_SERVER_RETRY_CACHE_TIMEOUT_DURATION_DEFAULT =
       TimeDuration.valueOf(600000, TimeUnit.MILLISECONDS);
-  public static final String DFS_RATIS_SERVER_REQUEST_TIMEOUT_DURATION_KEY =
-      "dfs.ratis.server.request.timeout.duration";
-  public static final TimeDuration
-      DFS_RATIS_SERVER_REQUEST_TIMEOUT_DURATION_DEFAULT =
-      TimeDuration.valueOf(3000, TimeUnit.MILLISECONDS);
   public static final String
       DFS_RATIS_LEADER_ELECTION_MINIMUM_TIMEOUT_DURATION_KEY =
       "dfs.ratis.leader.election.minimum.timeout.duration";
@@ -149,12 +134,6 @@ public final class ScmConfigKeys {
       "dfs.ratis.snapshot.threshold";
   public static final long DFS_RATIS_SNAPSHOT_THRESHOLD_DEFAULT = 100000;
 
-  public static final String DFS_RATIS_SERVER_FAILURE_DURATION_KEY =
-      "dfs.ratis.server.failure.duration";
-  public static final TimeDuration
-      DFS_RATIS_SERVER_FAILURE_DURATION_DEFAULT =
-      TimeDuration.valueOf(120, TimeUnit.SECONDS);
-
   // TODO : this is copied from OzoneConsts, may need to move to a better place
   public static final String OZONE_SCM_CHUNK_SIZE_KEY = "ozone.scm.chunk.size";
   // 16 MB by default
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientSpi.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientSpi.java
index f938448..3287777 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientSpi.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientSpi.java
@@ -170,7 +170,6 @@ public abstract class XceiverClientSpi implements Closeable {
   /**
   * Check if a specific commitIndex is replicated to majority/all servers.
    * @param index index to watch for
-   * @param timeout timeout provided for the watch operation to complete
    * @return reply containing the min commit index replicated to all or majority
    *         servers in case of a failure
    * @throws InterruptedException
@@ -178,7 +177,7 @@ public abstract class XceiverClientSpi implements Closeable {
    * @throws TimeoutException
    * @throws IOException
    */
-  public abstract XceiverClientReply watchForCommit(long index, long timeout)
+  public abstract XceiverClientReply watchForCommit(long index)
       throws InterruptedException, ExecutionException, TimeoutException,
       IOException;
 
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/client/ScmClient.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/client/ScmClient.java
index 8ef6323..1a20d31 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/client/ScmClient.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/client/ScmClient.java
@@ -172,6 +172,13 @@ public interface ScmClient extends Closeable {
   List<Pipeline> listPipelines() throws IOException;
 
   /**
+   * Returns the pipeline with the given ID, if present.
+   * @return pipeline
+   * @throws IOException in case of exception
+   */
+  Pipeline getPipeline(HddsProtos.PipelineID pipelineID) throws IOException;
+
+  /**
    * Activates the pipeline given a pipeline ID.
    *
    * @param pipelineID PipelineID to activate.
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocol/LocatedContainer.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocol/LocatedContainer.java
deleted file mode 100644
index 10a9b1b..0000000
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocol/LocatedContainer.java
+++ /dev/null
@@ -1,127 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hdds.scm.protocol;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
-
-import java.util.Set;
-
-/**
- * Holds the nodes that currently host the container for an object key hash.
- */
-@InterfaceAudience.Private
-public final class LocatedContainer {
-  private final String key;
-  private final String matchedKeyPrefix;
-  private final String containerName;
-  private final Set<DatanodeInfo> locations;
-  private final DatanodeInfo leader;
-
-  /**
-   * Creates a LocatedContainer.
-   *
-   * @param key object key
-   * @param matchedKeyPrefix prefix of key that was used to find the location
-   * @param containerName container name
-   * @param locations nodes that currently host the container
-   * @param leader node that currently acts as pipeline leader
-   */
-  public LocatedContainer(String key, String matchedKeyPrefix,
-      String containerName, Set<DatanodeInfo> locations, DatanodeInfo leader) {
-    this.key = key;
-    this.matchedKeyPrefix = matchedKeyPrefix;
-    this.containerName = containerName;
-    this.locations = locations;
-    this.leader = leader;
-  }
-
-  /**
-   * Returns the container name.
-   *
-   * @return container name
-   */
-  public String getContainerName() {
-    return this.containerName;
-  }
-
-  /**
-   * Returns the object key.
-   *
-   * @return object key
-   */
-  public String getKey() {
-    return this.key;
-  }
-
-  /**
-   * Returns the node that currently acts as pipeline leader.
-   *
-   * @return node that currently acts as pipeline leader
-   */
-  public DatanodeInfo getLeader() {
-    return this.leader;
-  }
-
-  /**
-   * Returns the nodes that currently host the container.
-   *
-   * @return {@code Set<DatanodeInfo>} nodes that currently host the container
-   */
-  public Set<DatanodeInfo> getLocations() {
-    return this.locations;
-  }
-
-  /**
-   * Returns the prefix of the key that was used to find the location.
-   *
-   * @return prefix of the key that was used to find the location
-   */
-  public String getMatchedKeyPrefix() {
-    return this.matchedKeyPrefix;
-  }
-
-  @Override
-  public boolean equals(Object otherObj) {
-    if (otherObj == null) {
-      return false;
-    }
-    if (!(otherObj instanceof LocatedContainer)) {
-      return false;
-    }
-    LocatedContainer other = (LocatedContainer)otherObj;
-    return this.key == null ? other.key == null : this.key.equals(other.key);
-  }
-
-  @Override
-  public int hashCode() {
-    return key.hashCode();
-  }
-
-  @Override
-  public String toString() {
-    return getClass().getSimpleName()
-        + "{key=" + key
-        + "; matchedKeyPrefix=" + matchedKeyPrefix
-        + "; containerName=" + containerName
-        + "; locations=" + locations
-        + "; leader=" + leader
-        + "}";
-  }
-}
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocol/ScmLocatedBlock.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocol/ScmLocatedBlock.java
deleted file mode 100644
index 0d2ecf7..0000000
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocol/ScmLocatedBlock.java
+++ /dev/null
@@ -1,100 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hdds.scm.protocol;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
-
-import java.util.List;
-import java.util.stream.Collectors;
-
-/**
- * Holds the nodes that currently host the block for a block key.
- */
-@InterfaceAudience.Private
-public final class ScmLocatedBlock {
-  private final String key;
-  private final List<DatanodeInfo> locations;
-  private final DatanodeInfo leader;
-
-  /**
-   * Creates a ScmLocatedBlock.
-   *
-   * @param key object key
-   * @param locations nodes that currently host the block
-   * @param leader node that currently acts as pipeline leader
-   */
-  public ScmLocatedBlock(final String key, final List<DatanodeInfo> locations,
-      final DatanodeInfo leader) {
-    this.key = key;
-    this.locations = locations;
-    this.leader = leader;
-  }
-
-  /**
-   * Returns the object key.
-   *
-   * @return object key
-   */
-  public String getKey() {
-    return this.key;
-  }
-
-  /**
-   * Returns the node that currently acts as pipeline leader.
-   *
-   * @return node that currently acts as pipeline leader
-   */
-  public DatanodeInfo getLeader() {
-    return this.leader;
-  }
-
-  /**
-   * Returns the nodes that currently host the block.
-   *
-   * @return {@literal List<DatanodeInfo>} nodes that currently host the block
-   */
-  public List<DatanodeInfo> getLocations() {
-    return this.locations;
-  }
-
-  @Override
-  public boolean equals(Object otherObj) {
-    if (otherObj == null) {
-      return false;
-    }
-    if (!(otherObj instanceof ScmLocatedBlock)) {
-      return false;
-    }
-    ScmLocatedBlock other = (ScmLocatedBlock)otherObj;
-    return this.key == null ? other.key == null : this.key.equals(other.key);
-  }
-
-  @Override
-  public int hashCode() {
-    return key.hashCode();
-  }
-
-  @Override
-  public String toString() {
-    return getClass().getSimpleName() + "{key=" + key + "; locations="
-        + locations.stream().map(loc -> loc.toString()).collect(Collectors
-            .joining(",")) + "; leader=" + leader + "}";
-  }
-}
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocol/StorageContainerLocationProtocol.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocol/StorageContainerLocationProtocol.java
index 365750a..3ec3277 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocol/StorageContainerLocationProtocol.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocol/StorageContainerLocationProtocol.java
@@ -27,6 +27,7 @@ import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
 import java.io.Closeable;
 import java.io.IOException;
 import java.util.List;
+
 import org.apache.hadoop.security.KerberosInfo;
 
 /**
@@ -140,6 +141,15 @@ public interface StorageContainerLocationProtocol extends Closeable {
   List<Pipeline> listPipelines() throws IOException;
 
   /**
+   * Returns the Pipeline with the given ID, if present.
+   *
+   * @return Pipeline
+   *
+   * @throws IOException in case of any exception
+   */
+  Pipeline getPipeline(HddsProtos.PipelineID pipelineID) throws IOException;
+
+  /**
    * Activates a dormant pipeline.
    *
    * @param pipelineID ID of the pipeline to activate.
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/StorageContainerLocationProtocolClientSideTranslatorPB.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/StorageContainerLocationProtocolClientSideTranslatorPB.java
index a264121..6e36e7c 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/StorageContainerLocationProtocolClientSideTranslatorPB.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/StorageContainerLocationProtocolClientSideTranslatorPB.java
@@ -35,6 +35,8 @@ import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolPro
 import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.ForceExitSafeModeResponseProto;
 import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.GetContainerRequestProto;
 import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.GetContainerWithPipelineRequestProto;
+import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.GetPipelineRequestProto;
+import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.GetPipelineResponseProto;
 import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.InSafeModeRequestProto;
 import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.ListPipelineRequestProto;
 import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.ListPipelineResponseProto;
@@ -331,6 +333,20 @@ public final class StorageContainerLocationProtocolClientSideTranslatorPB
   }
 
   @Override
+  public Pipeline getPipeline(HddsProtos.PipelineID pipelineID)
+      throws IOException {
+    GetPipelineRequestProto request = GetPipelineRequestProto.newBuilder()
+            .setPipelineID(pipelineID)
+            .setTraceID(TracingUtil.exportCurrentSpan())
+            .build();
+    GetPipelineResponseProto response = submitRequest(Type.GetPipeline,
+        builder -> builder.setGetPipelineRequest(request))
+        .getGetPipelineResponse();
+
+    return Pipeline.getFromProtobuf(response.getPipeline());
+  }
+
+  @Override
   public void activatePipeline(HddsProtos.PipelineID pipelineID)
       throws IOException {
     ActivatePipelineRequestProto request =
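
With GetPipeline wired through the translator above, callers reach the new RPC
via ScmClient#getPipeline. A hedged sketch, assuming an already-constructed
ScmClient and the usual Pipeline/DatanodeDetails classes from hdds-common:

    import java.io.IOException;
    import java.util.List;

    import org.apache.hadoop.hdds.protocol.DatanodeDetails;
    import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
    import org.apache.hadoop.hdds.scm.client.ScmClient;
    import org.apache.hadoop.hdds.scm.pipeline.Pipeline;

    final class PipelineLookup {
      private PipelineLookup() { }

      /** Fetches a pipeline via the new GetPipeline RPC and lists its nodes. */
      static List<DatanodeDetails> nodesOf(ScmClient scmClient,
          HddsProtos.PipelineID id) throws IOException {
        Pipeline pipeline = scmClient.getPipeline(id);
        return pipeline.getNodes();
      }
    }
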
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/utils/CRLCodec.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/utils/CRLCodec.java
index 4ee793b..19a6f58 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/utils/CRLCodec.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/security/x509/certificate/utils/CRLCodec.java
@@ -78,10 +78,10 @@ public class CRLCodec {
    * Returns a X509 CRL from the CRL Holder.
    *
    * @param holder - Holder
-   * @return X509Certificate.
+   * @return X509CRL - X509 CRL.
    * @throws CRLException - on Error.
    */
-  public static X509CRL get509CRL(X509CRLHolder holder)
+  public static X509CRL getX509CRL(X509CRLHolder holder)
       throws CRLException {
     return CRL_CONVERTER.getCRL(holder);
   }
@@ -97,7 +97,7 @@ public class CRLCodec {
       throws SCMSecurityException {
     LOG.trace("Getting PEM version of a CRL.");
     try {
-      return getPEMEncodedString(get509CRL(holder));
+      return getPEMEncodedString(getX509CRL(holder));
     } catch (CRLException exp) {
       throw new SCMSecurityException(exp);
     }
@@ -121,7 +121,8 @@ public class CRLCodec {
    * Gets the X.509 CRL from PEM encoded String.
    *
    * @param pemEncodedString - PEM encoded String.
-   * @return X509Certificate  - Certificate.
+   * @return X509CRL - CRL.
+   * @throws CRLException - Thrown on Failure.
    * @throws CertificateException - Thrown on Failure.
    * @throws IOException          - Thrown on Failure.
    */
@@ -134,7 +135,7 @@ public class CRLCodec {
   }
 
   /**
-   * Get Certificate location.
+   * Get CRL location.
    *
    * @return Path
    */
@@ -145,8 +146,7 @@ public class CRLCodec {
   /**
   * Write the CRL to the location pointed to by the configs.
    *
-   * @param crl - CRL to write.
-   * @throws SCMSecurityException - on Error.
+   * @param crl - X509 CRL to write.
    * @throws IOException          - on Error.
    */
   public void writeCRL(X509CRL crl)
@@ -162,8 +162,7 @@ public class CRLCodec {
    * @param crlHolder - CRL to write.
    * @param fileName - file name to write to.
    * @param overwrite - boolean value, true means overwrite an existing
-   * certificate.
-   * @throws SCMSecurityException - On Error.
+   * CRL.
    * @throws IOException          - On Error.
    */
   public void writeCRL(X509CRLHolder crlHolder,
@@ -173,6 +172,16 @@ public class CRLCodec {
     writeCRL(location.toAbsolutePath(), fileName, pem, overwrite);
   }
 
+  /**
+   * Write the CRL to the specified file.
+   *
+   * @param basePath - base path where the CRL file is to be written.
+   * @param fileName - file name of the CRL file.
+   * @param pemCRLString - PEM encoded string.
+   * @param force - boolean value, true means overwrite an existing
+   * CRL.
+   * @throws IOException          - On Error.
+   */
   public synchronized void writeCRL(Path basePath, String fileName,
       String pemCRLString, boolean force)
       throws IOException {
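
The renamed getX509CRL converts a BouncyCastle X509CRLHolder into the JCA
X509CRL type, and getPEMEncodedString builds on the same conversion. A small
usage sketch; it assumes both helpers are static, as getX509CRL is in this
hunk, and that the exception import paths match the surrounding code:

    import java.security.cert.CRLException;
    import java.security.cert.X509CRL;

    import org.apache.hadoop.hdds.security.exception.SCMSecurityException;
    import org.apache.hadoop.hdds.security.x509.certificate.utils.CRLCodec;
    import org.bouncycastle.cert.X509CRLHolder;

    final class CrlConversion {
      private CrlConversion() { }

      /** Converts a CRL holder to the JCA type (renamed from get509CRL). */
      static X509CRL toX509(X509CRLHolder holder) throws CRLException {
        return CRLCodec.getX509CRL(holder);
      }

      /** PEM-encodes a CRL holder; wraps the same conversion internally. */
      static String toPem(X509CRLHolder holder) throws SCMSecurityException {
        return CRLCodec.getPEMEncodedString(holder);
      }
    }
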
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/MetadataKeyFilters.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/MetadataKeyFilters.java
index a88ce47..9fcc270 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/MetadataKeyFilters.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/MetadataKeyFilters.java
@@ -17,14 +17,15 @@
  */
 package org.apache.hadoop.hdds.utils;
 
-import com.google.common.base.Preconditions;
-import com.google.common.base.Strings;
-import org.apache.hadoop.hdfs.DFSUtil;
-import org.apache.hadoop.ozone.OzoneConsts;
-
 import java.util.ArrayList;
 import java.util.List;
 
+import org.apache.hadoop.hdds.StringUtils;
+import org.apache.hadoop.ozone.OzoneConsts;
+
+import com.google.common.base.Preconditions;
+import com.google.common.base.Strings;
+
 /**
  * A utility class to filter levelDB keys.
  */
@@ -156,7 +157,7 @@ public final class MetadataKeyFilters {
 
       accept = !positivePrefixList.isEmpty() && positivePrefixList.stream()
           .anyMatch(prefix -> {
-            byte[] prefixBytes = DFSUtil.string2Bytes(prefix);
+            byte[] prefixBytes = StringUtils.string2Bytes(prefix);
             return prefixMatch(prefixBytes, currentKey);
           });
       if (accept) {
@@ -168,7 +169,7 @@ public final class MetadataKeyFilters {
 
       accept = !negativePrefixList.isEmpty() && negativePrefixList.stream()
           .allMatch(prefix -> {
-            byte[] prefixBytes = DFSUtil.string2Bytes(prefix);
+            byte[] prefixBytes = StringUtils.string2Bytes(prefix);
             return !prefixMatch(prefixBytes, currentKey);
           });
       if (accept) {
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/DBStore.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/DBStore.java
index b3f5838..b3ed819 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/DBStore.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/DBStore.java
@@ -159,7 +159,7 @@ public interface DBStore extends AutoCloseable {
   void commitBatchOperation(BatchOperation operation) throws IOException;
 
   /**
-   * Get current snapshot of OM DB store as an artifact stored on
+   * Get current snapshot of DB store as an artifact stored on
    * the local filesystem.
    * @return An object that encapsulates the checkpoint information along with
    * location.
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/DBStoreBuilder.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/DBStoreBuilder.java
index 5994252..e3d5afb 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/DBStoreBuilder.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/DBStoreBuilder.java
@@ -22,8 +22,8 @@ package org.apache.hadoop.hdds.utils.db;
 import com.google.common.base.Preconditions;
 import org.apache.hadoop.conf.Configuration;
 
+import org.apache.hadoop.hdds.StringUtils;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdfs.DFSUtil;
 import org.eclipse.jetty.util.StringUtil;
 import org.rocksdb.ColumnFamilyDescriptor;
 import org.rocksdb.ColumnFamilyOptions;
@@ -184,11 +184,11 @@ public final class DBStoreBuilder {
             dbProfile.toString(), name);
       }
     }
-    addTable(DFSUtil.bytes2String(RocksDB.DEFAULT_COLUMN_FAMILY),
+    addTable(StringUtils.bytes2String(RocksDB.DEFAULT_COLUMN_FAMILY),
         dbProfile.getColumnFamilyOptions());
     LOG.info("Using default column profile:{} for Table:{}",
         dbProfile.toString(),
-        DFSUtil.bytes2String(RocksDB.DEFAULT_COLUMN_FAMILY));
+        StringUtils.bytes2String(RocksDB.DEFAULT_COLUMN_FAMILY));
   }
 
   private DBOptions getDbProfile() {
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/RDBStore.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/RDBStore.java
index 067f708..4bb9431 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/RDBStore.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/RDBStore.java
@@ -19,8 +19,6 @@
 
 package org.apache.hadoop.hdds.utils.db;
 
-import static org.apache.hadoop.ozone.OzoneConsts.OM_DB_CHECKPOINTS_DIR_NAME;
-
 import javax.management.ObjectName;
 import java.io.File;
 import java.io.IOException;
@@ -32,8 +30,8 @@ import java.util.Map;
 import java.util.Set;
 
 import org.apache.hadoop.hdds.HddsUtils;
+import org.apache.hadoop.hdds.StringUtils;
 import org.apache.hadoop.hdds.utils.RocksDBStoreMBean;
-import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.metrics2.util.MBeans;
 
 import com.google.common.base.Preconditions;
@@ -101,7 +99,7 @@ public class RDBStore implements DBStore {
 
       for (int x = 0; x < columnFamilyHandles.size(); x++) {
         handleTable.put(
-            DFSUtil.bytes2String(columnFamilyHandles.get(x).getName()),
+            StringUtils.bytes2String(columnFamilyHandles.get(x).getName()),
             columnFamilyHandles.get(x));
       }
 
@@ -119,8 +117,8 @@ public class RDBStore implements DBStore {
       }
 
       //create checkpoints directory if not exists.
-      checkpointsParentDir = Paths.get(dbLocation.getParent(),
-          OM_DB_CHECKPOINTS_DIR_NAME).toString();
+      checkpointsParentDir =
+              Paths.get(dbLocation.getParent(), "db.checkpoints").toString();
       File checkpointsDir = new File(checkpointsParentDir);
       if (!checkpointsDir.exists()) {
         boolean success = checkpointsDir.mkdir();
@@ -130,7 +128,7 @@ public class RDBStore implements DBStore {
       }
 
       //Initialize checkpoint manager
-      checkPointManager = new RDBCheckpointManager(db, "om");
+      checkPointManager = new RDBCheckpointManager(db, "rdb");
       rdbMetrics = RDBMetrics.create();
 
     } catch (RocksDBException e) {
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/RDBTable.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/RDBTable.java
index 56083a5..c306bea 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/RDBTable.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/RDBTable.java
@@ -23,7 +23,7 @@ import java.io.IOException;
 import java.nio.charset.StandardCharsets;
 
 import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.hdfs.DFSUtil;
+import org.apache.hadoop.hdds.StringUtils;
 
 import org.rocksdb.ColumnFamilyHandle;
 import org.rocksdb.ReadOptions;
@@ -185,7 +185,7 @@ class RDBTable implements Table<byte[], byte[]> {
   @Override
   public String getName() throws IOException {
     try {
-      return DFSUtil.bytes2String(this.getHandle().getName());
+      return StringUtils.bytes2String(this.getHandle().getName());
     } catch (RocksDBException rdbEx) {
       throw toIOException("Unable to get the table name.", rdbEx);
     }
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/StringCodec.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/StringCodec.java
index 122cac0..33ab57b 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/StringCodec.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/StringCodec.java
@@ -19,7 +19,8 @@
 package org.apache.hadoop.hdds.utils.db;
 
 import java.io.IOException;
-import org.apache.hadoop.hdfs.DFSUtil;
+
+import org.apache.hadoop.hdds.StringUtils;
 
 /**
  * Codec to convert String to/from byte array.
@@ -29,7 +30,7 @@ public class StringCodec implements Codec<String> {
   @Override
   public byte[] toPersistedFormat(String object) throws IOException {
     if (object != null) {
-      return DFSUtil.string2Bytes(object);
+      return StringUtils.string2Bytes(object);
     } else {
       return null;
     }
@@ -38,7 +39,7 @@ public class StringCodec implements Codec<String> {
   @Override
   public String fromPersistedFormat(byte[] rawData) throws IOException {
     if (rawData != null) {
-      return DFSUtil.bytes2String(rawData);
+      return StringUtils.bytes2String(rawData);
     } else {
       return null;
     }
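
The change above only swaps the DFSUtil string helpers for the new HDDS-local
StringUtils ones; the codec contract itself is unchanged. A tiny round trip as
a sanity sketch (the key string is an arbitrary example):

    import java.io.IOException;

    import org.apache.hadoop.hdds.utils.db.StringCodec;

    final class StringCodecRoundTrip {
      private StringCodecRoundTrip() { }

      public static void main(String[] args) throws IOException {
        StringCodec codec = new StringCodec();
        byte[] persisted = codec.toPersistedFormat("/volume/bucket/key");
        String restored = codec.fromPersistedFormat(persisted);
        System.out.println("/volume/bucket/key".equals(restored)); // true
      }
    }
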
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/TableConfig.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/TableConfig.java
index d8eb401..fb5c9a6 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/TableConfig.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/db/TableConfig.java
@@ -19,9 +19,10 @@
 
 package org.apache.hadoop.hdds.utils.db;
 
+import org.apache.hadoop.hdds.StringUtils;
+
 import org.apache.commons.lang3.builder.EqualsBuilder;
 import org.apache.commons.lang3.builder.HashCodeBuilder;
-import org.apache.hadoop.hdfs.DFSUtil;
 import org.rocksdb.ColumnFamilyDescriptor;
 import org.rocksdb.ColumnFamilyOptions;
 
@@ -56,7 +57,7 @@ public class TableConfig {
    * @return ColumnFamilyDescriptor
    */
   public ColumnFamilyDescriptor getDescriptor() {
-    return  new ColumnFamilyDescriptor(DFSUtil.string2Bytes(name),
+    return  new ColumnFamilyDescriptor(StringUtils.string2Bytes(name),
         columnFamilyOptions);
   }
 
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java
index e637a09..147ede8 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java
@@ -134,12 +134,6 @@ public final class OzoneConfigKeys {
   public static final String OZONE_CLIENT_STREAM_BUFFER_MAX_SIZE_DEFAULT =
       "128MB";
 
-  public static final String OZONE_CLIENT_WATCH_REQUEST_TIMEOUT =
-      "ozone.client.watch.request.timeout";
-
-  public static final String OZONE_CLIENT_WATCH_REQUEST_TIMEOUT_DEFAULT =
-      "30s";
-
   public static final String OZONE_CLIENT_MAX_RETRIES =
       "ozone.client.max.retries";
   public static final int OZONE_CLIENT_MAX_RETRIES_DEFAULT = 100;
@@ -269,11 +263,6 @@ public final class OzoneConfigKeys {
 
   public static final String DFS_CONTAINER_RATIS_DATANODE_STORAGE_DIR =
       "dfs.container.ratis.datanode.storage.dir";
-  public static final String DFS_RATIS_CLIENT_REQUEST_TIMEOUT_DURATION_KEY =
-      ScmConfigKeys.DFS_RATIS_CLIENT_REQUEST_TIMEOUT_DURATION_KEY;
-  public static final TimeDuration
-      DFS_RATIS_CLIENT_REQUEST_TIMEOUT_DURATION_DEFAULT =
-      ScmConfigKeys.DFS_RATIS_CLIENT_REQUEST_TIMEOUT_DURATION_DEFAULT;
   public static final String DFS_RATIS_CLIENT_REQUEST_MAX_RETRIES_KEY =
       ScmConfigKeys.DFS_RATIS_CLIENT_REQUEST_MAX_RETRIES_KEY;
   public static final int DFS_RATIS_CLIENT_REQUEST_MAX_RETRIES_DEFAULT =
@@ -317,21 +306,11 @@ public final class OzoneConfigKeys {
       ScmConfigKeys.DFS_CONTAINER_RATIS_LOG_PURGE_GAP;
   public static final int DFS_CONTAINER_RATIS_LOG_PURGE_GAP_DEFAULT =
       ScmConfigKeys.DFS_CONTAINER_RATIS_LOG_PURGE_GAP_DEFAULT;
-  public static final String DFS_CONTAINER_RATIS_LEADER_NUM_PENDING_REQUESTS =
-      ScmConfigKeys.DFS_CONTAINER_RATIS_LEADER_NUM_PENDING_REQUESTS;
-  public static final int
-      DFS_CONTAINER_RATIS_LEADER_NUM_PENDING_REQUESTS_DEFAULT =
-      ScmConfigKeys.DFS_CONTAINER_RATIS_LEADER_NUM_PENDING_REQUESTS_DEFAULT;
   public static final String DFS_CONTAINER_RATIS_LEADER_PENDING_BYTES_LIMIT =
       ScmConfigKeys.DFS_CONTAINER_RATIS_LEADER_PENDING_BYTES_LIMIT;
   public static final String
       DFS_CONTAINER_RATIS_LEADER_PENDING_BYTES_LIMIT_DEFAULT =
       ScmConfigKeys.DFS_CONTAINER_RATIS_LEADER_PENDING_BYTES_LIMIT_DEFAULT;
-  public static final String DFS_RATIS_SERVER_REQUEST_TIMEOUT_DURATION_KEY =
-      ScmConfigKeys.DFS_RATIS_SERVER_REQUEST_TIMEOUT_DURATION_KEY;
-  public static final TimeDuration
-      DFS_RATIS_SERVER_REQUEST_TIMEOUT_DURATION_DEFAULT =
-      ScmConfigKeys.DFS_RATIS_SERVER_REQUEST_TIMEOUT_DURATION_DEFAULT;
   public static final String
       DFS_RATIS_LEADER_ELECTION_MINIMUM_TIMEOUT_DURATION_KEY =
       ScmConfigKeys.DFS_RATIS_LEADER_ELECTION_MINIMUM_TIMEOUT_DURATION_KEY;
@@ -343,12 +322,6 @@ public final class OzoneConfigKeys {
   public static final long DFS_RATIS_SNAPSHOT_THRESHOLD_DEFAULT =
       ScmConfigKeys.DFS_RATIS_SNAPSHOT_THRESHOLD_DEFAULT;
 
-  public static final String DFS_RATIS_SERVER_FAILURE_DURATION_KEY =
-      ScmConfigKeys.DFS_RATIS_SERVER_FAILURE_DURATION_KEY;
-  public static final TimeDuration
-      DFS_RATIS_SERVER_FAILURE_DURATION_DEFAULT =
-      ScmConfigKeys.DFS_RATIS_SERVER_FAILURE_DURATION_DEFAULT;
-
   public static final String HDDS_DATANODE_PLUGINS_KEY =
       "hdds.datanode.plugins";
 
@@ -449,6 +422,8 @@ public final class OzoneConfigKeys {
       "ozone.client.list.trash.keys.max";
   public static final int OZONE_CLIENT_LIST_TRASH_KEYS_MAX_DEFAULT = 1000;
 
+  public static final String OZONE_HTTP_BASEDIR = "ozone.http.basedir";
+
   /**
    * There is no need to instantiate this class.
    */
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java
index ee2ecf8..e11f977 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java
@@ -121,7 +121,6 @@ public final class OzoneConsts {
   public static final String DELETED_BLOCK_DB = "deletedBlock.db";
   public static final String OM_DB_NAME = "om.db";
   public static final String OM_DB_BACKUP_PREFIX = "om.db.backup.";
-  public static final String OM_DB_CHECKPOINTS_DIR_NAME = "om.db.checkpoints";
   public static final String OZONE_MANAGER_TOKEN_DB_NAME = "om-token.db";
   public static final String SCM_DB_NAME = "scm.db";
 
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/conf/DatanodeRatisServerConfig.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/conf/DatanodeRatisServerConfig.java
new file mode 100644
index 0000000..3d6b8b2
--- /dev/null
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/conf/DatanodeRatisServerConfig.java
@@ -0,0 +1,141 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.ozone.conf;
+
+import org.apache.hadoop.hdds.conf.Config;
+import org.apache.hadoop.hdds.conf.ConfigGroup;
+import org.apache.hadoop.hdds.conf.ConfigType;
+
+import static org.apache.hadoop.hdds.conf.ConfigTag.DATANODE;
+import static org.apache.hadoop.hdds.conf.ConfigTag.OZONE;
+import static org.apache.hadoop.hdds.conf.ConfigTag.PERFORMANCE;
+import static org.apache.hadoop.hdds.conf.ConfigTag.RATIS;
+
+/**
+ * Datanode Ratis server Configuration.
+ */
+@ConfigGroup(prefix = "datanode.ratis")
+public class DatanodeRatisServerConfig {
+
+  public static final String DATANODE_RATIS_SERVER_CONFIG_PREFIX = "datanode" +
+      ".ratis.";
+
+  public static final String RATIS_SERVER_REQUEST_TIMEOUT_KEY =
+      "raft.server.rpc.request.timeout";
+
+  public static final String RATIS_SERVER_WATCH_REQUEST_TIMEOUT_KEY =
+      "raft.server.watch.timeout";
+
+  public static final String RATIS_SERVER_NO_LEADER_TIMEOUT_KEY =
+      "raft.server.no-leader.timeout";
+
+  public static final String RATIS_FOLLOWER_SLOWNESS_TIMEOUT_KEY =
+      "raft.server.rpcslowness.timeout";
+
+  public static final String RATIS_LEADER_NUM_PENDING_REQUESTS_KEY =
+      "raft.server.write.element-limit";
+
+  @Config(key = RATIS_SERVER_REQUEST_TIMEOUT_KEY,
+      defaultValue = "60s",
+      type = ConfigType.TIME,
+      tags = {OZONE, DATANODE, RATIS},
+      description = "The timeout duration of a Ratis write request " +
+          "on the Ratis server."
+  )
+  private long requestTimeOut = 60 * 1000;
+
+  public long getRequestTimeOut() {
+    return requestTimeOut;
+  }
+
+  public void setRequestTimeOut(long requestTimeOut) {
+    this.requestTimeOut = requestTimeOut;
+  }
+
+  @Config(key = RATIS_SERVER_WATCH_REQUEST_TIMEOUT_KEY,
+      defaultValue = "180s",
+      type = ConfigType.TIME,
+      tags = {OZONE, DATANODE, RATIS},
+      description = "The timeout duration for a watch request on the Ratis " +
+          "server, i.e. the time within which the server acknowledges that " +
+          "a particular request has been replicated to all servers."
+  )
+  private long watchTimeOut = 180 * 1000;
+
+  public long getWatchTimeOut() {
+    return watchTimeOut;
+  }
+
+  public void setWatchTimeOut(long watchTimeOut) {
+    this.watchTimeOut = watchTimeOut;
+  }
+
+  @Config(key = RATIS_SERVER_NO_LEADER_TIMEOUT_KEY,
+      defaultValue = "300s",
+      type = ConfigType.TIME,
+      tags = {OZONE, DATANODE, RATIS},
+      description = "Timeout duration after which the StateMachine gets " +
+          "notified that a leader has not been elected for a long time and " +
+          "the leader changes its role to Candidate."
+  )
+  private long noLeaderTimeout = 300 * 1000;
+
+  public long getNoLeaderTimeout() {
+    return noLeaderTimeout;
+  }
+
+  public void setNoLeaderTimeout(long noLeaderTimeout) {
+    this.noLeaderTimeout = noLeaderTimeout;
+  }
+
+  @Config(key = RATIS_FOLLOWER_SLOWNESS_TIMEOUT_KEY,
+      defaultValue = "300s",
+      type = ConfigType.TIME,
+      tags = {OZONE, DATANODE, RATIS},
+      description = "Timeout duration after which stateMachine will be " +
+          "notified that follower is slow. StateMachine will close down the " +
+          "pipeline."
+  )
+  private long followerSlownessTimeout = 300 * 1000;
+
+  public long getFollowerSlownessTimeout() {
+    return followerSlownessTimeout;
+  }
+
+  public void setFollowerSlownessTimeout(long followerSlownessTimeout) {
+    this.followerSlownessTimeout = followerSlownessTimeout;
+  }
+
+  @Config(key = RATIS_LEADER_NUM_PENDING_REQUESTS_KEY,
+      defaultValue = "1024",
+      type = ConfigType.INT,
+      tags = {OZONE, DATANODE, RATIS, PERFORMANCE},
+      description = "Maximum number of pending requests after which the " +
+          "leader starts rejecting requests from client."
+  )
+  private int leaderNumPendingRequests;
+
+  public int getLeaderNumPendingRequests() {
+    return leaderNumPendingRequests;
+  }
+
+  public void setLeaderNumPendingRequests(int leaderNumPendingRequests) {
+    this.leaderNumPendingRequests = leaderNumPendingRequests;
+  }
+}
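
For reference, the @ConfigGroup prefix "datanode.ratis" is prepended to each @Config key above, so the write-request timeout, for example, is read from the key datanode.ratis.raft.server.rpc.request.timeout. Below is a minimal sketch of setting such a value and reading it back; the getObject binding is an assumption about the configuration framework's wiring, not something this change itself shows.

    import org.apache.hadoop.hdds.conf.OzoneConfiguration;
    import org.apache.hadoop.ozone.conf.DatanodeRatisServerConfig;

    public class DatanodeRatisConfigSketch {
      public static void main(String[] args) {
        OzoneConfiguration conf = new OzoneConfiguration();

        // Full key = @ConfigGroup prefix + @Config key.
        conf.set("datanode.ratis.raft.server.rpc.request.timeout", "90s");

        // Assumed binding: the config framework populates the annotated
        // fields of DatanodeRatisServerConfig from the prefixed keys.
        DatanodeRatisServerConfig serverConf =
            conf.getObject(DatanodeRatisServerConfig.class);

        // TIME-typed values are exposed in milliseconds by the getters.
        System.out.println("request timeout (ms): "
            + serverConf.getRequestTimeOut());
      }
    }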
diff --git a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/views/Overview/Overview.less b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/conf/package-info.java
similarity index 87%
copy from hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/views/Overview/Overview.less
copy to hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/conf/package-info.java
index c8e74d2..f4e6a38 100644
--- a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/views/Overview/Overview.less
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/conf/package-info.java
@@ -16,12 +16,11 @@
  * limitations under the License.
  */
 
-.overview-content {
-  margin: 20px 5px;
-  .icon-small {
-    font-size: 16px;
-  }
-  .meta {
-    font-size: 12px;
-  }
-}
+/**
+ * This package contains classes related to configuration.
+ */
+
+package org.apache.hadoop.ozone.conf;
+
+
+
diff --git a/hadoop-hdds/common/src/main/proto/StorageContainerLocationProtocol.proto b/hadoop-hdds/common/src/main/proto/StorageContainerLocationProtocol.proto
index 91c63d1..88df770 100644
--- a/hadoop-hdds/common/src/main/proto/StorageContainerLocationProtocol.proto
+++ b/hadoop-hdds/common/src/main/proto/StorageContainerLocationProtocol.proto
@@ -59,6 +59,7 @@ message ScmContainerLocationRequest {
   optional StartReplicationManagerRequestProto startReplicationManagerRequest = 21;
   optional StopReplicationManagerRequestProto stopReplicationManagerRequest = 22;
   optional ReplicationManagerStatusRequestProto seplicationManagerStatusRequest = 23;
+  optional GetPipelineRequestProto getPipelineRequest = 24;
 
 }
 
@@ -91,6 +92,7 @@ message ScmContainerLocationResponse {
   optional StartReplicationManagerResponseProto startReplicationManagerResponse = 21;
   optional StopReplicationManagerResponseProto stopReplicationManagerResponse = 22;
   optional ReplicationManagerStatusResponseProto replicationManagerStatusResponse = 23;
+  optional GetPipelineResponseProto getPipelineResponse = 24;
   enum Status {
     OK = 1;
     CONTAINER_ALREADY_EXISTS = 2;
@@ -118,6 +120,7 @@ enum Type {
   StartReplicationManager = 16;
   StopReplicationManager = 17;
   GetReplicationManagerStatus = 18;
+  GetPipeline = 19;
 }
 
 /**
@@ -241,6 +244,15 @@ message ListPipelineResponseProto {
   repeated Pipeline pipelines = 1;
 }
 
+message GetPipelineRequestProto {
+  required PipelineID pipelineID = 1;
+  optional string traceID = 2;
+}
+
+message GetPipelineResponseProto {
+  required Pipeline pipeline = 1;
+}
+
 message ActivatePipelineRequestProto {
   required PipelineID pipelineID = 1;
   optional string traceID = 2;
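
The two new messages above back the GetPipeline RPC. A hedged sketch of building such a request on the client side follows; the generated outer class StorageContainerLocationProtocolProtos and the HddsProtos.PipelineID builder follow the usual protoc Java conventions for these proto files, but both names are assumptions here rather than something this diff defines.

    import java.util.UUID;

    import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
    import org.apache.hadoop.hdds.protocol.proto.StorageContainerLocationProtocolProtos.GetPipelineRequestProto;

    public class GetPipelineRequestSketch {
      public static void main(String[] args) {
        // Assumed generated type: PipelineID carries a string id.
        HddsProtos.PipelineID pipelineId = HddsProtos.PipelineID.newBuilder()
            .setId(UUID.randomUUID().toString())
            .build();

        // Build the new request; traceID is optional per the proto definition.
        GetPipelineRequestProto request = GetPipelineRequestProto.newBuilder()
            .setPipelineID(pipelineId)
            .setTraceID("trace-123") // hypothetical trace id
            .build();

        System.out.println(request);
      }
    }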
diff --git a/hadoop-hdds/common/src/main/resources/ozone-default.xml b/hadoop-hdds/common/src/main/resources/ozone-default.xml
index 34e3c7e..075cb3e 100644
--- a/hadoop-hdds/common/src/main/resources/ozone-default.xml
+++ b/hadoop-hdds/common/src/main/resources/ozone-default.xml
@@ -204,14 +204,6 @@
     </description>
   </property>
   <property>
-    <name>dfs.container.ratis.leader.num.pending.requests</name>
-    <value>4096</value>
-    <tag>OZONE, RATIS, PERFORMANCE</tag>
-    <description>Maximum number of pending requests after which the leader
-      starts rejecting requests from client.
-    </description>
-  </property>
-  <property>
     <name>dfs.container.ratis.leader.pending.bytes.limit</name>
     <value>1GB</value>
     <tag>OZONE, RATIS, PERFORMANCE</tag>
@@ -253,14 +245,6 @@
     </description>
   </property>
   <property>
-    <name>dfs.ratis.client.request.timeout.duration</name>
-    <value>3s</value>
-    <tag>OZONE, RATIS, MANAGEMENT</tag>
-    <description>The timeout duration for ratis client request.It should be
-        set greater than leader election timeout in Ratis.
-    </description>
-  </property>
-  <property>
     <name>dfs.ratis.client.request.max.retries</name>
     <value>180</value>
     <tag>OZONE, RATIS, MANAGEMENT</tag>
@@ -280,12 +264,6 @@
     <description>Retry Cache entry timeout for ratis server.</description>
   </property>
   <property>
-    <name>dfs.ratis.server.request.timeout.duration</name>
-    <value>3s</value>
-    <tag>OZONE, RATIS, MANAGEMENT</tag>
-    <description>The timeout duration for ratis server request.</description>
-  </property>
-  <property>
     <name>dfs.ratis.leader.election.minimum.timeout.duration</name>
     <value>5s</value>
     <tag>OZONE, RATIS, MANAGEMENT</tag>
@@ -294,15 +272,6 @@
     </description>
   </property>
   <property>
-    <name>dfs.ratis.server.failure.duration</name>
-    <value>120s</value>
-    <tag>OZONE, RATIS, MANAGEMENT</tag>
-    <description>The timeout duration for ratis server failure detection,
-      once the threshold has reached, the ratis state machine will be informed
-      about the failure in the ratis ring
-    </description>
-  </property>
-  <property>
     <name>hdds.node.report.interval</name>
     <value>60000ms</value>
     <tag>OZONE, CONTAINER, MANAGEMENT</tag>
@@ -436,14 +405,6 @@
     </description>
   </property>
   <property>
-    <name>ozone.client.watch.request.timeout</name>
-    <value>30s</value>
-    <tag>OZONE, CLIENT</tag>
-    <description>Timeout for the watch API in Ratis client to acknowledge
-      a particular request getting replayed to all servers.
-    </description>
-  </property>
-  <property>
     <name>ozone.client.max.retries</name>
     <value>100</value>
     <tag>OZONE, CLIENT</tag>
@@ -483,10 +444,8 @@
     <value></value>
     <tag>OM, HA</tag>
     <description>
-      Comma-separated list of OM service Ids.
-
-      If not set, the default value of "omServiceIdDefault" is assigned as the
-      OM service ID.
+      Comma-separated list of OM service IDs. This property allows the client
+      to figure out the quorum of OzoneManager addresses.
     </description>
   </property>
   <property>
@@ -2459,4 +2418,19 @@
       The maximum number of keys to return for a list trash request.
     </description>
   </property>
+  <property>
+    <name>ozone.http.basedir</name>
+    <value></value>
+    <tag>OZONE, OM, SCM, MANAGEMENT</tag>
+    <description>
+      The base directory for the HTTP Jetty server to extract its contents. If
+      this property is not configured, Jetty creates a directory inside the
+      directory named by the java.io.tmpdir system property (/tmp by default).
+      In production environments it is strongly suggested to instruct Jetty to
+      use a different parent directory by setting this property to the name of
+      the desired parent directory. The value of the property will be used to
+      set the Jetty context attribute 'org.eclipse.jetty.webapp.basetempdir'.
+      The directory named by this property must exist and be writable.
+    </description>
+  </property>
 </configuration>
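
To make the new ozone.http.basedir description concrete: the configured value ends up as Jetty's 'org.eclipse.jetty.webapp.basetempdir' context attribute. Below is a minimal sketch using plain Jetty API; the directory path is hypothetical and the actual wiring inside Ozone's HTTP server is not part of this diff.

    import java.io.File;

    import org.eclipse.jetty.webapp.WebAppContext;

    public class JettyBaseTempDirSketch {
      public static void main(String[] args) {
        // Hypothetical, pre-created and writable parent directory.
        File baseTempDir = new File("/var/lib/ozone/jetty-work");

        WebAppContext context = new WebAppContext();
        // Jetty extracts webapp contents under this directory instead of
        // the directory named by java.io.tmpdir.
        context.setAttribute("org.eclipse.jetty.webapp.basetempdir",
            baseTempDir.getAbsolutePath());
      }
    }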
diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/fs/MockSpaceUsageCheckFactory.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/fs/MockSpaceUsageCheckFactory.java
new file mode 100644
index 0000000..3b2e641
--- /dev/null
+++ b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/fs/MockSpaceUsageCheckFactory.java
@@ -0,0 +1,57 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdds.fs;
+
+import java.io.File;
+import java.time.Duration;
+
+/**
+ * {@link SpaceUsageCheckFactory} implementations for testing.
+ */
+public final class MockSpaceUsageCheckFactory {
+
+  public static final SpaceUsageCheckFactory NONE = new None();
+
+  /**
+   * Creates a factory that uses the specified parameters for all directories.
+   */
+  public static SpaceUsageCheckFactory of(SpaceUsageSource source,
+      Duration refresh, SpaceUsagePersistence persistence) {
+    return dir -> new SpaceUsageCheckParams(dir, source, refresh, persistence);
+  }
+
+  /**
+   * An implementation that never checks space usage and reports essentially
+   * unlimited free space. It does not persist space usage info either.
+   */
+  public static class None implements SpaceUsageCheckFactory {
+    @Override
+    public SpaceUsageCheckParams paramsFor(File dir) {
+      return new SpaceUsageCheckParams(dir,
+          MockSpaceUsageSource.unlimited(),
+          Duration.ZERO,
+          SpaceUsagePersistence.None.INSTANCE
+      );
+    }
+  }
+
+  private MockSpaceUsageCheckFactory() {
+    throw new UnsupportedOperationException("no instances");
+  }
+
+}
diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/fs/MockSpaceUsageCheckParams.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/fs/MockSpaceUsageCheckParams.java
new file mode 100644
index 0000000..f83d858
--- /dev/null
+++ b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/fs/MockSpaceUsageCheckParams.java
@@ -0,0 +1,71 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdds.fs;
+
+import java.io.File;
+import java.time.Duration;
+
+/**
+ * {@link SpaceUsageCheckParams} builder for testing.
+ */
+public final class MockSpaceUsageCheckParams {
+
+  public static Builder newBuilder(File dir) {
+    return new Builder(dir);
+  }
+
+  /**
+   * Builder of {@link SpaceUsageCheckParams} for testing.
+   */
+  public static final class Builder {
+
+    private final File dir;
+    private SpaceUsageSource source = MockSpaceUsageSource.unlimited();
+    private Duration refresh = Duration.ZERO;
+    private SpaceUsagePersistence persistence =
+        SpaceUsagePersistence.None.INSTANCE;
+
+    private Builder(File dir) {
+      this.dir = dir;
+    }
+
+    public Builder withSource(SpaceUsageSource newSource) {
+      this.source = newSource;
+      return this;
+    }
+
+    public Builder withRefresh(Duration newRefresh) {
+      this.refresh = newRefresh;
+      return this;
+    }
+
+    public Builder withPersistence(SpaceUsagePersistence newPersistence) {
+      this.persistence = newPersistence;
+      return this;
+    }
+
+    public SpaceUsageCheckParams build() {
+      return new SpaceUsageCheckParams(dir, source, refresh, persistence);
+    }
+  }
+
+  private MockSpaceUsageCheckParams() {
+    throw new UnsupportedOperationException("no instances");
+  }
+
+}
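
As a short usage sketch, a test would typically build params with this builder as shown below (the directory is hypothetical; the getters come from SpaceUsageCheckParams as exercised elsewhere in this change).

    import java.io.File;
    import java.time.Duration;

    import org.apache.hadoop.hdds.fs.MockSpaceUsageCheckParams;
    import org.apache.hadoop.hdds.fs.MockSpaceUsageSource;
    import org.apache.hadoop.hdds.fs.SpaceUsageCheckParams;

    public class MockParamsUsageSketch {
      public static void main(String[] args) {
        File dir = new File("/tmp/space-usage-test"); // hypothetical test dir

        SpaceUsageCheckParams params = MockSpaceUsageCheckParams.newBuilder(dir)
            .withSource(MockSpaceUsageSource.fixed(10_000, 1_000))
            .withRefresh(Duration.ofMinutes(1))
            .build();

        System.out.println(params.getDir() + " refresh=" + params.getRefresh());
      }
    }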
diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconNodeManager.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/fs/MockSpaceUsagePersistence.java
similarity index 50%
rename from hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconNodeManager.java
rename to hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/fs/MockSpaceUsagePersistence.java
index 611f287..c57297d 100644
--- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconNodeManager.java
+++ b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/fs/MockSpaceUsagePersistence.java
@@ -1,4 +1,4 @@
-/**
+/*
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements.  See the NOTICE file
  * distributed with this work for additional information
@@ -15,24 +15,41 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-package org.apache.hadoop.ozone.recon.scm;
+package org.apache.hadoop.hdds.fs;
 
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdds.scm.net.NetworkTopology;
-import org.apache.hadoop.hdds.scm.node.SCMNodeManager;
-import org.apache.hadoop.hdds.scm.server.SCMStorageConfig;
-import org.apache.hadoop.hdds.server.events.EventPublisher;
+import java.util.OptionalLong;
+import java.util.concurrent.atomic.AtomicLong;
 
 /**
- * Recon's version of SCM Node Manager.
- * TODO This is just an initial implementation. Will be revisited in future.
+ * {@link SpaceUsagePersistence} implementations for testing.
  */
-public class ReconNodeManager extends SCMNodeManager {
+public final class MockSpaceUsagePersistence {
+
+  public static SpaceUsagePersistence inMemory(AtomicLong target) {
+    return new Memory(target);
+  }
+
+  private static class Memory implements SpaceUsagePersistence {
+
+    private final AtomicLong target;
 
-  public ReconNodeManager(OzoneConfiguration conf,
-                          SCMStorageConfig scmStorageConfig,
-                          EventPublisher eventPublisher,
-                          NetworkTopology networkTopology) {
-    super(conf, scmStorageConfig, eventPublisher, networkTopology);
+    Memory(AtomicLong target) {
+      this.target = target;
+    }
+
+    @Override
+    public OptionalLong load() {
+      return OptionalLong.of(target.get());
+    }
+
+    @Override
+    public void save(SpaceUsageSource source) {
+      target.set(source.getUsedSpace());
+    }
   }
+
+  private MockSpaceUsagePersistence() {
+    throw new UnsupportedOperationException("no instances");
+  }
+
 }
diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/fs/MockSpaceUsageSource.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/fs/MockSpaceUsageSource.java
new file mode 100644
index 0000000..4055f08
--- /dev/null
+++ b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/fs/MockSpaceUsageSource.java
@@ -0,0 +1,74 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdds.fs;
+
+/**
+ * {@link SpaceUsageSource} implementations for testing.
+ */
+public final class MockSpaceUsageSource {
+
+  public static SpaceUsageSource zero() {
+    return fixed(0, 0);
+  }
+
+  public static SpaceUsageSource unlimited() {
+    return fixed(Long.MAX_VALUE, Long.MAX_VALUE);
+  }
+
+  public static SpaceUsageSource fixed(long capacity, long available) {
+    return fixed(capacity, available, capacity - available);
+  }
+
+  public static SpaceUsageSource fixed(long capacity, long available,
+      long used) {
+    return new Fixed(capacity, available, used);
+  }
+
+  private static final class Fixed implements SpaceUsageSource {
+
+    private final long capacity;
+    private final long available;
+    private final long used;
+
+    Fixed(long capacity, long available, long used) {
+      this.capacity = capacity;
+      this.available = available;
+      this.used = used;
+    }
+
+    @Override
+    public long getCapacity() {
+      return capacity;
+    }
+
+    @Override
+    public long getAvailable() {
+      return available;
+    }
+
+    @Override
+    public long getUsedSpace() {
+      return used;
+    }
+  }
+
+  private MockSpaceUsageSource() {
+    throw new UnsupportedOperationException("no instances");
+  }
+
+}
diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/fs/TestCachingSpaceUsageSource.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/fs/TestCachingSpaceUsageSource.java
new file mode 100644
index 0000000..c29f653
--- /dev/null
+++ b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/fs/TestCachingSpaceUsageSource.java
@@ -0,0 +1,197 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdds.fs;
+
+import org.apache.commons.lang3.RandomUtils;
+import org.apache.hadoop.hdds.fs.MockSpaceUsageCheckParams.Builder;
+import org.junit.Test;
+import org.mockito.stubbing.Answer;
+
+import java.io.File;
+import java.time.Duration;
+import java.util.concurrent.ScheduledExecutorService;
+import java.util.concurrent.ScheduledFuture;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicLong;
+
+import static org.apache.hadoop.hdds.fs.MockSpaceUsageCheckParams.newBuilder;
+import static org.apache.hadoop.test.GenericTestUtils.getTestDir;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.mockito.ArgumentMatchers.any;
+import static org.mockito.ArgumentMatchers.anyLong;
+import static org.mockito.ArgumentMatchers.eq;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.verify;
+import static org.mockito.Mockito.when;
+
+/**
+ * Tests for {@link CachingSpaceUsageSource}.
+ */
+public class TestCachingSpaceUsageSource {
+
+  private static final File DIR =
+      getTestDir(TestCachingSpaceUsageSource.class.getSimpleName());
+
+  @Test
+  public void providesInitialValueUntilStarted() {
+    final long initialValue = validInitialValue();
+    SpaceUsageCheckParams params = paramsBuilder(new AtomicLong(initialValue))
+        .withRefresh(Duration.ZERO)
+        .build();
+
+    SpaceUsageSource subject = new CachingSpaceUsageSource(params);
+
+    assertEquals(initialValue, subject.getUsedSpace());
+  }
+
+  @Test
+  public void ignoresMissingInitialValue() {
+    SpaceUsageCheckParams params = paramsBuilder()
+        .withRefresh(Duration.ZERO)
+        .build();
+
+    SpaceUsageSource subject = new CachingSpaceUsageSource(params);
+
+    assertEquals(0, subject.getUsedSpace());
+  }
+
+  @Test
+  public void updatesValueFromSourceUponStartIfPeriodicRefreshNotConfigured() {
+    AtomicLong savedValue = new AtomicLong(validInitialValue());
+    SpaceUsageCheckParams params = paramsBuilder(savedValue)
+        .withRefresh(Duration.ZERO).build();
+
+    CachingSpaceUsageSource subject = new CachingSpaceUsageSource(params);
+    subject.start();
+
+    assertSubjectWasRefreshed(params.getSource().getUsedSpace(), subject);
+  }
+
+  @Test
+  public void schedulesRefreshWithDelayIfConfigured() {
+    long initialValue = validInitialValue();
+    AtomicLong savedValue = new AtomicLong(initialValue);
+    SpaceUsageCheckParams params = paramsBuilder(savedValue)
+        .build();
+    Duration refresh = params.getRefresh();
+    ScheduledExecutorService executor = sameThreadExecutorWithoutDelay();
+
+    CachingSpaceUsageSource subject =
+        new CachingSpaceUsageSource(params, executor);
+    subject.start();
+
+    verifyRefreshWasScheduled(executor, refresh.toMillis(), refresh);
+    assertSubjectWasRefreshed(params.getSource().getUsedSpace(), subject);
+    assertEquals(initialValue, savedValue.get(),
+        "value should not have been saved to file yet");
+  }
+
+  @Test
+  public void schedulesImmediateRefreshIfInitialValueMissing() {
+    final long initialValue = missingInitialValue();
+    AtomicLong savedValue = new AtomicLong(initialValue);
+    SpaceUsageCheckParams params = paramsBuilder(savedValue).build();
+    ScheduledExecutorService executor = sameThreadExecutorWithoutDelay();
+
+    CachingSpaceUsageSource subject =
+        new CachingSpaceUsageSource(params, executor);
+    subject.start();
+
+    verifyRefreshWasScheduled(executor, 0L, params.getRefresh());
+    assertSubjectWasRefreshed(params.getSource().getUsedSpace(), subject);
+    assertEquals(initialValue, savedValue.get(),
+        "value should not have been saved to file yet");
+  }
+
+  @Test
+  public void savesValueOnShutdown() {
+    AtomicLong savedValue = new AtomicLong(validInitialValue());
+    SpaceUsageSource source = mock(SpaceUsageSource.class);
+    final long usedSpace = 4L;
+    when(source.getUsedSpace()).thenReturn(usedSpace, 5L, 6L);
+    SpaceUsageCheckParams params = paramsBuilder(savedValue).withSource(source)
+        .build();
+    ScheduledFuture<?> future = mock(ScheduledFuture.class);
+    ScheduledExecutorService executor = sameThreadExecutorWithoutDelay(future);
+
+    CachingSpaceUsageSource subject =
+        new CachingSpaceUsageSource(params, executor);
+    subject.start();
+    subject.shutdown();
+
+    assertEquals(usedSpace, savedValue.get(),
+        "value should have been saved to file");
+    assertEquals(usedSpace, subject.getUsedSpace(),
+        "no further updates from source expected");
+    verify(future).cancel(true);
+    verify(executor).shutdown();
+  }
+
+  private static long missingInitialValue() {
+    return 0L;
+  }
+
+  private static long validInitialValue() {
+    return RandomUtils.nextLong(1, 100);
+  }
+
+  private static Builder paramsBuilder(AtomicLong savedValue) {
+    return paramsBuilder()
+        .withPersistence(MockSpaceUsagePersistence.inMemory(savedValue));
+  }
+
+  private static Builder paramsBuilder() {
+    return newBuilder(DIR)
+        .withSource(MockSpaceUsageSource.fixed(10000, 1000))
+        .withRefresh(Duration.ofMinutes(5));
+  }
+
+  private static ScheduledExecutorService sameThreadExecutorWithoutDelay() {
+    return sameThreadExecutorWithoutDelay(mock(ScheduledFuture.class));
+  }
+
+  private static ScheduledExecutorService sameThreadExecutorWithoutDelay(
+      ScheduledFuture<?> result) {
+
+    ScheduledExecutorService executor = mock(ScheduledExecutorService.class);
+    when(executor.scheduleWithFixedDelay(any(), anyLong(),
+        anyLong(), any()))
+        .thenAnswer((Answer<ScheduledFuture<?>>) invocation -> {
+          Runnable task = invocation.getArgument(0);
+          task.run();
+          return result;
+        });
+    return executor;
+  }
+
+  private static void verifyRefreshWasScheduled(
+      ScheduledExecutorService executor, long expectedInitialDelay,
+      Duration refresh) {
+
+    verify(executor).scheduleWithFixedDelay(any(), eq(expectedInitialDelay),
+        eq(refresh.toMillis()), eq(TimeUnit.MILLISECONDS));
+  }
+
+  private static void assertSubjectWasRefreshed(long expected,
+      SpaceUsageSource subject) {
+
+    assertEquals(expected, subject.getUsedSpace(),
+        "subject should have been refreshed");
+  }
+
+}
diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/fs/TestDU.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/fs/TestDU.java
new file mode 100644
index 0000000..86d3838
--- /dev/null
+++ b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/fs/TestDU.java
@@ -0,0 +1,112 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdds.fs;
+
+import org.apache.hadoop.fs.FileUtil;
+import org.apache.hadoop.util.Shell;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+
+import static org.apache.hadoop.ozone.OzoneConsts.KB;
+import static org.apache.hadoop.test.GenericTestUtils.getTestDir;
+import static org.junit.jupiter.api.Assertions.assertTrue;
+import static org.junit.jupiter.api.Assumptions.assumeFalse;
+import static org.junit.jupiter.api.Assumptions.assumeTrue;
+
+import java.io.File;
+import java.io.IOException;
+import java.io.RandomAccessFile;
+import java.util.Random;
+
+/**
+ * Tests for {@link DU}.
+ */
+public class TestDU {
+
+  private static final File DIR = getTestDir(TestDU.class.getSimpleName());
+
+  @Before
+  public void setUp() {
+    assumeFalse(Shell.WINDOWS);
+    FileUtil.fullyDelete(DIR);
+    assertTrue(DIR.mkdirs());
+  }
+
+  @After
+  public void tearDown() throws IOException {
+    FileUtil.fullyDelete(DIR);
+  }
+
+  static void createFile(File newFile, int size) throws IOException {
+    // write random data so that filesystems with compression enabled (e.g. ZFS)
+    // can't compress the file
+    Random random = new Random();
+    byte[] data = new byte[size];
+    random.nextBytes(data);
+
+    assumeTrue(newFile.createNewFile());
+    RandomAccessFile file = new RandomAccessFile(newFile, "rws");
+
+    file.write(data);
+
+    file.getFD().sync();
+    file.close();
+  }
+
+  /**
+   * Verify that du returns the expected used space for a file.
+   * We assume here that if a file system creates a file whose size
+   * is a multiple of the file system's block size, then the used
+   * size reported for the file will be exactly that size.
+   * This is true for most file systems.
+   */
+  @Test
+  public void testGetUsed() throws Exception {
+    final long writtenSize = 32 * KB;
+    File file = new File(DIR, "data");
+    createFile(file, (int) writtenSize);
+
+    SpaceUsageSource du = new DU(file);
+    long duSize = du.getUsedSpace();
+
+    assertFileSize(writtenSize, duSize);
+  }
+
+  @Test
+  public void testExcludePattern() throws IOException {
+    createFile(new File(DIR, "include.txt"), (int) (4 * KB));
+    createFile(new File(DIR, "exclude.tmp"), (int) (100 * KB));
+    SpaceUsageSource du = new DU(DIR, "*.tmp");
+
+    long usedSpace = du.getUsedSpace();
+
+    assertFileSize(4 * KB, usedSpace);
+  }
+
+  private static void assertFileSize(long expected, long actual) {
+    // Allow for extra 8K on-disk slack for local file systems
+    // that may store additional file metadata (eg ext attrs).
+    final long max = expected + 8 * KB;
+    assertTrue(expected <= actual && actual <= max, () ->
+        String.format(
+            "Invalid on-disk size: %d, expected to be in [%d, %d]",
+            actual, expected, max));
+  }
+
+}
diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/fs/TestDUFactory.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/fs/TestDUFactory.java
new file mode 100644
index 0000000..e651174
--- /dev/null
+++ b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/fs/TestDUFactory.java
@@ -0,0 +1,57 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdds.fs;
+
+import org.apache.hadoop.conf.Configuration;
+import org.junit.Test;
+
+import java.io.File;
+import java.time.Duration;
+
+import static org.apache.hadoop.hdds.fs.DUFactory.Conf.configKeyForRefreshPeriod;
+import static org.apache.hadoop.test.GenericTestUtils.getTestDir;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertSame;
+
+/**
+ * Tests for {@link DUFactory}.
+ */
+public class TestDUFactory {
+
+  @Test
+  public void testCreateViaConfig() {
+    TestSpaceUsageFactory.testCreateViaConfig(DUFactory.class);
+  }
+
+  @Test
+  public void testParams() {
+    Configuration conf = new Configuration();
+    conf.set(configKeyForRefreshPeriod(), "1h");
+    File dir = getTestDir(getClass().getSimpleName());
+
+    SpaceUsageCheckParams params = new DUFactory()
+        .setConfiguration(conf)
+        .paramsFor(dir);
+
+    assertSame(dir, params.getDir());
+    assertEquals(Duration.ofHours(1), params.getRefresh());
+    assertSame(DU.class, params.getSource().getClass());
+    assertSame(SaveSpaceUsageToFile.class, params.getPersistence().getClass());
+  }
+
+}
diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/fs/TestDedicatedDiskSpaceUsage.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/fs/TestDedicatedDiskSpaceUsage.java
new file mode 100644
index 0000000..a7b653b
--- /dev/null
+++ b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/fs/TestDedicatedDiskSpaceUsage.java
@@ -0,0 +1,63 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdds.fs;
+
+import org.apache.hadoop.fs.FileUtil;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+
+import java.io.File;
+import java.io.IOException;
+
+import static org.apache.hadoop.hdds.fs.TestDU.createFile;
+import static org.apache.hadoop.test.GenericTestUtils.getTestDir;
+import static org.junit.Assert.assertTrue;
+
+/**
+ * Tests for {@link DedicatedDiskSpaceUsage}.
+ */
+public class TestDedicatedDiskSpaceUsage {
+
+  private static final File DIR =
+      getTestDir(TestDedicatedDiskSpaceUsage.class.getSimpleName());
+
+  private static final int FILE_SIZE = 1024;
+
+  @Before
+  public void setUp() {
+    FileUtil.fullyDelete(DIR);
+    assertTrue(DIR.mkdirs());
+  }
+
+  @After
+  public void tearDown() throws IOException {
+    FileUtil.fullyDelete(DIR);
+  }
+
+  @Test
+  public void testGetUsed() throws IOException {
+    File file = new File(DIR, "data");
+    createFile(file, FILE_SIZE);
+    SpaceUsageSource subject = new DedicatedDiskSpaceUsage(DIR);
+
+    // condition comes from TestDFCachingGetSpaceUsed in Hadoop Common
+    assertTrue(subject.getUsedSpace() >= FILE_SIZE - 20);
+  }
+
+}
\ No newline at end of file
diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/fs/TestDedicatedDiskSpaceUsageFactory.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/fs/TestDedicatedDiskSpaceUsageFactory.java
new file mode 100644
index 0000000..e3015b5
--- /dev/null
+++ b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/fs/TestDedicatedDiskSpaceUsageFactory.java
@@ -0,0 +1,57 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdds.fs;
+
+import org.apache.hadoop.conf.Configuration;
+import org.junit.Test;
+
+import java.io.File;
+import java.time.Duration;
+
+import static org.apache.hadoop.hdds.fs.DedicatedDiskSpaceUsageFactory.Conf.configKeyForRefreshPeriod;
+import static org.apache.hadoop.test.GenericTestUtils.getTestDir;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertSame;
+
+/**
+ * Tests for {@link DedicatedDiskSpaceUsageFactory}.
+ */
+public class TestDedicatedDiskSpaceUsageFactory {
+
+  @Test
+  public void testCreateViaConfig() {
+    TestSpaceUsageFactory.testCreateViaConfig(
+        DedicatedDiskSpaceUsageFactory.class);
+  }
+
+  @Test
+  public void testParams() {
+    Configuration conf = new Configuration();
+    conf.set(configKeyForRefreshPeriod(), "2m");
+    File dir = getTestDir(getClass().getSimpleName());
+
+    SpaceUsageCheckParams params = new DedicatedDiskSpaceUsageFactory()
+        .setConfiguration(conf)
+        .paramsFor(dir);
+
+    assertSame(dir, params.getDir());
+    assertEquals(Duration.ofMinutes(2), params.getRefresh());
+    assertSame(DedicatedDiskSpaceUsage.class, params.getSource().getClass());
+  }
+
+}
diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/fs/TestSaveSpaceUsageToFile.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/fs/TestSaveSpaceUsageToFile.java
new file mode 100644
index 0000000..0e0051f
--- /dev/null
+++ b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/fs/TestSaveSpaceUsageToFile.java
@@ -0,0 +1,150 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdds.fs;
+
+import org.apache.commons.io.FileUtils;
+import org.apache.hadoop.fs.FileUtil;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+
+import java.io.File;
+import java.io.IOException;
+import java.nio.charset.StandardCharsets;
+import java.time.Duration;
+import java.time.Instant;
+import java.util.OptionalLong;
+
+import static org.apache.hadoop.test.GenericTestUtils.getTestDir;
+import static org.apache.hadoop.test.GenericTestUtils.waitFor;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+
+/**
+ * Tests for {@link SaveSpaceUsageToFile}.
+ */
+public class TestSaveSpaceUsageToFile {
+
+  private static final File DIR =
+      getTestDir(TestSaveSpaceUsageToFile.class.getSimpleName());
+
+  private static final Duration LONG_EXPIRY = Duration.ofMinutes(15);
+
+  private static final SpaceUsageSource VALID_USAGE_SOURCE =
+      MockSpaceUsageSource.fixed(123, 81);
+
+  private File file;
+
+  @Before
+  public void setup() {
+    FileUtil.fullyDelete(DIR);
+    assertTrue(DIR.mkdirs());
+    file = new File(DIR, "space_usage.txt");
+  }
+
+  @After
+  public void cleanup() {
+    FileUtil.fullyDelete(DIR);
+  }
+
+  @Test
+  public void persistsValidValue() {
+    SpaceUsagePersistence subject = new SaveSpaceUsageToFile(file, LONG_EXPIRY);
+
+    subject.save(VALID_USAGE_SOURCE);
+    OptionalLong savedValue = subject.load();
+
+    assertTrue(file.exists());
+    assertTrue(savedValue.isPresent());
+    assertEquals(VALID_USAGE_SOURCE.getUsedSpace(), savedValue.getAsLong());
+  }
+
+  @Test
+  public void doesNotSaveInvalidValue() {
+    SpaceUsageSource source = MockSpaceUsageSource.fixed(123, 123);
+    SpaceUsagePersistence subject = new SaveSpaceUsageToFile(file, LONG_EXPIRY);
+
+    subject.save(source);
+    OptionalLong savedValue = subject.load();
+
+    assertFalse(file.exists());
+    assertFalse(savedValue.isPresent());
+  }
+
+  @Test
+  public void doesNotLoadExpiredValue() throws Exception {
+    Duration shortExpiry = Duration.ofMillis(5);
+    SpaceUsagePersistence subject = new SaveSpaceUsageToFile(file, shortExpiry);
+
+    subject.save(VALID_USAGE_SOURCE);
+    Instant expired = Instant.now().plus(shortExpiry);
+    waitFor(() -> Instant.now().isAfter(expired), 10, 1000);
+    OptionalLong savedValue = subject.load();
+
+    assertTrue(file.exists());
+    assertFalse(savedValue.isPresent());
+  }
+
+  @Test
+  public void doesNotLoadIfTimeMissing() throws IOException {
+    saveToFile(Long.toString(VALID_USAGE_SOURCE.getUsedSpace()));
+    SpaceUsagePersistence subject = new SaveSpaceUsageToFile(file, LONG_EXPIRY);
+
+    OptionalLong savedValue = subject.load();
+
+    assertFalse(savedValue.isPresent());
+  }
+
+  @Test
+  public void doesNotLoadIfFileMissing() {
+    SpaceUsagePersistence subject = new SaveSpaceUsageToFile(file, LONG_EXPIRY);
+
+    OptionalLong savedValue = subject.load();
+
+    assertFalse(file.exists());
+    assertFalse(savedValue.isPresent());
+  }
+
+  @Test
+  public void doesNotLoadGarbage() throws IOException {
+    saveToFile("garbage 456");
+    SpaceUsagePersistence subject = new SaveSpaceUsageToFile(file, LONG_EXPIRY);
+
+    OptionalLong savedValue = subject.load();
+
+    assertFalse(savedValue.isPresent());
+  }
+
+  @Test
+  public void overwritesExistingFile() throws IOException {
+    saveToFile("456 " + Instant.now().toEpochMilli());
+    SpaceUsagePersistence subject = new SaveSpaceUsageToFile(file, LONG_EXPIRY);
+
+    subject.save(VALID_USAGE_SOURCE);
+    OptionalLong savedValue = subject.load();
+
+    assertTrue(savedValue.isPresent());
+    assertEquals(VALID_USAGE_SOURCE.getUsedSpace(), savedValue.getAsLong());
+  }
+
+  private void saveToFile(String content) throws IOException {
+    FileUtils.writeStringToFile(file, content, StandardCharsets.UTF_8);
+  }
+
+}
diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/fs/TestSpaceUsageFactory.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/fs/TestSpaceUsageFactory.java
new file mode 100644
index 0000000..09b8cc2
--- /dev/null
+++ b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/fs/TestSpaceUsageFactory.java
@@ -0,0 +1,197 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdds.fs;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.test.GenericTestUtils.LogCapturer;
+import org.junit.Before;
+import org.junit.Test;
+import org.slf4j.LoggerFactory;
+
+import java.io.File;
+
+import static org.apache.hadoop.hdds.fs.SpaceUsageCheckFactory.Conf.configKeyForClassName;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertNotNull;
+import static org.junit.jupiter.api.Assertions.assertSame;
+import static org.junit.jupiter.api.Assertions.assertTrue;
+
+/**
+ * Tests for {@link SpaceUsageCheckFactory}.
+ */
+public class TestSpaceUsageFactory {
+
+  private LogCapturer capturer;
+
+  /**
+   * Verifies that {@link SpaceUsageCheckFactory#create(Configuration)} creates
+   * the correct implementation if configured.  This should be called from each
+   * specific implementation's test class.
+   * @return the instance created, so that further checks can be done, if needed
+   */
+  protected static <T extends SpaceUsageCheckFactory> T testCreateViaConfig(
+      Class<T> factoryClass) {
+
+    Configuration conf = configFor(factoryClass);
+
+    SpaceUsageCheckFactory factory = SpaceUsageCheckFactory.create(conf);
+
+    assertSame(factoryClass, factory.getClass());
+
+    return factoryClass.cast(factory);
+  }
+
+  @Before
+  public void setUp() {
+    capturer = LogCapturer.captureLogs(
+        LoggerFactory.getLogger(SpaceUsageCheckFactory.class));
+  }
+
+  @Test
+  public void configuresFactoryInstance() {
+    SpyFactory factory = testCreateViaConfig(SpyFactory.class);
+
+    assertNotNull(factory.getConf());
+  }
+
+  @Test
+  public void returnsDefaultFactoryForMissingNoArgs() {
+    testDefaultFactoryForBrokenImplementation(MissingNoArgsConstructor.class);
+  }
+
+  @Test
+  public void returnsDefaultFactoryForPrivateConstructor() {
+    testDefaultFactoryForBrokenImplementation(PrivateConstructor.class);
+  }
+
+  @Test
+  public void returnsDefaultFactoryForMissingConfig() {
+    testDefaultFactoryForWrongConfig("");
+  }
+
+  @Test
+  public void returnsDefaultFactoryForUnknownClass() {
+    testDefaultFactoryForWrongConfig("no.such.class");
+  }
+
+  @Test
+  public void returnsDefaultFactoryForClassThatDoesNotImplementInterface() {
+    testDefaultFactoryForWrongConfig("java.lang.String");
+  }
+
+  private void assertNoLog() {
+    assertEquals("", capturer.getOutput());
+  }
+
+  private void assertLogged(String substring) {
+    String output = capturer.getOutput();
+    assertTrue(output.contains(substring), () -> "Expected " + substring + " " +
+        "in log output, but only got: " + output);
+  }
+
+  private static <T extends SpaceUsageCheckFactory> Configuration configFor(
+      Class<T> factoryClass) {
+
+    Configuration conf = new Configuration();
+    conf.setClass(configKeyForClassName(),
+        factoryClass, SpaceUsageCheckFactory.class);
+
+    return conf;
+  }
+
+  private static void testDefaultFactoryForBrokenImplementation(
+      Class<? extends SpaceUsageCheckFactory> brokenImplementationClass) {
+    Configuration conf = configFor(brokenImplementationClass);
+    assertCreatesDefaultImplementation(conf);
+  }
+
+  private void testDefaultFactoryForWrongConfig(String value) {
+    Configuration conf = new Configuration();
+    conf.set(configKeyForClassName(), value);
+
+    assertCreatesDefaultImplementation(conf);
+
+    if (value == null || value.isEmpty()) {
+      assertNoLog();
+    } else {
+      assertLogged(value);
+    }
+  }
+
+  private static void assertCreatesDefaultImplementation(Configuration conf) {
+    // given
+    // conf
+
+    // when
+    SpaceUsageCheckFactory factory = SpaceUsageCheckFactory.create(conf);
+
+    // then
+    assertSame(SpaceUsageCheckFactory.defaultImplementation().getClass(),
+        factory.getClass());
+  }
+
+  /**
+   * Base class for broken {@code SpaceUsageCheckFactory} implementations
+   * (for test).
+   */
+  protected static class BrokenFactoryImpl implements SpaceUsageCheckFactory {
+    @Override
+    public SpaceUsageCheckParams paramsFor(File dir) {
+      throw new UnsupportedOperationException();
+    }
+  }
+
+  /**
+   * This one has no no-args constructor.
+   */
+  public static final class MissingNoArgsConstructor extends BrokenFactoryImpl {
+    public MissingNoArgsConstructor(String ignored) { }
+  }
+
+  /**
+   * This one has a private constructor.
+   */
+  public static final class PrivateConstructor extends BrokenFactoryImpl {
+    private PrivateConstructor() { }
+  }
+
+  /**
+   * Spy factory to verify {@link SpaceUsageCheckFactory#create(Configuration)}
+   * properly configures it.
+   */
+  public static final class SpyFactory implements SpaceUsageCheckFactory {
+
+    private Configuration conf;
+
+    @Override
+    public SpaceUsageCheckFactory setConfiguration(Configuration config) {
+      this.conf = config;
+      return this;
+    }
+
+    @Override
+    public SpaceUsageCheckParams paramsFor(File dir) {
+      throw new UnsupportedOperationException();
+    }
+
+    public Configuration getConf() {
+      return conf;
+    }
+  }
+
+}
diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/ratis/TestRatisHelper.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/ratis/TestRatisHelper.java
new file mode 100644
index 0000000..42e3c4b
--- /dev/null
+++ b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/ratis/TestRatisHelper.java
@@ -0,0 +1,122 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hdds.ratis;
+
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.ratis.conf.RaftProperties;
+import org.junit.Assert;
+import org.junit.Test;
+
+/**
+ * Test RatisHelper class.
+ */
+public class TestRatisHelper {
+
+  @Test
+  public void testCreateRaftClientProperties() {
+
+    OzoneConfiguration ozoneConfiguration = new OzoneConfiguration();
+    ozoneConfiguration.set("raft.client.rpc.watch.request.timeout", "30s");
+    ozoneConfiguration.set("raft.client.rpc.request.timeout", "30s");
+
+    RaftProperties raftProperties = new RaftProperties();
+    RatisHelper.createRaftClientProperties(ozoneConfiguration, raftProperties);
+
+    Assert.assertEquals("30s",
+        raftProperties.get("raft.client.rpc.watch.request.timeout"));
+    Assert.assertEquals("30s",
+        raftProperties.get("raft.client.rpc.request.timeout"));
+
+  }
+
+  @Test
+  public void testCreateRaftGrpcProperties() {
+
+    OzoneConfiguration ozoneConfiguration = new OzoneConfiguration();
+    ozoneConfiguration.set("raft.grpc.message.size.max", "30MB");
+    ozoneConfiguration.set("raft.grpc.flow.control.window", "1MB");
+    ozoneConfiguration.set("raft.grpc.tls.enabled", "true");
+    ozoneConfiguration.set("raft.grpc.tls.mutual_authn.enabled", "true");
+    ozoneConfiguration.set("raft.grpc.server.port", "100");
+
+    RaftProperties raftProperties = new RaftProperties();
+    RatisHelper.createRaftGrpcProperties(ozoneConfiguration, raftProperties);
+
+    Assert.assertEquals("30MB",
+        raftProperties.get("raft.grpc.message.size.max"));
+    Assert.assertEquals("1MB",
+        raftProperties.get("raft.grpc.flow.control.window"));
+
+    // TLS and server-side raft.grpc properties are not mapped by
+    // createRaftGrpcProperties, so they should be null.
+    Assert.assertNull(raftProperties.get("raft.grpc.tls.enabled"));
+    Assert.assertNull(raftProperties.get("raft.grpc.tls.mutual_authn.enabled"));
+    Assert.assertNull(raftProperties.get("raft.grpc.server.port"));
+
+  }
+
+
+  @Test
+  public void testCreateRaftServerGrpcProperties() {
+
+    OzoneConfiguration ozoneConfiguration = new OzoneConfiguration();
+    ozoneConfiguration.set("datanode.ratis.raft.grpc.message.size.max", "30MB");
+    ozoneConfiguration.set("datanode.ratis.raft.grpc.flow.control.window",
+        "1MB");
+    ozoneConfiguration.set("datanode.ratis.raft.grpc.tls.enabled", "true");
+    ozoneConfiguration.set("datanode.ratis.raft.grpc.tls.mutual_authn" +
+        ".enabled", "true");
+    ozoneConfiguration.set("datanode.ratis.raft.grpc.server.port", "100");
+
+    RaftProperties raftProperties = new RaftProperties();
+    RatisHelper.createRaftServerGrpcProperties(ozoneConfiguration,
+        raftProperties);
+
+    Assert.assertEquals("30MB",
+        raftProperties.get("raft.grpc.message.size.max"));
+    Assert.assertEquals("1MB",
+        raftProperties.get("raft.grpc.flow.control.window"));
+    Assert.assertEquals("true",
+        raftProperties.get("raft.grpc.tls.enabled"));
+    Assert.assertEquals("true",
+        raftProperties.get("raft.grpc.tls.mutual_authn.enabled"));
+    Assert.assertEquals("100",
+        raftProperties.get("raft.grpc.server.port"));
+
+  }
+
+  @Test
+  public void testCreateRaftServerProperties() {
+
+    OzoneConfiguration ozoneConfiguration = new OzoneConfiguration();
+    ozoneConfiguration.set(
+        "datanode.ratis.raft.server.rpc.watch.request.timeout", "30s");
+    ozoneConfiguration.set(
+        "datanode.ratis.raft.server.rpc.request.timeout", "30s");
+
+    RaftProperties raftProperties = new RaftProperties();
+    RatisHelper.createRaftServerProperties(ozoneConfiguration, raftProperties);
+
+    Assert.assertEquals("30s",
+        raftProperties.get("raft.server.rpc.watch.request.timeout"));
+    Assert.assertEquals("30s",
+        raftProperties.get("raft.server.rpc.request.timeout"));
+
+  }
+}
diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/security/token/TestOzoneBlockTokenIdentifier.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/security/token/TestOzoneBlockTokenIdentifier.java
index 77a2cec..7a8db14 100644
--- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/security/token/TestOzoneBlockTokenIdentifier.java
+++ b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/security/token/TestOzoneBlockTokenIdentifier.java
@@ -45,7 +45,6 @@ import org.apache.commons.lang3.RandomStringUtils;
 import org.apache.commons.lang3.RandomUtils;
 import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
-import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.security.ssl.KeyStoreTestUtil;
 import org.apache.hadoop.security.token.Token;
@@ -163,12 +162,12 @@ public class TestOzoneBlockTokenIdentifier {
     byte[] signedToken = signTokenAsymmetric(tokenId, privateKey);
 
 
-    Token<BlockTokenIdentifier> token = new Token(tokenId.getBytes(),
+    Token<OzoneBlockTokenIdentifier> token = new Token(tokenId.getBytes(),
         signedToken, tokenId.getKind(), new Text("host:port"));
 
     String encodeToUrlString = token.encodeToUrlString();
 
-    Token<BlockTokenIdentifier>decodedToken = new Token();
+    Token<OzoneBlockTokenIdentifier>decodedToken = new Token();
     decodedToken.decodeFromUrlString(encodeToUrlString);
 
     OzoneBlockTokenIdentifier decodedTokenId = new OzoneBlockTokenIdentifier();
diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/security/x509/certificate/utils/TestCRLCodec.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/security/x509/certificate/utils/TestCRLCodec.java
index 7cdb2dc..c8201c0 100644
--- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/security/x509/certificate/utils/TestCRLCodec.java
+++ b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/security/x509/certificate/utils/TestCRLCodec.java
@@ -20,16 +20,26 @@
 package org.apache.hadoop.hdds.security.x509.certificate.utils;
 
 import static org.apache.hadoop.hdds.HddsConfigKeys.OZONE_METADATA_DIRS;
+import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertTrue;
 
+import java.io.BufferedReader;
+import java.io.ByteArrayInputStream;
 import java.io.File;
+import java.io.FileReader;
 import java.io.IOException;
+import java.io.InputStream;
 import java.math.BigInteger;
+import java.nio.file.Paths;
 import java.security.KeyPair;
 import java.security.NoSuchAlgorithmException;
 import java.security.NoSuchProviderException;
 import java.security.PrivateKey;
+import java.security.cert.CRLException;
 import java.security.cert.CertificateException;
+import java.security.cert.CertificateFactory;
+import java.security.cert.X509CRL;
 import java.time.LocalDate;
 import java.time.temporal.ChronoUnit;
 import java.util.Date;
@@ -64,12 +74,23 @@ public class TestCRLCodec {
   private SecurityConfig securityConfig;
   private X509CertificateHolder x509CertificateHolder;
   private KeyPair keyPair;
-  private static final String CRL_FILE_NAME = "RevocationList.crl";
   private static final String TMP_CERT_FILE_NAME = "pemcertificate.crt";
 
   @Rule
   public TemporaryFolder temporaryFolder = new TemporaryFolder();
   private File basePath;
+  private static final String TMP_CRL_ENTRY =
+      "-----BEGIN X509 CRL-----\n" +
+      "MIIBijB0AgEBMA0GCSqGSIb3DQEBCwUAMC0xDTALBgNVBAMMBHdxVG0xDTALBgNV\n" +
+      "BAsMBGVFY2gxDTALBgNVBAoMBHJpc1UXDTIwMDExNzE4NTcyMFowIjAgAgEBFw0y\n" +
+      "MDAxMTcxODU3MjBaMAwwCgYDVR0VBAMKAQIwDQYJKoZIhvcNAQELBQADggEBACRI\n" +
+      "i/nFK2/5rsNWAsYjT/Byhq6shQy+EjdvSzs2cezHbO2TKXnIhlHbvTp5JO/ClaGm\n" +
+      "yfdwH6OjQbujcjceSKGSDQwNm98/JsryUh17IWcKJa9dlqFSUCy7GTZaXK6a3nH8\n" +
+      "SNhcqzrR69lLc4vJZAy0FkmBCnjbdUX8I92ZHfNQNJaC4JQ8JFtjfzZCcQR9KZxw\n" +
+      "bVue37JByiTxmxoiiMZf3MpOccuWKsZzIr9Tiw9G9inPS8lxRXODruDtMTpR8NPB\n" +
+      "KL0Yg+JEV48v2GJ5kSObuawCD2uDDNpHDd6q2m1z6J69z5IYpWb8OHEyQT7J4u+b\n" +
+      "tPiRCAUQLW9BACm17xc=\n" +
+      "-----END X509 CRL-----\n";
 
   @Before
   public void init() throws NoSuchProviderException,
@@ -101,11 +122,117 @@ public class TestCRLCodec {
         builder.build(contentSignerBuilder.build(privateKey));
 
     CRLCodec crlCodec = new CRLCodec(securityConfig);
-    crlCodec.writeCRL(cRLHolder, CRL_FILE_NAME, true);
+    crlCodec.writeCRL(cRLHolder, this.securityConfig.getCrlName(), true);
 
     X509CRLEntryHolder entryHolder =
         cRLHolder.getRevokedCertificate(BigInteger.ONE);
     assertNotNull(entryHolder);
+
+    // verify file generation
+    File crlFile =
+        Paths.get(crlCodec.getLocation().toString(),
+                  this.securityConfig.getCrlName()).toFile();
+    assertTrue(crlFile.exists());
+
+    try (BufferedReader reader = new BufferedReader(new FileReader(crlFile))){
+
+      // Verify contents of the file
+      String header = reader.readLine();
+      assertEquals("-----BEGIN X509 CRL-----", header);
+
+      String footer = null;
+      String line = null;
+      while ((line = reader.readLine()) != null) {
+        footer = line;
+      }
+      assertEquals("-----END X509 CRL-----", footer);
+    }
+  }
+
+  @Test
+  public void testWriteCRLX509() throws IOException,
+      OperatorCreationException, CertificateException, CRLException {
+
+    X500Name issuer = x509CertificateHolder.getIssuer();
+    Date now = new Date();
+    X509v2CRLBuilder builder = new X509v2CRLBuilder(issuer, now);
+    builder.addCRLEntry(x509CertificateHolder.getSerialNumber(), now,
+                        CRLReason.cACompromise);
+
+    byte[] crlBytes = TMP_CRL_ENTRY.getBytes();
+    try (InputStream inStream = new ByteArrayInputStream(crlBytes)) {
+      CertificateFactory cf = CertificateFactory.getInstance("X.509");
+      X509CRL crl = (X509CRL)cf.generateCRL(inStream);
+
+      CRLCodec crlCodec = new CRLCodec(securityConfig);
+      crlCodec.writeCRL(crl);
+
+      // verify file generated or not
+      File crlFile =
+          Paths.get(crlCodec.getLocation().toString(),
+                    this.securityConfig.getCrlName()).toFile();
+
+      assertTrue(crlFile.exists());
+    }
+  }
+
+  @Test
+  public void testGetX509CRL() throws IOException,
+      OperatorCreationException, CertificateException, CRLException {
+
+    X500Name issuer = x509CertificateHolder.getIssuer();
+    Date now = new Date();
+    X509v2CRLBuilder builder = new X509v2CRLBuilder(issuer, now);
+    builder.addCRLEntry(x509CertificateHolder.getSerialNumber(), now,
+                        CRLReason.cACompromise);
+
+    JcaContentSignerBuilder contentSignerBuilder =
+        new JcaContentSignerBuilder(securityConfig.getSignatureAlgo());
+
+    contentSignerBuilder.setProvider(securityConfig.getProvider());
+    PrivateKey privateKey = keyPair.getPrivate();
+    X509CRLHolder cRLHolder =
+        builder.build(contentSignerBuilder.build(privateKey));
+
+    CRLCodec crlCodec = new CRLCodec(securityConfig);
+    crlCodec.writeCRL(cRLHolder, this.securityConfig.getCrlName(), true);
+
+    X509CRLEntryHolder entryHolder =
+        cRLHolder.getRevokedCertificate(BigInteger.ONE);
+    assertNotNull(entryHolder);
+
+    String pemEncodedString = crlCodec.getPEMEncodedString(cRLHolder);
+    assertNotNull(pemEncodedString);
+
+    // Verify header and footer of PEM encoded String
+    String header = "-----BEGIN X509 CRL-----";
+    String footer = "-----END X509 CRL-----";
+    assertTrue(pemEncodedString.contains(header));
+    assertTrue(pemEncodedString.contains(footer));
+  }
+
+  @Test
+  public void testGetX509CRLFromCRLHolder() throws IOException,
+      OperatorCreationException, CertificateException, CRLException {
+
+    X500Name issuer = x509CertificateHolder.getIssuer();
+    Date now = new Date();
+    X509v2CRLBuilder builder = new X509v2CRLBuilder(issuer, now);
+    builder.addCRLEntry(x509CertificateHolder.getSerialNumber(), now,
+                        CRLReason.cACompromise);
+
+    JcaContentSignerBuilder contentSignerBuilder =
+        new JcaContentSignerBuilder(securityConfig.getSignatureAlgo());
+
+    contentSignerBuilder.setProvider(securityConfig.getProvider());
+    PrivateKey privateKey = keyPair.getPrivate();
+    X509CRLHolder cRLHolder =
+        builder.build(contentSignerBuilder.build(privateKey));
+
+    CRLCodec crlCodec = new CRLCodec(securityConfig);
+
+    X509CRL crl = crlCodec.getX509CRL(cRLHolder);
+    assertNotNull(crl);
   }
 
   /**
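
The new testWriteCRLX509 case relies on the standard JDK route for turning a PEM-encoded CRL into an X509CRL, which is worth seeing in isolation. A stripped-down sketch of just that parsing plus the header/footer check the tests assert (any valid X.509 CRL in PEM form, such as TMP_CRL_ENTRY above, works as input):

    import java.io.ByteArrayInputStream;
    import java.io.InputStream;
    import java.nio.charset.StandardCharsets;
    import java.security.cert.CertificateFactory;
    import java.security.cert.X509CRL;

    public final class CrlPemSketch {
      private CrlPemSketch() { }

      /** Parses a PEM-encoded CRL with the stock JDK CertificateFactory. */
      public static X509CRL parse(String pemEncodedCrl) throws Exception {
        byte[] bytes = pemEncodedCrl.getBytes(StandardCharsets.UTF_8);
        try (InputStream in = new ByteArrayInputStream(bytes)) {
          CertificateFactory cf = CertificateFactory.getInstance("X.509");
          return (X509CRL) cf.generateCRL(in);
        }
      }

      /** Mirrors the header/footer assertions made by the tests. */
      public static boolean looksLikePemCrl(String pem) {
        return pem.contains("-----BEGIN X509 CRL-----")
            && pem.contains("-----END X509 CRL-----");
      }
    }
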
diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/utils/TestMetadataStore.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/utils/TestMetadataStore.java
index d24fcf5..bc53c7a 100644
--- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/utils/TestMetadataStore.java
+++ b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/utils/TestMetadataStore.java
@@ -16,28 +16,6 @@
  */
 package org.apache.hadoop.hdds.utils;
 
-import com.google.common.collect.Lists;
-import org.apache.commons.io.FileUtils;
-import org.apache.commons.lang3.tuple.ImmutablePair;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdfs.DFSUtil;
-import org.apache.hadoop.hdfs.DFSUtilClient;
-import org.apache.hadoop.ozone.OzoneConfigKeys;
-import org.apache.hadoop.test.GenericTestUtils;
-import org.apache.hadoop.hdds.utils.MetadataKeyFilters.KeyPrefixFilter;
-import org.apache.hadoop.hdds.utils.MetadataKeyFilters.MetadataKeyFilter;
-
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.rules.ExpectedException;
-import org.junit.runner.RunWith;
-import org.junit.runners.Parameterized;
-import org.slf4j.event.Level;
-
 import java.io.File;
 import java.io.IOException;
 import java.util.ArrayList;
@@ -50,13 +28,32 @@ import java.util.NoSuchElementException;
 import java.util.UUID;
 import java.util.concurrent.atomic.AtomicInteger;
 
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdds.StringUtils;
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.utils.MetadataKeyFilters.KeyPrefixFilter;
+import org.apache.hadoop.hdds.utils.MetadataKeyFilters.MetadataKeyFilter;
+import org.apache.hadoop.ozone.OzoneConfigKeys;
+import org.apache.hadoop.test.GenericTestUtils;
+
+import com.google.common.collect.Lists;
 import static java.nio.charset.StandardCharsets.UTF_8;
+import org.apache.commons.io.FileUtils;
+import org.apache.commons.lang3.tuple.ImmutablePair;
 import static org.apache.hadoop.test.PlatformAssumptions.assumeNotWindows;
+import org.junit.After;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
+import org.junit.Before;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.ExpectedException;
+import org.junit.runner.RunWith;
+import org.junit.runners.Parameterized;
 import static org.junit.runners.Parameterized.Parameters;
+import org.slf4j.event.Level;
 
 /**
  * Test class for ozone metadata store.
@@ -227,12 +224,12 @@ public class TestMetadataStore {
 
   private byte[] getBytes(String str) {
     return str == null ? null :
-        DFSUtilClient.string2Bytes(str);
+        StringUtils.string2Bytes(str);
   }
 
   private String getString(byte[] bytes) {
     return bytes == null ? null :
-        DFSUtilClient.bytes2String(bytes);
+        StringUtils.bytes2String(bytes);
   }
 
   @Test
@@ -420,20 +417,20 @@ public class TestMetadataStore {
   @Test
   public void testGetSequentialRangeKVs() throws IOException {
     MetadataKeyFilter suffixFilter = (preKey, currentKey, nextKey)
-        -> DFSUtil.bytes2String(currentKey).endsWith("2");
+        -> StringUtils.bytes2String(currentKey).endsWith("2");
     // Supposed to return a2 and b2
     List<Map.Entry<byte[], byte[]>> result =
         store.getRangeKVs(null, MAX_GETRANGE_LENGTH, suffixFilter);
     assertEquals(2, result.size());
-    assertEquals("a2", DFSUtil.bytes2String(result.get(0).getKey()));
-    assertEquals("b2", DFSUtil.bytes2String(result.get(1).getKey()));
+    assertEquals("a2", StringUtils.bytes2String(result.get(0).getKey()));
+    assertEquals("b2", StringUtils.bytes2String(result.get(1).getKey()));
 
     // Supposed to return just a2, because when it iterates to a3,
     // the filter no longer matches and it should stop from there.
     result = store.getSequentialRangeKVs(null,
         MAX_GETRANGE_LENGTH, suffixFilter);
     assertEquals(1, result.size());
-    assertEquals("a2", DFSUtil.bytes2String(result.get(0).getKey()));
+    assertEquals("a2", StringUtils.bytes2String(result.get(0).getKey()));
   }
 
   @Test
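
The TestMetadataStore changes swap DFSUtil/DFSUtilClient for the new org.apache.hadoop.hdds.StringUtils helpers. As exercised by the test, their contract is a plain byte/string round trip; a small sketch of the same suffix-filter logic using only JDK classes (UTF-8 is an assumption about the helpers' charset, carried over from the old DFSUtil behaviour):

    import java.nio.charset.StandardCharsets;

    public final class KeySuffixFilterSketch {
      private KeySuffixFilterSketch() { }

      // Stand-in for StringUtils.bytes2String in this sketch.
      static String bytes2String(byte[] key) {
        return new String(key, StandardCharsets.UTF_8);
      }

      // Same predicate as the suffix filter in testGetSequentialRangeKVs:
      // keep only keys whose string form ends with "2".
      static boolean keepKey(byte[] currentKey) {
        return bytes2String(currentKey).endsWith("2");
      }

      public static void main(String[] args) {
        System.out.println(keepKey("a2".getBytes(StandardCharsets.UTF_8))); // true
        System.out.println(keepKey("a3".getBytes(StandardCharsets.UTF_8))); // false
      }
    }
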
diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/utils/db/TestDBConfigFromFile.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/utils/db/TestDBConfigFromFile.java
index 4ba54e9..238b1d1 100644
--- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/utils/db/TestDBConfigFromFile.java
+++ b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/utils/db/TestDBConfigFromFile.java
@@ -20,7 +20,6 @@
 package org.apache.hadoop.hdds.utils.db;
 
 import org.apache.commons.io.FileUtils;
-import org.apache.hadoop.hdfs.DFSUtil;
 import org.junit.After;
 import org.junit.Assert;
 import org.junit.Before;
@@ -40,6 +39,8 @@ import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.List;
 
+import org.apache.hadoop.hdds.StringUtils;
+
 import static org.apache.hadoop.hdds.utils.db.DBConfigFromFile.getOptionsFileNameFromDB;
 
 /**
@@ -69,7 +70,7 @@ public class TestDBConfigFromFile {
   @Test
   public void readFromFile() throws IOException {
     final List<String> families =
-        Arrays.asList(DFSUtil.bytes2String(RocksDB.DEFAULT_COLUMN_FAMILY),
+        Arrays.asList(StringUtils.bytes2String(RocksDB.DEFAULT_COLUMN_FAMILY),
             "First", "Second", "Third",
             "Fourth", "Fifth",
             "Sixth");
@@ -95,7 +96,7 @@ public class TestDBConfigFromFile {
   @Test
   public void readFromFileInvalidConfig() throws IOException {
     final List<String> families =
-        Arrays.asList(DFSUtil.bytes2String(RocksDB.DEFAULT_COLUMN_FAMILY),
+        Arrays.asList(StringUtils.bytes2String(RocksDB.DEFAULT_COLUMN_FAMILY),
             "First", "Second", "Third",
             "Fourth", "Fifth",
             "Sixth");
diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/utils/db/TestRDBStore.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/utils/db/TestRDBStore.java
index 6084ae9..e162730 100644
--- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/utils/db/TestRDBStore.java
+++ b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/utils/db/TestRDBStore.java
@@ -20,7 +20,6 @@
 package org.apache.hadoop.hdds.utils.db;
 
 import javax.management.MBeanServer;
-
 import java.io.IOException;
 import java.lang.management.ManagementFactory;
 import java.nio.charset.StandardCharsets;
@@ -32,8 +31,7 @@ import java.util.List;
 import java.util.Map;
 import java.util.Set;
 
-import org.apache.commons.codec.binary.StringUtils;
-import org.apache.hadoop.hdfs.DFSUtil;
+import org.apache.hadoop.hdds.StringUtils;
 
 import org.apache.commons.lang3.RandomStringUtils;
 import org.junit.After;
@@ -54,7 +52,7 @@ import org.rocksdb.StatsLevel;
  */
 public class TestRDBStore {
   private final List<String> families =
-      Arrays.asList(DFSUtil.bytes2String(RocksDB.DEFAULT_COLUMN_FAMILY),
+      Arrays.asList(StringUtils.bytes2String(RocksDB.DEFAULT_COLUMN_FAMILY),
           "First", "Second", "Third",
           "Fourth", "Fifth",
           "Sixth");
@@ -89,7 +87,6 @@ public class TestRDBStore {
       rdbStore.close();
     }
   }
-
   private void insertRandomData(RDBStore dbStore, int familyIndex)
       throws Exception {
     try (Table firstTable = dbStore.getTable(families.get(familyIndex))) {
@@ -308,7 +305,8 @@ public class TestRDBStore {
       long start = System.nanoTime();
       for (int i = 0; i < 50; i++) {
         Assert.assertTrue(db.get(
-            StringUtils.getBytesUtf16("key" + i))== null);
+            org.apache.commons.codec.binary.StringUtils
+                .getBytesUtf16("key" + i)) == null);
       }
       long end = System.nanoTime();
       long keyGetLatency = end - start;
@@ -316,7 +314,8 @@ public class TestRDBStore {
       start = System.nanoTime();
       for (int i = 0; i < 50; i++) {
         Assert.assertFalse(db.keyMayExist(
-            StringUtils.getBytesUtf16("key" + i), new StringBuilder()));
+            org.apache.commons.codec.binary.StringUtils
+                .getBytesUtf16("key" + i), new StringBuilder()));
       }
       end = System.nanoTime();
       long keyMayExistLatency = end - start;
@@ -332,10 +331,14 @@ public class TestRDBStore {
              new RDBStore(folder.newFolder(), options, configSet)) {
 
       try (Table firstTable = newStore.getTable(families.get(1))) {
-        firstTable.put(StringUtils.getBytesUtf16("Key1"), StringUtils
-            .getBytesUtf16("Value1"));
-        firstTable.put(StringUtils.getBytesUtf16("Key2"), StringUtils
-            .getBytesUtf16("Value2"));
+        firstTable.put(
+            org.apache.commons.codec.binary.StringUtils.getBytesUtf16("Key1"),
+            org.apache.commons.codec.binary.StringUtils
+                .getBytesUtf16("Value1"));
+        firstTable.put(
+            org.apache.commons.codec.binary.StringUtils.getBytesUtf16("Key2"),
+            org.apache.commons.codec.binary.StringUtils
+                .getBytesUtf16("Value2"));
       }
       Assert.assertTrue(
           newStore.getDb().getLatestSequenceNumber() == 2);
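
TestRDBStore now imports org.apache.hadoop.hdds.StringUtils, so the commons-codec class of the same name has to be written out in full wherever the UTF-16 helper is still needed; Java offers no import aliasing. A compressed illustration of the clash and its resolution (trimmed to the calls that appear in the diff):

    import java.util.Arrays;

    import org.apache.hadoop.hdds.StringUtils;

    public final class StringUtilsClashSketch {
      private StringUtilsClashSketch() { }

      public static void main(String[] args) {
        // The imported short name resolves to the HDDS helper, so the
        // commons-codec variant must be fully qualified at the call site.
        byte[] utf16 = org.apache.commons.codec.binary.StringUtils
            .getBytesUtf16("Key1");
        String roundTrip = StringUtils.bytes2String(
            StringUtils.string2Bytes("Key1"));

        System.out.println(roundTrip);              // Key1
        System.out.println(Arrays.toString(utf16)); // UTF-16 bytes of "Key1"
      }
    }
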
diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/utils/db/TestRDBTableStore.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/utils/db/TestRDBTableStore.java
index a853b4d..c5beb97 100644
--- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/utils/db/TestRDBTableStore.java
+++ b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/utils/db/TestRDBTableStore.java
@@ -28,7 +28,7 @@ import java.util.HashSet;
 import java.util.List;
 import java.util.Set;
 
-import org.apache.hadoop.hdfs.DFSUtil;
+import org.apache.hadoop.hdds.StringUtils;
 
 import org.apache.commons.lang3.RandomStringUtils;
 import org.junit.After;
@@ -49,7 +49,7 @@ import org.rocksdb.StatsLevel;
 public class TestRDBTableStore {
   private static int count = 0;
   private final List<String> families =
-      Arrays.asList(DFSUtil.bytes2String(RocksDB.DEFAULT_COLUMN_FAMILY),
+      Arrays.asList(StringUtils.bytes2String(RocksDB.DEFAULT_COLUMN_FAMILY),
           "First", "Second", "Third",
           "Fourth", "Fifth",
           "Sixth", "Seventh",
@@ -233,7 +233,7 @@ public class TestRDBTableStore {
     rocksDBOptions.setCreateIfMissing(true);
     rocksDBOptions.setCreateMissingColumnFamilies(true);
 
-    String tableName = DFSUtil.bytes2String(RocksDB.DEFAULT_COLUMN_FAMILY);
+    String tableName = StringUtils.bytes2String(RocksDB.DEFAULT_COLUMN_FAMILY);
 
     Set<TableConfig> configSet = new HashSet<>();
     TableConfig newConfig = new TableConfig(tableName,
diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/utils/db/TestTypedRDBTableStore.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/utils/db/TestTypedRDBTableStore.java
index 0959659..f7e3c3f 100644
--- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/utils/db/TestTypedRDBTableStore.java
+++ b/hadoop-hdds/common/src/test/java/org/apache/hadoop/hdds/utils/db/TestTypedRDBTableStore.java
@@ -28,7 +28,8 @@ import java.util.List;
 import java.util.Set;
 
 import com.google.common.base.Optional;
-import org.apache.hadoop.hdfs.DFSUtil;
+
+import org.apache.hadoop.hdds.StringUtils;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.hdds.utils.db.Table.KeyValue;
 
@@ -53,7 +54,7 @@ import org.rocksdb.StatsLevel;
 public class TestTypedRDBTableStore {
   private static int count = 0;
   private final List<String> families =
-      Arrays.asList(DFSUtil.bytes2String(RocksDB.DEFAULT_COLUMN_FAMILY),
+      Arrays.asList(StringUtils.bytes2String(RocksDB.DEFAULT_COLUMN_FAMILY),
           "First", "Second", "Third",
           "Fourth", "Fifth",
           "Sixth", "Seven", "Eighth",
diff --git a/hadoop-hdds/container-service/pom.xml b/hadoop-hdds/container-service/pom.xml
index 82c4948..8fed74b 100644
--- a/hadoop-hdds/container-service/pom.xml
+++ b/hadoop-hdds/container-service/pom.xml
@@ -79,6 +79,20 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd">
       <version>2.0.4</version>
       <scope>test</scope>
     </dependency>
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-common</artifactId>
+      <version>${hadoop.version}</version>
+      <scope>test</scope>
+      <type>test-jar</type>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-hdfs</artifactId>
+      <version>${hadoop.version}</version>
+      <scope>test</scope>
+      <type>test-jar</type>
+    </dependency>
   </dependencies>
 
   <build>
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/HddsDatanodeService.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/HddsDatanodeService.java
index 1884d01..9d5ac68 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/HddsDatanodeService.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/HddsDatanodeService.java
@@ -17,11 +17,19 @@
  */
 package org.apache.hadoop.ozone;
 
-import com.google.common.annotations.VisibleForTesting;
-import com.google.common.base.Preconditions;
+import java.io.File;
+import java.io.IOException;
+import java.net.InetAddress;
+import java.security.KeyPair;
+import java.security.cert.CertificateException;
+import java.util.Arrays;
+import java.util.List;
+import java.util.UUID;
+import java.util.concurrent.atomic.AtomicBoolean;
 
 import org.apache.hadoop.conf.Configurable;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdds.DFSConfigKeysLegacy;
 import org.apache.hadoop.hdds.HddsUtils;
 import org.apache.hadoop.hdds.cli.GenericCli;
 import org.apache.hadoop.hdds.cli.HddsVersionProvider;
@@ -37,7 +45,6 @@ import org.apache.hadoop.hdds.security.x509.certificate.client.DNCertificateClie
 import org.apache.hadoop.hdds.security.x509.certificates.utils.CertificateSignRequest;
 import org.apache.hadoop.hdds.server.RatisDropwizardExports;
 import org.apache.hadoop.hdds.tracing.TracingUtil;
-import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.ozone.container.common.helpers.ContainerUtils;
 import org.apache.hadoop.ozone.container.common.statemachine.DatanodeStateMachine;
 import org.apache.hadoop.security.SecurityUtil;
@@ -46,7 +53,13 @@ import org.apache.hadoop.security.authentication.client.AuthenticationException;
 import org.apache.hadoop.util.ServicePlugin;
 import org.apache.hadoop.util.StringUtils;
 
+import com.google.common.annotations.VisibleForTesting;
+import com.google.common.base.Preconditions;
 import io.prometheus.client.CollectorRegistry;
+import static org.apache.hadoop.hdds.security.x509.certificate.utils.CertificateCodec.getX509Certificate;
+import static org.apache.hadoop.hdds.security.x509.certificates.utils.CertificateSignRequest.getEncodedString;
+import static org.apache.hadoop.ozone.OzoneConfigKeys.HDDS_DATANODE_PLUGINS_KEY;
+import static org.apache.hadoop.util.ExitUtil.terminate;
 import org.apache.ratis.metrics.MetricRegistries;
 import org.apache.ratis.metrics.MetricsReporting;
 import org.bouncycastle.pkcs.PKCS10CertificationRequest;
@@ -54,21 +67,6 @@ import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import picocli.CommandLine.Command;
 
-import java.io.File;
-import java.io.IOException;
-import java.net.InetAddress;
-import java.security.KeyPair;
-import java.security.cert.CertificateException;
-import java.util.Arrays;
-import java.util.List;
-import java.util.UUID;
-import java.util.concurrent.atomic.AtomicBoolean;
-
-import static org.apache.hadoop.hdds.security.x509.certificate.utils.CertificateCodec.getX509Certificate;
-import static org.apache.hadoop.hdds.security.x509.certificates.utils.CertificateSignRequest.getEncodedString;
-import static org.apache.hadoop.ozone.OzoneConfigKeys.HDDS_DATANODE_PLUGINS_KEY;
-import static org.apache.hadoop.util.ExitUtil.terminate;
-
 /**
  * Datanode service plugin to start the HDDS container services.
  */
@@ -211,13 +209,15 @@ public class HddsDatanodeService extends GenericCli implements ServicePlugin {
             UserGroupInformation.AuthenticationMethod.KERBEROS)) {
           LOG.info("Ozone security is enabled. Attempting login for Hdds " +
                   "Datanode user. Principal: {},keytab: {}", conf.get(
-              DFSConfigKeys.DFS_DATANODE_KERBEROS_PRINCIPAL_KEY),
-              conf.get(DFSConfigKeys.DFS_DATANODE_KEYTAB_FILE_KEY));
+              DFSConfigKeysLegacy.DFS_DATANODE_KERBEROS_PRINCIPAL_KEY),
+              conf.get(DFSConfigKeysLegacy.DFS_DATANODE_KEYTAB_FILE_KEY));
 
           UserGroupInformation.setConfiguration(conf);
 
-          SecurityUtil.login(conf, DFSConfigKeys.DFS_DATANODE_KEYTAB_FILE_KEY,
-              DFSConfigKeys.DFS_DATANODE_KERBEROS_PRINCIPAL_KEY, hostname);
+          SecurityUtil
+              .login(conf, DFSConfigKeysLegacy.DFS_DATANODE_KEYTAB_FILE_KEY,
+                  DFSConfigKeysLegacy.DFS_DATANODE_KERBEROS_PRINCIPAL_KEY,
+                  hostname);
         } else {
           throw new AuthenticationException(SecurityUtil.
               getAuthenticationMethod(conf) + " authentication method not " +
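
The login block above only changes which class supplies the key names; the UGI and SecurityUtil calls are unchanged Hadoop APIs. A condensed sketch of that secure-startup path (key constants as in the diff; the hostname argument is whatever the datanode resolved for itself):

    import java.io.IOException;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdds.DFSConfigKeysLegacy;
    import org.apache.hadoop.security.SecurityUtil;
    import org.apache.hadoop.security.UserGroupInformation;

    public final class DatanodeKerberosLoginSketch {
      private DatanodeKerberosLoginSketch() { }

      /** Logs the datanode in from its keytab when Kerberos is configured. */
      public static void loginIfNeeded(Configuration conf, String hostname)
          throws IOException {
        if (SecurityUtil.getAuthenticationMethod(conf)
            != UserGroupInformation.AuthenticationMethod.KERBEROS) {
          return; // simple auth: nothing to do in this sketch
        }
        UserGroupInformation.setConfiguration(conf);
        SecurityUtil.login(conf,
            DFSConfigKeysLegacy.DFS_DATANODE_KEYTAB_FILE_KEY,
            DFSConfigKeysLegacy.DFS_DATANODE_KERBEROS_PRINCIPAL_KEY,
            hostname);
      }
    }
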
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerMetrics.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerMetrics.java
index 9ea4adf..accf5fe 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerMetrics.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerMetrics.java
@@ -20,7 +20,7 @@ package org.apache.hadoop.ozone.container.common.helpers;
 
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdds.DFSConfigKeysLegacy;
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
 import org.apache.hadoop.metrics2.MetricsSystem;
 import org.apache.hadoop.metrics2.annotation.Metric;
@@ -90,7 +90,7 @@ public class ContainerMetrics {
     MetricsSystem ms = DefaultMetricsSystem.instance();
     // Percentile measurement is off by default, by watching no intervals
     int[] intervals =
-             conf.getInts(DFSConfigKeys.DFS_METRICS_PERCENTILES_INTERVALS_KEY);
+        conf.getInts(DFSConfigKeysLegacy.DFS_METRICS_PERCENTILES_INTERVALS_KEY);
     return ms.register(STORAGE_CONTAINER_METRICS,
                        "Storage Container Node Metrics",
                        new ContainerMetrics(intervals));
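
The metrics hunk reads the percentile intervals with Configuration.getInts, which returns an empty array when the key is unset, so percentile tracking stays off by default. A quick illustration of that behaviour (the literal key "dfs.metrics.percentiles.intervals" is assumed here; the diff only references it through the constant):

    import org.apache.hadoop.conf.Configuration;

    public final class PercentileIntervalsSketch {
      private PercentileIntervalsSketch() { }

      public static void main(String[] args) {
        Configuration conf = new Configuration();

        // Unset key: getInts returns an empty array, so no percentile windows.
        int[] off = conf.getInts("dfs.metrics.percentiles.intervals");
        System.out.println(off.length); // 0

        // Comma-separated values yield one rolling window per interval.
        conf.set("dfs.metrics.percentiles.intervals", "60,300,900");
        int[] on = conf.getInts("dfs.metrics.percentiles.intervals");
        System.out.println(on.length);  // 3
      }
    }
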
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/StateContext.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/StateContext.java
index 2c01f3a..37197e9 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/StateContext.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/StateContext.java
@@ -18,8 +18,14 @@ package org.apache.hadoop.ozone.container.common.statemachine;
 
 import com.google.common.base.Preconditions;
 import com.google.protobuf.GeneratedMessage;
+
+import java.util.HashMap;
+import java.util.HashSet;
 import java.util.Map;
+import java.util.Set;
 import java.util.concurrent.ConcurrentHashMap;
+
+import org.apache.commons.collections.CollectionUtils;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdds.protocol.proto
     .StorageContainerDatanodeProtocolProtos.SCMCommandProto;
@@ -69,9 +75,10 @@ public class StateContext {
   private final DatanodeStateMachine parent;
   private final AtomicLong stateExecutionCount;
   private final Configuration conf;
-  private final List<GeneratedMessage> reports;
-  private final Queue<ContainerAction> containerActions;
-  private final Queue<PipelineAction> pipelineActions;
+  private final Set<String> endpoints;
+  private final Map<String, List<GeneratedMessage>> reports;
+  private final Map<String, Queue<ContainerAction>> containerActions;
+  private final Map<String, Queue<PipelineAction>> pipelineActions;
   private DatanodeStateMachine.DatanodeStates state;
   private boolean shutdownOnError = false;
 
@@ -96,9 +103,10 @@ public class StateContext {
     this.parent = parent;
     commandQueue = new LinkedList<>();
     cmdStatusMap = new ConcurrentHashMap<>();
-    reports = new LinkedList<>();
-    containerActions = new LinkedList<>();
-    pipelineActions = new LinkedList<>();
+    reports = new HashMap<>();
+    endpoints = new HashSet<>();
+    containerActions = new HashMap<>();
+    pipelineActions = new HashMap<>();
     lock = new ReentrantLock();
     stateExecutionCount = new AtomicLong(0);
   }
@@ -177,7 +185,9 @@ public class StateContext {
   public void addReport(GeneratedMessage report) {
     if (report != null) {
       synchronized (reports) {
-        reports.add(report);
+        for (String endpoint : endpoints) {
+          reports.get(endpoint).add(report);
+        }
       }
     }
   }
@@ -189,9 +199,12 @@ public class StateContext {
    * @param reportsToPutBack list of reports which failed to be sent by
    *                         heartbeat.
    */
-  public void putBackReports(List<GeneratedMessage> reportsToPutBack) {
+  public void putBackReports(List<GeneratedMessage> reportsToPutBack,
+                             String endpoint) {
     synchronized (reports) {
-      reports.addAll(0, reportsToPutBack);
+      if (reports.containsKey(endpoint)){
+        reports.get(endpoint).addAll(0, reportsToPutBack);
+      }
     }
   }
 
@@ -201,8 +214,8 @@ public class StateContext {
    *
    * @return List of reports
    */
-  public List<GeneratedMessage> getAllAvailableReports() {
-    return getReports(Integer.MAX_VALUE);
+  public List<GeneratedMessage> getAllAvailableReports(String endpoint) {
+    return getReports(endpoint, Integer.MAX_VALUE);
   }
 
   /**
@@ -211,13 +224,16 @@ public class StateContext {
    *
    * @return List of reports
    */
-  public List<GeneratedMessage> getReports(int maxLimit) {
+  public List<GeneratedMessage> getReports(String endpoint, int maxLimit) {
     List<GeneratedMessage> reportsToReturn = new LinkedList<>();
     synchronized (reports) {
-      List<GeneratedMessage> tempList = reports.subList(
-          0, min(reports.size(), maxLimit));
-      reportsToReturn.addAll(tempList);
-      tempList.clear();
+      List<GeneratedMessage> reportsForEndpoint = reports.get(endpoint);
+      if (reportsForEndpoint != null) {
+        List<GeneratedMessage> tempList = reportsForEndpoint.subList(
+            0, min(reportsForEndpoint.size(), maxLimit));
+        reportsToReturn.addAll(tempList);
+        tempList.clear();
+      }
     }
     return reportsToReturn;
   }
@@ -230,7 +246,9 @@ public class StateContext {
    */
   public void addContainerAction(ContainerAction containerAction) {
     synchronized (containerActions) {
-      containerActions.add(containerAction);
+      for (String endpoint : endpoints) {
+        containerActions.get(endpoint).add(containerAction);
+      }
     }
   }
 
@@ -241,8 +259,10 @@ public class StateContext {
    */
   public void addContainerActionIfAbsent(ContainerAction containerAction) {
     synchronized (containerActions) {
-      if (!containerActions.contains(containerAction)) {
-        containerActions.add(containerAction);
+      for (String endpoint : endpoints) {
+        if (!containerActions.get(endpoint).contains(containerAction)) {
+          containerActions.get(endpoint).add(containerAction);
+        }
       }
     }
   }
@@ -253,8 +273,8 @@ public class StateContext {
    *
    * @return {@literal List<ContainerAction>}
    */
-  public List<ContainerAction> getAllPendingContainerActions() {
-    return getPendingContainerAction(Integer.MAX_VALUE);
+  public List<ContainerAction> getAllPendingContainerActions(String endpoint) {
+    return getPendingContainerAction(endpoint, Integer.MAX_VALUE);
   }
 
   /**
@@ -263,16 +283,19 @@ public class StateContext {
    *
    * @return {@literal List<ContainerAction>}
    */
-  public List<ContainerAction> getPendingContainerAction(int maxLimit) {
+  public List<ContainerAction> getPendingContainerAction(String endpoint,
+                                                         int maxLimit) {
     List<ContainerAction> containerActionList = new ArrayList<>();
     synchronized (containerActions) {
-      if (!containerActions.isEmpty()) {
-        int size = containerActions.size();
+      if (!containerActions.isEmpty() &&
+          CollectionUtils.isNotEmpty(containerActions.get(endpoint))) {
+        Queue<ContainerAction> actions = containerActions.get(endpoint);
+        int size = actions.size();
         int limit = size > maxLimit ? maxLimit : size;
         for (int count = 0; count < limit; count++) {
           // we need to remove the action from the containerAction queue
           // as well
-          ContainerAction action = containerActions.poll();
+          ContainerAction action = actions.poll();
           Preconditions.checkNotNull(action);
           containerActionList.add(action);
         }
@@ -296,16 +319,20 @@ public class StateContext {
        * action remains same on the given pipeline, it will end up adding it
        * multiple times here.
        */
-      for (PipelineAction pipelineActionIter : pipelineActions) {
-        if (pipelineActionIter.getAction() == pipelineAction.getAction()
-            && pipelineActionIter.hasClosePipeline() && pipelineAction
-            .hasClosePipeline()
-            && pipelineActionIter.getClosePipeline().getPipelineID()
-            .equals(pipelineAction.getClosePipeline().getPipelineID())) {
-          return;
+      for (String endpoint : endpoints) {
+        Queue<PipelineAction> actionsForEndpoint =
+            this.pipelineActions.get(endpoint);
+        for (PipelineAction pipelineActionIter : actionsForEndpoint) {
+          if (pipelineActionIter.getAction() == pipelineAction.getAction()
+              && pipelineActionIter.hasClosePipeline() && pipelineAction
+              .hasClosePipeline()
+              && pipelineActionIter.getClosePipeline().getPipelineID()
+              .equals(pipelineAction.getClosePipeline().getPipelineID())) {
+            break;
+          }
         }
+        actionsForEndpoint.add(pipelineAction);
       }
-      pipelineActions.add(pipelineAction);
     }
   }
 
@@ -315,14 +342,18 @@ public class StateContext {
    *
    * @return {@literal List<ContainerAction>}
    */
-  public List<PipelineAction> getPendingPipelineAction(int maxLimit) {
+  public List<PipelineAction> getPendingPipelineAction(String endpoint,
+                                                       int maxLimit) {
     List<PipelineAction> pipelineActionList = new ArrayList<>();
     synchronized (pipelineActions) {
-      if (!pipelineActions.isEmpty()) {
-        int size = pipelineActions.size();
+      if (!pipelineActions.isEmpty() &&
+          CollectionUtils.isNotEmpty(pipelineActions.get(endpoint))) {
+        Queue<PipelineAction> actionsForEndpoint =
+            this.pipelineActions.get(endpoint);
+        int size = actionsForEndpoint.size();
         int limit = size > maxLimit ? maxLimit : size;
         for (int count = 0; count < limit; count++) {
-          pipelineActionList.add(pipelineActions.poll());
+          pipelineActionList.add(actionsForEndpoint.poll());
         }
       }
       return pipelineActionList;
@@ -499,4 +530,13 @@ public class StateContext {
   public long getHeartbeatFrequency() {
     return heartbeatFrequency.get();
   }
+
+  public void addEndpoint(String endpoint) {
+    if (!endpoints.contains(endpoint)) {
+      this.endpoints.add(endpoint);
+      this.containerActions.put(endpoint, new LinkedList<>());
+      this.pipelineActions.put(endpoint, new LinkedList<>());
+      this.reports.put(endpoint, new LinkedList<>());
+    }
+  }
 }
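
The StateContext rework replaces the single shared report/action queues with one queue per registered SCM or Recon endpoint: writes fan out to every endpoint and each endpoint drains only its own queue, so what one endpoint consumes no longer disappears for the others. A simplified, generic sketch of that pattern, independent of the protobuf types used here:

    import java.util.ArrayList;
    import java.util.HashMap;
    import java.util.LinkedList;
    import java.util.List;
    import java.util.Map;
    import java.util.Queue;

    /** Per-endpoint fan-out queues, in the style of the reworked StateContext. */
    public final class PerEndpointQueues<T> {
      private final Map<String, Queue<T>> queues = new HashMap<>();

      /** Registers an endpoint; safe to call more than once. */
      public synchronized void addEndpoint(String endpoint) {
        queues.putIfAbsent(endpoint, new LinkedList<>());
      }

      /** Every registered endpoint gets its own copy of the item. */
      public synchronized void add(T item) {
        for (Queue<T> q : queues.values()) {
          q.add(item);
        }
      }

      /** Drains up to maxLimit items queued for one endpoint. */
      public synchronized List<T> drain(String endpoint, int maxLimit) {
        List<T> result = new ArrayList<>();
        Queue<T> q = queues.get(endpoint);
        if (q != null) {
          while (!q.isEmpty() && result.size() < maxLimit) {
            result.add(q.poll());
          }
        }
        return result;
      }

      public static void main(String[] args) {
        PerEndpointQueues<String> reports = new PerEndpointQueues<>();
        reports.addEndpoint("scm1:9861");
        reports.addEndpoint("recon:9891");
        reports.add("containerReport-1");
        System.out.println(reports.drain("scm1:9861", 10)); // [containerReport-1]
        System.out.println(reports.drain("scm1:9861", 10)); // []
        System.out.println(reports.drain("recon:9891", 10)); // [containerReport-1]
      }
    }
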
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/datanode/InitDatanodeState.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/datanode/InitDatanodeState.java
index 9018801..50da65b 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/datanode/InitDatanodeState.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/datanode/InitDatanodeState.java
@@ -105,10 +105,12 @@ public class InitDatanodeState implements DatanodeState,
       }
       for (InetSocketAddress addr : addresses) {
         connectionManager.addSCMServer(addr);
+        this.context.addEndpoint(addr.toString());
       }
       InetSocketAddress reconAddress = getReconAddresses(conf);
       if (reconAddress != null) {
         connectionManager.addReconServer(reconAddress);
+        this.context.addEndpoint(reconAddress.toString());
       }
     }
 
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/HeartbeatEndpointTask.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/HeartbeatEndpointTask.java
index a55d0d6..a9d2c2f 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/HeartbeatEndpointTask.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/HeartbeatEndpointTask.java
@@ -173,7 +173,7 @@ public class HeartbeatEndpointTask
     if (requestBuilder.getIncrementalContainerReportCount() != 0) {
       reports.addAll(requestBuilder.getIncrementalContainerReportList());
     }
-    context.putBackReports(reports);
+    context.putBackReports(reports, rpcEndpoint.getAddressString());
   }
 
   /**
@@ -182,7 +182,8 @@ public class HeartbeatEndpointTask
    * @param requestBuilder builder to which the report has to be added.
    */
   private void addReports(SCMHeartbeatRequestProto.Builder requestBuilder) {
-    for (GeneratedMessage report : context.getAllAvailableReports()) {
+    for (GeneratedMessage report :
+        context.getAllAvailableReports(rpcEndpoint.getAddressString())) {
       String reportName = report.getDescriptorForType().getFullName();
       for (Descriptors.FieldDescriptor descriptor :
           SCMHeartbeatRequestProto.getDescriptor().getFields()) {
@@ -206,7 +207,7 @@ public class HeartbeatEndpointTask
   private void addContainerActions(
       SCMHeartbeatRequestProto.Builder requestBuilder) {
     List<ContainerAction> actions = context.getPendingContainerAction(
-        maxContainerActionsPerHB);
+        rpcEndpoint.getAddressString(), maxContainerActionsPerHB);
     if (!actions.isEmpty()) {
       ContainerActionsProto cap = ContainerActionsProto.newBuilder()
           .addAllContainerActions(actions)
@@ -223,7 +224,7 @@ public class HeartbeatEndpointTask
   private void addPipelineActions(
       SCMHeartbeatRequestProto.Builder requestBuilder) {
     List<PipelineAction> actions = context.getPendingPipelineAction(
-        maxPipelineActionsPerHB);
+        rpcEndpoint.getAddressString(), maxPipelineActionsPerHB);
     if (!actions.isEmpty()) {
       PipelineActionsProto pap = PipelineActionsProto.newBuilder()
           .addAllPipelineActions(actions)
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/ContainerStateMachine.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/ContainerStateMachine.java
index dbf376f..bc75b39 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/ContainerStateMachine.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/ContainerStateMachine.java
@@ -23,6 +23,7 @@ import com.google.common.base.Preconditions;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.conf.StorageUnit;
 import org.apache.hadoop.hdds.HddsUtils;
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
 
 import org.apache.hadoop.hdds.ratis.ContainerCommandRequestMessage;
@@ -32,6 +33,7 @@ import org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerExcep
 import org.apache.hadoop.hdds.utils.Cache;
 import org.apache.hadoop.hdds.utils.ResourceLimitCache;
 import org.apache.hadoop.ozone.OzoneConfigKeys;
+import org.apache.hadoop.ozone.conf.DatanodeRatisServerConfig;
 import org.apache.hadoop.ozone.container.ozoneimpl.ContainerController;
 import org.apache.hadoop.util.Time;
 import org.apache.ratis.proto.RaftProtos.StateMachineLogEntryProto;
@@ -165,10 +167,9 @@ public class ContainerStateMachine extends BaseStateMachine {
     metrics = CSMMetrics.create(gid);
     this.writeChunkFutureMap = new ConcurrentHashMap<>();
     applyTransactionCompletionMap = new ConcurrentHashMap<>();
-    int numPendingRequests = conf.getInt(
-        OzoneConfigKeys.DFS_CONTAINER_RATIS_LEADER_NUM_PENDING_REQUESTS,
-        OzoneConfigKeys.DFS_CONTAINER_RATIS_LEADER_NUM_PENDING_REQUESTS_DEFAULT
-    );
+    int numPendingRequests = OzoneConfiguration.of(conf)
+        .getObject(DatanodeRatisServerConfig.class)
+        .getLeaderNumPendingRequests();
     int pendingRequestsByteLimit = (int) conf.getStorageSize(
         OzoneConfigKeys.DFS_CONTAINER_RATIS_LEADER_PENDING_BYTES_LIMIT,
         OzoneConfigKeys.DFS_CONTAINER_RATIS_LEADER_PENDING_BYTES_LIMIT_DEFAULT,
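
The ContainerStateMachine hunk replaces the raw OzoneConfigKeys lookup with the typed DatanodeRatisServerConfig object; XceiverServerRatis below reuses the same accessor. A minimal usage sketch of that call chain, limited to the methods visible in this diff:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdds.conf.OzoneConfiguration;
    import org.apache.hadoop.ozone.conf.DatanodeRatisServerConfig;

    public final class RatisServerConfigSketch {
      private RatisServerConfigSketch() { }

      /** Reads the leader's pending-request limit through the typed config. */
      public static int leaderNumPendingRequests(Configuration conf) {
        return OzoneConfiguration.of(conf)
            .getObject(DatanodeRatisServerConfig.class)
            .getLeaderNumPendingRequests();
      }
    }

One object now carries the related Ratis server settings and their defaults, so call sites no longer repeat key/default constant pairs.
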
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/XceiverServerRatis.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/XceiverServerRatis.java
index 17a3892..4dac295 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/XceiverServerRatis.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/XceiverServerRatis.java
@@ -37,6 +37,7 @@ import org.apache.hadoop.hdds.tracing.TracingUtil;
 import org.apache.hadoop.ozone.OzoneConfigKeys;
 
 import org.apache.hadoop.ozone.OzoneConsts;
+import org.apache.hadoop.ozone.conf.DatanodeRatisServerConfig;
 import org.apache.hadoop.ozone.container.common.interfaces.ContainerDispatcher;
 import org.apache.hadoop.ozone.container.common.statemachine.StateContext;
 
@@ -124,11 +125,9 @@ public final class XceiverServerRatis implements XceiverServerSpi {
     final int numWriteChunkThreads = conf.getInt(
         OzoneConfigKeys.DFS_CONTAINER_RATIS_NUM_WRITE_CHUNK_THREADS_KEY,
         OzoneConfigKeys.DFS_CONTAINER_RATIS_NUM_WRITE_CHUNK_THREADS_DEFAULT);
-    final int queueLimit = conf.getInt(
-            OzoneConfigKeys.DFS_CONTAINER_RATIS_LEADER_NUM_PENDING_REQUESTS,
-            OzoneConfigKeys.
-                    DFS_CONTAINER_RATIS_LEADER_NUM_PENDING_REQUESTS_DEFAULT
-    );
+    final int queueLimit = OzoneConfiguration.of(conf)
+        .getObject(DatanodeRatisServerConfig.class)
+        .getLeaderNumPendingRequests();
     chunkExecutor =
         new ThreadPoolExecutor(numWriteChunkThreads, numWriteChunkThreads,
             100, TimeUnit.SECONDS,
@@ -184,9 +183,6 @@ public final class XceiverServerRatis implements XceiverServerSpi {
     RaftServerConfigKeys.Log.StateMachineData
         .setSyncTimeout(properties, dataSyncTimeout);
 
-    // Set the server Request timeout
-    setServerRequestTimeout(properties);
-
     // set timeout for a retry cache entry
     setTimeoutForRetryCache(properties);
 
@@ -264,25 +260,21 @@ public final class XceiverServerRatis implements XceiverServerSpi {
         ratisServerConfiguration.getNumSnapshotsRetained();
     RaftServerConfigKeys.Snapshot.setRetentionFileNum(properties,
         numSnapshotsRetained);
+
+    // Set properties starting with prefix raft.server
+    RatisHelper.createRaftServerProperties(conf, properties);
+
+    // Set properties starting with prefix raft.grpc
+    RatisHelper.createRaftServerGrpcProperties(conf, properties);
+
     return properties;
   }
 
   private void setNodeFailureTimeout(RaftProperties properties) {
-    TimeUnit timeUnit;
-    long duration;
-    timeUnit = OzoneConfigKeys.DFS_RATIS_SERVER_FAILURE_DURATION_DEFAULT
-        .getUnit();
-    duration = conf.getTimeDuration(
-        OzoneConfigKeys.DFS_RATIS_SERVER_FAILURE_DURATION_KEY,
-        OzoneConfigKeys.DFS_RATIS_SERVER_FAILURE_DURATION_DEFAULT
-            .getDuration(), timeUnit);
-    final TimeDuration nodeFailureTimeout =
-        TimeDuration.valueOf(duration, timeUnit);
-    RaftServerConfigKeys.Notification.setNoLeaderTimeout(properties,
-        nodeFailureTimeout);
-    RaftServerConfigKeys.Rpc.setSlownessTimeout(properties,
-        nodeFailureTimeout);
-    nodeFailureTimeoutMs = nodeFailureTimeout.toLong(TimeUnit.MILLISECONDS);
+    nodeFailureTimeoutMs =
+        conf.getObject(DatanodeRatisServerConfig.class)
+            .getFollowerSlownessTimeout();
+
   }
 
   private void setRatisLeaderElectionTimeout(RaftProperties properties) {
@@ -322,21 +314,6 @@ public final class XceiverServerRatis implements XceiverServerSpi {
         .setExpiryTime(properties, retryCacheTimeout);
   }
 
-  private void setServerRequestTimeout(RaftProperties properties) {
-    TimeUnit timeUnit;
-    long duration;
-    timeUnit = OzoneConfigKeys.DFS_RATIS_SERVER_REQUEST_TIMEOUT_DURATION_DEFAULT
-        .getUnit();
-    duration = conf.getTimeDuration(
-        OzoneConfigKeys.DFS_RATIS_SERVER_REQUEST_TIMEOUT_DURATION_KEY,
-        OzoneConfigKeys.DFS_RATIS_SERVER_REQUEST_TIMEOUT_DURATION_DEFAULT
-            .getDuration(), timeUnit);
-    final TimeDuration serverRequestTimeout =
-        TimeDuration.valueOf(duration, timeUnit);
-    RaftServerConfigKeys.Rpc
-        .setRequestTimeout(properties, serverRequestTimeout);
-  }
-
   private int setRaftSegmentPreallocatedSize(RaftProperties properties) {
     final int raftSegmentPreallocatedSize = (int) conf.getStorageSize(
         OzoneConfigKeys.DFS_CONTAINER_RATIS_SEGMENT_PREALLOCATED_SIZE_KEY,
@@ -381,11 +358,6 @@ public final class XceiverServerRatis implements XceiverServerSpi {
   }
 
   private void setPendingRequestsLimits(RaftProperties properties) {
-    final int maxPendingRequests = conf.getInt(
-        OzoneConfigKeys.DFS_CONTAINER_RATIS_LEADER_NUM_PENDING_REQUESTS,
-        OzoneConfigKeys.DFS_CONTAINER_RATIS_LEADER_NUM_PENDING_REQUESTS_DEFAULT
-    );
-    RaftServerConfigKeys.Write.setElementLimit(properties, maxPendingRequests);
 
     final int pendingRequestsByteLimit = (int)conf.getStorageSize(
         OzoneConfigKeys.DFS_CONTAINER_RATIS_LEADER_PENDING_BYTES_LIMIT,
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/HddsVolume.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/HddsVolume.java
index af1bafe..86221bf 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/HddsVolume.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/HddsVolume.java
@@ -1,4 +1,4 @@
-/**
+/*
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements.  See the NOTICE file
  * distributed with this work for additional information
@@ -20,10 +20,9 @@ package org.apache.hadoop.ozone.container.common.volume;
 
 import javax.annotation.Nullable;
 
-import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Preconditions;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.GetSpaceUsed;
+import org.apache.hadoop.hdds.fs.SpaceUsageCheckFactory;
 import org.apache.hadoop.fs.StorageType;
 import org.apache.hadoop.hdfs.server.datanode.StorageLocation;
 import org.apache.hadoop.hdfs.server.datanode.checker.Checkable;
@@ -108,11 +107,11 @@ public class HddsVolume
     private final String volumeRootStr;
     private Configuration conf;
     private StorageType storageType;
-    private long configuredCapacity;
 
     private String datanodeUuid;
     private String clusterID;
     private boolean failedVolume = false;
+    private SpaceUsageCheckFactory usageCheckFactory;
 
     public Builder(String rootDirStr) {
       this.volumeRootStr = rootDirStr;
@@ -128,11 +127,6 @@ public class HddsVolume
       return this;
     }
 
-    public Builder configuredCapacity(long capacity) {
-      this.configuredCapacity = capacity;
-      return this;
-    }
-
     public Builder datanodeUuid(String datanodeUUID) {
       this.datanodeUuid = datanodeUUID;
       return this;
@@ -151,6 +145,11 @@ public class HddsVolume
       return this;
     }
 
+    public Builder usageCheckFactory(SpaceUsageCheckFactory factory) {
+      usageCheckFactory = factory;
+      return this;
+    }
+
     public HddsVolume build() throws IOException {
       return new HddsVolume(this);
     }
@@ -165,15 +164,14 @@ public class HddsVolume
       this.datanodeUuid = b.datanodeUuid;
       this.volumeIOStats = new VolumeIOStats();
 
-      VolumeInfo.Builder volumeBuilder =
-          new VolumeInfo.Builder(b.volumeRootStr, b.conf)
-              .storageType(b.storageType)
-              .configuredCapacity(b.configuredCapacity);
-      this.volumeInfo = volumeBuilder.build();
+      volumeInfo = new VolumeInfo.Builder(b.volumeRootStr, b.conf)
+          .storageType(b.storageType)
+          .usageCheckFactory(b.usageCheckFactory)
+          .build();
       this.committedBytes = new AtomicLong(0);
 
-      LOG.info("Creating Volume: " + this.hddsRootDir + " of  storage type : " +
-          b.storageType + " and capacity : " + volumeInfo.getCapacity());
+      LOG.info("Creating Volume: {} of storage type : {} and capacity : {}",
+          hddsRootDir, b.storageType, volumeInfo.getCapacity());
 
       initialize();
     } else {
@@ -364,18 +362,16 @@ public class HddsVolume
     return state;
   }
 
-  public long getCapacity() throws IOException {
-    if(volumeInfo != null) {
-      return volumeInfo.getCapacity();
-    }
-    return 0;
+  public long getCapacity() {
+    return volumeInfo != null ? volumeInfo.getCapacity() : 0;
   }
 
-  public long getAvailable() throws IOException {
-    if(volumeInfo != null) {
-      return volumeInfo.getAvailable();
-    }
-    return 0;
+  public long getAvailable() {
+    return volumeInfo != null ? volumeInfo.getAvailable() : 0;
+  }
+
+  public long getUsedSpace() {
+    return volumeInfo != null ? volumeInfo.getScmUsed() : 0;
   }
 
   public void setState(VolumeState state) {
@@ -443,16 +439,6 @@ public class HddsVolume
   }
 
   /**
-   * Only for testing. Do not use otherwise.
-   */
-  @VisibleForTesting
-  public void setScmUsageForTesting(GetSpaceUsed scmUsageForTest) {
-    if (volumeInfo != null) {
-      volumeInfo.setScmUsageForTesting(scmUsageForTest);
-    }
-  }
-
-  /**
    * Override toString() to show the path of HddsVolume.
    */
   @Override
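
With configuredCapacity removed, capacity and usage for a volume now come from the SpaceUsageCheckFactory wired in through the Builder. A sketch of constructing a volume that way; the conf(...) setter is assumed from the Builder's conf field and is not shown in this hunk, while usageCheckFactory(...), datanodeUuid(...) and build() are taken from the diff:

    import java.io.IOException;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdds.fs.SpaceUsageCheckFactory;
    import org.apache.hadoop.ozone.container.common.volume.HddsVolume;

    public final class HddsVolumeBuilderSketch {
      private HddsVolumeBuilderSketch() { }

      /**
       * Builds a volume whose capacity/usage reporting is driven by the
       * injected SpaceUsageCheckFactory.
       */
      public static HddsVolume build(String rootDir, Configuration conf,
          SpaceUsageCheckFactory factory, String datanodeUuid)
          throws IOException {
        return new HddsVolume.Builder(rootDir)
            .conf(conf)                    // assumed setter, see note above
            .usageCheckFactory(factory)
            .datanodeUuid(datanodeUuid)
            .build();
      }
    }
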
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/HddsVolumeChecker.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/HddsVolumeChecker.java
index 65b5dfd..4358e10 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/HddsVolumeChecker.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/HddsVolumeChecker.java
@@ -18,30 +18,13 @@
 
 package org.apache.hadoop.ozone.container.common.volume;
 
-import com.google.common.annotations.VisibleForTesting;
-import com.google.common.collect.Sets;
-import com.google.common.util.concurrent.FutureCallback;
-import com.google.common.util.concurrent.Futures;
-import com.google.common.util.concurrent.ListenableFuture;
-import com.google.common.util.concurrent.ThreadFactoryBuilder;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdfs.DFSConfigKeys;
-import org.apache.hadoop.hdfs.server.datanode.DataNode;
-import org.apache.hadoop.hdfs.server.datanode.checker.VolumeCheckResult;
-import org.apache.hadoop.util.DiskChecker.DiskErrorException;
-import org.apache.hadoop.util.Timer;
-
-import static org.apache.hadoop.hdfs.server.datanode.DataNode.MAX_VOLUME_FAILURE_TOLERATED_LIMIT;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
 import javax.annotation.Nonnull;
 import javax.annotation.Nullable;
 import java.util.Collection;
 import java.util.Collections;
 import java.util.HashSet;
-import java.util.Set;
 import java.util.Optional;
+import java.util.Set;
 import java.util.concurrent.CountDownLatch;
 import java.util.concurrent.ExecutionException;
 import java.util.concurrent.ExecutorService;
@@ -49,12 +32,21 @@ import java.util.concurrent.Executors;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicLong;
 
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_DISK_CHECK_MIN_GAP_KEY;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_DISK_CHECK_TIMEOUT_DEFAULT;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_DISK_CHECK_TIMEOUT_KEY;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_FAILED_VOLUMES_TOLERATED_DEFAULT;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_FAILED_VOLUMES_TOLERATED_KEY;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdds.DFSConfigKeysLegacy;
+import org.apache.hadoop.hdfs.server.datanode.DataNode;
+import org.apache.hadoop.hdfs.server.datanode.checker.VolumeCheckResult;
+import org.apache.hadoop.util.DiskChecker.DiskErrorException;
+import org.apache.hadoop.util.Timer;
 
+import com.google.common.annotations.VisibleForTesting;
+import com.google.common.collect.Sets;
+import com.google.common.util.concurrent.FutureCallback;
+import com.google.common.util.concurrent.Futures;
+import com.google.common.util.concurrent.ListenableFuture;
+import com.google.common.util.concurrent.ThreadFactoryBuilder;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * A class that encapsulates running disk checks against each HDDS volume and
@@ -62,6 +54,8 @@ import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_FAILED_VOLUMES_T
  */
 public class HddsVolumeChecker {
 
+  public static final int MAX_VOLUME_FAILURE_TOLERATED_LIMIT = -1;
+
   public static final Logger LOG =
       LoggerFactory.getLogger(HddsVolumeChecker.class);
 
@@ -92,19 +86,19 @@ public class HddsVolumeChecker {
   private final ExecutorService checkVolumeResultHandlerExecutorService;
 
   /**
-   * @param conf Configuration object.
+   * @param conf  Configuration object.
    * @param timer {@link Timer} object used for throttling checks.
    */
   public HddsVolumeChecker(Configuration conf, Timer timer)
       throws DiskErrorException {
     maxAllowedTimeForCheckMs = conf.getTimeDuration(
-        DFS_DATANODE_DISK_CHECK_TIMEOUT_KEY,
-        DFS_DATANODE_DISK_CHECK_TIMEOUT_DEFAULT,
+        DFSConfigKeysLegacy.DFS_DATANODE_DISK_CHECK_TIMEOUT_KEY,
+        DFSConfigKeysLegacy.DFS_DATANODE_DISK_CHECK_TIMEOUT_DEFAULT,
         TimeUnit.MILLISECONDS);
 
     if (maxAllowedTimeForCheckMs <= 0) {
       throw new DiskErrorException("Invalid value configured for "
-          + DFS_DATANODE_DISK_CHECK_TIMEOUT_KEY + " - "
+          + DFSConfigKeysLegacy.DFS_DATANODE_DISK_CHECK_TIMEOUT_KEY + " - "
           + maxAllowedTimeForCheckMs + " (should be > 0)");
     }
 
@@ -115,28 +109,28 @@ public class HddsVolumeChecker {
      * declaring a fatal error.
      */
     int maxVolumeFailuresTolerated = conf.getInt(
-        DFS_DATANODE_FAILED_VOLUMES_TOLERATED_KEY,
-        DFS_DATANODE_FAILED_VOLUMES_TOLERATED_DEFAULT);
+        DFSConfigKeysLegacy.DFS_DATANODE_FAILED_VOLUMES_TOLERATED_KEY,
+        DFSConfigKeysLegacy.DFS_DATANODE_FAILED_VOLUMES_TOLERATED_DEFAULT);
 
     minDiskCheckGapMs = conf.getTimeDuration(
-        DFSConfigKeys.DFS_DATANODE_DISK_CHECK_MIN_GAP_KEY,
-        DFSConfigKeys.DFS_DATANODE_DISK_CHECK_MIN_GAP_DEFAULT,
+        DFSConfigKeysLegacy.DFS_DATANODE_DISK_CHECK_MIN_GAP_KEY,
+        DFSConfigKeysLegacy.DFS_DATANODE_DISK_CHECK_MIN_GAP_DEFAULT,
         TimeUnit.MILLISECONDS);
 
     if (minDiskCheckGapMs < 0) {
       throw new DiskErrorException("Invalid value configured for "
-          + DFS_DATANODE_DISK_CHECK_MIN_GAP_KEY + " - "
+          + DFSConfigKeysLegacy.DFS_DATANODE_DISK_CHECK_MIN_GAP_KEY + " - "
           + minDiskCheckGapMs + " (should be >= 0)");
     }
 
     long diskCheckTimeout = conf.getTimeDuration(
-        DFSConfigKeys.DFS_DATANODE_DISK_CHECK_TIMEOUT_KEY,
-        DFSConfigKeys.DFS_DATANODE_DISK_CHECK_TIMEOUT_DEFAULT,
+        DFSConfigKeysLegacy.DFS_DATANODE_DISK_CHECK_TIMEOUT_KEY,
+        DFSConfigKeysLegacy.DFS_DATANODE_DISK_CHECK_TIMEOUT_DEFAULT,
         TimeUnit.MILLISECONDS);
 
     if (diskCheckTimeout < 0) {
       throw new DiskErrorException("Invalid value configured for "
-          + DFS_DATANODE_DISK_CHECK_TIMEOUT_KEY + " - "
+          + DFSConfigKeysLegacy.DFS_DATANODE_DISK_CHECK_TIMEOUT_KEY + " - "
           + diskCheckTimeout + " (should be >= 0)");
     }
 
@@ -144,7 +138,8 @@ public class HddsVolumeChecker {
 
     if (maxVolumeFailuresTolerated < MAX_VOLUME_FAILURE_TOLERATED_LIMIT) {
       throw new DiskErrorException("Invalid value configured for "
-          + DFS_DATANODE_FAILED_VOLUMES_TOLERATED_KEY + " - "
+          + DFSConfigKeysLegacy.DFS_DATANODE_FAILED_VOLUMES_TOLERATED_KEY
+          + " - "
           + maxVolumeFailuresTolerated + " "
           + DataNode.MAX_VOLUME_FAILURES_TOLERATED_MSG);
     }
@@ -166,14 +161,13 @@ public class HddsVolumeChecker {
 
   /**
    * Run checks against all HDDS volumes.
-   *
+   * <p>
    * This check may be performed at service startup and subsequently at
    * regular intervals to detect and handle failed volumes.
    *
    * @param volumes - Set of volumes to be checked. This set must be immutable
    *                for the duration of the check else the results will be
    *                unexpected.
-   *
    * @return set of failed volumes.
    */
   public Set<HddsVolume> checkAllVolumes(Collection<HddsVolume> volumes)
@@ -239,23 +233,23 @@ public class HddsVolumeChecker {
   public interface Callback {
     /**
      * @param healthyVolumes set of volumes that passed disk checks.
-     * @param failedVolumes set of volumes that failed disk checks.
+     * @param failedVolumes  set of volumes that failed disk checks.
      */
     void call(Set<HddsVolume> healthyVolumes,
-              Set<HddsVolume> failedVolumes);
+        Set<HddsVolume> failedVolumes);
   }
 
   /**
    * Check a single volume asynchronously, returning a {@link ListenableFuture}
    * that can be used to retrieve the final result.
-   *
+   * <p>
    * If the volume cannot be referenced then it is already closed and
    * cannot be checked. No error is propagated to the callback.
    *
-   * @param volume the volume that is to be checked.
+   * @param volume   the volume that is to be checked.
    * @param callback callback to be invoked when the volume check completes.
    * @return true if the check was scheduled and the callback will be invoked.
-   *         false otherwise.
+   * false otherwise.
    */
   public boolean checkVolume(final HddsVolume volume, Callback callback) {
     if (volume == null) {
@@ -291,19 +285,18 @@ public class HddsVolumeChecker {
     private final Callback callback;
 
     /**
-     *
      * @param healthyVolumes set of healthy volumes. If the disk check is
      *                       successful, add the volume here.
-     * @param failedVolumes set of failed volumes. If the disk check fails,
-     *                      add the volume here.
-     * @param volumeCounter volumeCounter used to trigger callback invocation.
-     * @param callback invoked when the volumeCounter reaches 0.
+     * @param failedVolumes  set of failed volumes. If the disk check fails,
+     *                       add the volume here.
+     * @param volumeCounter  volumeCounter used to trigger callback invocation.
+     * @param callback       invoked when the volumeCounter reaches 0.
      */
     ResultHandler(HddsVolume volume,
-                  Set<HddsVolume> healthyVolumes,
-                  Set<HddsVolume> failedVolumes,
-                  AtomicLong volumeCounter,
-                  @Nullable Callback callback) {
+        Set<HddsVolume> healthyVolumes,
+        Set<HddsVolume> failedVolumes,
+        AtomicLong volumeCounter,
+        @Nullable Callback callback) {
       this.volume = volume;
       this.healthyVolumes = healthyVolumes;
       this.failedVolumes = failedVolumes;
@@ -366,7 +359,7 @@ public class HddsVolumeChecker {
         if (callback != null && remaining == 0) {
           callback.call(healthyVolumes, failedVolumes);
         }
-      } catch(Exception e) {
+      } catch (Exception e) {
         // Propagating this exception is unlikely to be helpful.
         LOG.warn("Unexpected exception", e);
       }
@@ -375,7 +368,7 @@ public class HddsVolumeChecker {
 
   /**
    * Shutdown the checker and its associated ExecutorService.
-   *
+   * <p>
    * See {@link ExecutorService#awaitTermination} for the interpretation
    * of the parameters.
    */
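
Note: a minimal sketch of how a caller might drive this check API, for illustration only. The constructor arguments mirror TestHddsVolumeChecker further down in this patch; the wrapper class and method names are invented, and exception handling is collapsed into a throws clause.

import java.util.Collection;
import java.util.Set;

import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.ozone.container.common.volume.HddsVolume;
import org.apache.hadoop.ozone.container.common.volume.HddsVolumeChecker;
import org.apache.hadoop.util.Timer;

/** Illustrative sketch only, not part of this change. */
public class VolumeCheckSketch {

  /** Blocking check of all volumes; returns the set that failed. */
  static Set<HddsVolume> checkAll(HddsVolumeChecker checker,
      Collection<HddsVolume> volumes) throws Exception {
    return checker.checkAllVolumes(volumes);
  }

  /** Async check of one volume; the callback receives healthy/failed sets. */
  static boolean checkOne(HddsVolumeChecker checker, HddsVolume volume) {
    return checker.checkVolume(volume, (healthy, failed) ->
        System.out.println("healthy=" + healthy + " failed=" + failed));
  }

  static HddsVolumeChecker newChecker() throws Exception {
    // Same construction as used by TestHddsVolumeChecker in this patch.
    return new HddsVolumeChecker(new OzoneConfiguration(), new Timer());
  }
}
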
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeInfo.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeInfo.java
index 31f83ec..215d1e5 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeInfo.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeInfo.java
@@ -1,4 +1,4 @@
-/**
+/*
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements.  See the NOTICE file
  * distributed with this work for additional information
@@ -20,7 +20,8 @@ package org.apache.hadoop.ozone.container.common.volume;
 
 import com.google.common.annotations.VisibleForTesting;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.GetSpaceUsed;
+import org.apache.hadoop.hdds.fs.SpaceUsageCheckFactory;
+import org.apache.hadoop.hdds.fs.SpaceUsageCheckParams;
 import org.apache.hadoop.fs.StorageType;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -52,6 +53,7 @@ public final class VolumeInfo {
   public static class Builder {
     private final Configuration conf;
     private final String rootDir;
+    private SpaceUsageCheckFactory usageCheckFactory;
     private StorageType storageType;
     private long configuredCapacity;
 
@@ -70,6 +72,11 @@ public final class VolumeInfo {
       return this;
     }
 
+    public Builder usageCheckFactory(SpaceUsageCheckFactory factory) {
+      this.usageCheckFactory = factory;
+      return this;
+    }
+
     public VolumeInfo build() throws IOException {
       return new VolumeInfo(this);
     }
@@ -80,7 +87,7 @@ public final class VolumeInfo {
     this.rootDir = b.rootDir;
     File root = new File(this.rootDir);
 
-    Boolean succeeded = root.isDirectory() || root.mkdirs();
+    boolean succeeded = root.isDirectory() || root.mkdirs();
 
     if (!succeeded) {
       LOG.error("Unable to create the volume root dir at : {}", root);
@@ -93,25 +100,36 @@ public final class VolumeInfo {
     this.configuredCapacity = (b.configuredCapacity != 0 ?
         b.configuredCapacity : -1);
 
-    this.usage = new VolumeUsage(root, b.conf);
+    SpaceUsageCheckFactory usageCheckFactory = b.usageCheckFactory;
+    if (usageCheckFactory == null) {
+      usageCheckFactory = SpaceUsageCheckFactory.create(b.conf);
+    }
+    SpaceUsageCheckParams checkParams =
+        usageCheckFactory.paramsFor(root);
+
+    this.usage = new VolumeUsage(checkParams);
   }
 
-  public long getCapacity() throws IOException {
+  public long getCapacity() {
     if (configuredCapacity < 0) {
       return usage.getCapacity();
     }
     return configuredCapacity;
   }
 
-  public long getAvailable() throws IOException {
+  public long getAvailable() {
     return usage.getAvailable();
   }
 
-  public long getScmUsed() throws IOException {
-    return usage.getScmUsed();
+  public long getScmUsed() {
+    return usage.getUsedSpace();
+  }
+
+  void start() {
+    usage.start();
   }
 
-  protected void shutdownUsageThread() {
+  void shutdownUsageThread() {
     usage.shutdown();
   }
 
@@ -127,14 +145,6 @@ public final class VolumeInfo {
    * Only for testing. Do not use otherwise.
    */
   @VisibleForTesting
-  public void setScmUsageForTesting(GetSpaceUsed scmUsageForTest) {
-    usage.setScmUsageForTesting(scmUsageForTest);
-  }
-
-  /**
-   * Only for testing. Do not use otherwise.
-   */
-  @VisibleForTesting
   public VolumeUsage getUsageForTesting() {
     return usage;
   }
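
For context, a hypothetical sketch of wiring in the new usageCheckFactory hook from a caller; it mirrors the builder calls used by the tests added later in this change, and the volumeDir argument plus the wrapper class name are placeholders.

import java.time.Duration;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdds.fs.MockSpaceUsageCheckFactory;
import org.apache.hadoop.hdds.fs.MockSpaceUsageSource;
import org.apache.hadoop.hdds.fs.SpaceUsageCheckFactory;
import org.apache.hadoop.hdds.fs.SpaceUsagePersistence;
import org.apache.hadoop.hdds.fs.SpaceUsageSource;
import org.apache.hadoop.ozone.container.common.volume.HddsVolume;

/** Illustrative sketch only, not part of this change. */
public class VolumeBuilderSketch {

  static HddsVolume buildWithFixedUsage(String volumeDir) throws Exception {
    // Fixed values matching the ones used in the new tests:
    // 500 bytes capacity, 200 bytes available, nothing persisted to disk.
    SpaceUsageSource source = MockSpaceUsageSource.fixed(500, 200);
    SpaceUsageCheckFactory factory = MockSpaceUsageCheckFactory.of(
        source, Duration.ZERO, SpaceUsagePersistence.None.INSTANCE);

    // If usageCheckFactory(...) is not called, VolumeInfo falls back to
    // SpaceUsageCheckFactory.create(conf), as shown in the hunk above.
    return new HddsVolume.Builder(volumeDir)
        .conf(new Configuration())
        .usageCheckFactory(factory)
        .build();
  }
}
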
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeSet.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeSet.java
index 275f043..600b695 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeSet.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeSet.java
@@ -19,6 +19,7 @@
 package org.apache.hadoop.ozone.container.common.volume;
 
 import java.io.IOException;
+import java.io.UncheckedIOException;
 import java.util.ArrayList;
 import java.util.Collection;
 import java.util.EnumMap;
@@ -33,6 +34,7 @@ import java.util.concurrent.TimeUnit;
 import java.util.concurrent.locks.ReentrantReadWriteLock;
 
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdds.fs.SpaceUsageCheckFactory;
 import org.apache.hadoop.fs.StorageType;
 import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos;
 import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.NodeReportProto;
@@ -49,8 +51,8 @@ import org.apache.hadoop.util.Timer;
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.collect.ImmutableList;
 import com.google.common.collect.ImmutableMap;
+import static org.apache.hadoop.hdds.DFSConfigKeysLegacy.DFS_DATANODE_DATA_DIR_KEY;
 import static org.apache.hadoop.hdds.scm.ScmConfigKeys.HDDS_DATANODE_DIR_KEY;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY;
 import static org.apache.hadoop.util.RunJar.SHUTDOWN_HOOK_PRIORITY;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -87,6 +89,7 @@ public class VolumeSet {
    */
   private final ScheduledExecutorService diskCheckerservice;
   private final ScheduledFuture<?> periodicDiskChecker;
+  private final SpaceUsageCheckFactory usageCheckFactory;
 
   private static final long DISK_CHECK_INTERVAL_MINUTES = 15;
 
@@ -127,6 +130,9 @@ public class VolumeSet {
         }
       }, DISK_CHECK_INTERVAL_MINUTES, DISK_CHECK_INTERVAL_MINUTES,
         TimeUnit.MINUTES);
+
+    usageCheckFactory = SpaceUsageCheckFactory.create(conf);
+
     initializeVolumeSet();
   }
 
@@ -308,6 +314,7 @@ public class VolumeSet {
         .conf(conf)
         .datanodeUuid(datanodeUuid)
         .clusterID(clusterID)
+        .usageCheckFactory(usageCheckFactory)
         .storageType(storageType);
     return volumeBuilder.build();
   }
@@ -475,7 +482,7 @@ public class VolumeSet {
           scmUsed = volumeInfo.getScmUsed();
           remaining = volumeInfo.getAvailable();
           capacity = volumeInfo.getCapacity();
-        } catch (IOException ex) {
+        } catch (UncheckedIOException ex) {
           LOG.warn("Failed to get scmUsed and remaining for container " +
               "storage location {}", volumeInfo.getRootDir(), ex);
           // reset scmUsed and remaining if df/du failed.
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeUsage.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeUsage.java
index 693bcb5..1da9b25 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeUsage.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeUsage.java
@@ -18,176 +18,49 @@
 
 package org.apache.hadoop.ozone.container.common.volume;
 
-import com.google.common.annotations.VisibleForTesting;
-import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.CachingGetSpaceUsed;
-import org.apache.hadoop.fs.DF;
-import org.apache.hadoop.fs.GetSpaceUsed;
-import org.apache.hadoop.io.IOUtils;
-import org.apache.hadoop.util.Time;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.io.File;
-import java.io.FileNotFoundException;
-import java.io.FileOutputStream;
-import java.io.IOException;
-import java.io.OutputStreamWriter;
-import java.nio.charset.StandardCharsets;
-import java.util.Scanner;
-import java.util.concurrent.atomic.AtomicReference;
+import org.apache.hadoop.hdds.fs.CachingSpaceUsageSource;
+import org.apache.hadoop.hdds.fs.SpaceUsageCheckParams;
+import org.apache.hadoop.hdds.fs.SpaceUsageSource;
 
 /**
  * Class that wraps the space df of the Datanode Volumes used by SCM
  * containers.
  */
-public class VolumeUsage {
-  private static final Logger LOG = LoggerFactory.getLogger(VolumeUsage.class);
+public class VolumeUsage implements SpaceUsageSource {
 
-  private final File rootDir;
-  private final DF df;
-  private final File scmUsedFile;
-  private AtomicReference<GetSpaceUsed> scmUsage;
+  private final CachingSpaceUsageSource source;
   private boolean shutdownComplete;
 
-  private static final String DU_CACHE_FILE = "scmUsed";
-  private volatile boolean scmUsedSaved = false;
-
-  VolumeUsage(File dataLoc, Configuration conf)
-      throws IOException {
-    this.rootDir = dataLoc;
-
-    // SCM used cache file
-    scmUsedFile = new File(rootDir, DU_CACHE_FILE);
-    // get overall disk df
-    this.df = new DF(rootDir, conf);
-
-    startScmUsageThread(conf);
+  VolumeUsage(SpaceUsageCheckParams checkParams) {
+    source = new CachingSpaceUsageSource(checkParams);
+    start(); // TODO should start only on demand
   }
 
-  void startScmUsageThread(Configuration conf) throws IOException {
-    // get SCM specific df
-    scmUsage = new AtomicReference<>(
-        new CachingGetSpaceUsed.Builder().setPath(rootDir)
-            .setConf(conf)
-            .setInitialUsed(loadScmUsed())
-            .build());
+  @Override
+  public long getCapacity() {
+    return Math.max(source.getCapacity(), 0);
   }
 
-  long getCapacity() {
-    long capacity = df.getCapacity();
-    return (capacity > 0) ? capacity : 0;
+  @Override
+  public long getAvailable() {
+    long l = source.getCapacity() - source.getUsedSpace();
+    return Math.max(Math.min(l, source.getAvailable()), 0);
   }
 
-  /*
-   * Calculate the available space in the volume.
-   */
-  long getAvailable() throws IOException {
-    long remaining = getCapacity() - getScmUsed();
-    long available = df.getAvailable();
-    if (remaining > available) {
-      remaining = available;
-    }
-    return (remaining > 0) ? remaining : 0;
+  @Override
+  public long getUsedSpace() {
+    return source.getUsedSpace();
   }
 
-  long getScmUsed() throws IOException{
-    return scmUsage.get().getUsed();
+  public synchronized void start() {
+    source.start();
   }
 
   public synchronized void shutdown() {
     if (!shutdownComplete) {
-      saveScmUsed();
-
-      if (scmUsage.get() instanceof CachingGetSpaceUsed) {
-        IOUtils.cleanupWithLogger(
-            null, ((CachingGetSpaceUsed) scmUsage.get()));
-      }
+      source.shutdown();
       shutdownComplete = true;
     }
   }
 
-  /**
-   * Read in the cached DU value and return it if it is less than 600 seconds
-   * old (DU update interval). Slight imprecision of scmUsed is not critical
-   * and skipping DU can significantly shorten the startup time.
-   * If the cached value is not available or too old, -1 is returned.
-   */
-  long loadScmUsed() {
-    long cachedScmUsed;
-    long mtime;
-    Scanner sc;
-
-    try {
-      sc = new Scanner(scmUsedFile, "UTF-8");
-    } catch (FileNotFoundException fnfe) {
-      return -1;
-    }
-
-    try {
-      // Get the recorded scmUsed from the file.
-      if (sc.hasNextLong()) {
-        cachedScmUsed = sc.nextLong();
-      } else {
-        return -1;
-      }
-      // Get the recorded mtime from the file.
-      if (sc.hasNextLong()) {
-        mtime = sc.nextLong();
-      } else {
-        return -1;
-      }
-
-      // Return the cached value if mtime is okay.
-      if (mtime > 0 && (Time.now() - mtime < 600000L)) {
-        LOG.info("Cached ScmUsed found for {} : {} ", rootDir,
-            cachedScmUsed);
-        return cachedScmUsed;
-      }
-      return -1;
-    } finally {
-      sc.close();
-    }
-  }
-
-  /**
-   * Write the current scmUsed to the cache file.
-   */
-  void saveScmUsed() {
-    if (scmUsedFile.exists() && !scmUsedFile.delete()) {
-      LOG.warn("Failed to delete old scmUsed file in {}.", rootDir);
-    }
-    OutputStreamWriter out = null;
-    try {
-      long used = getScmUsed();
-      if (used > 0) {
-        out = new OutputStreamWriter(new FileOutputStream(scmUsedFile),
-            StandardCharsets.UTF_8);
-        // mtime is written last, so that truncated writes won't be valid.
-        out.write(Long.toString(used) + " " + Long.toString(Time.now()));
-        out.flush();
-        out.close();
-        out = null;
-      }
-    } catch (IOException ioe) {
-      // If write failed, the volume might be bad. Since the cache file is
-      // not critical, log the error and continue.
-      LOG.warn("Failed to write scmUsed to " + scmUsedFile, ioe);
-    } finally {
-      IOUtils.cleanupWithLogger(null, out);
-    }
-  }
-
-  /**
-   * Only for testing. Do not use otherwise.
-   */
-  @VisibleForTesting
-  @SuppressFBWarnings(
-      value = "IS2_INCONSISTENT_SYNC",
-      justification = "scmUsage is an AtomicReference. No additional " +
-          "synchronization is needed.")
-  public void setScmUsageForTesting(GetSpaceUsed scmUsageForTest) {
-    scmUsage.set(scmUsageForTest);
-  }
 }
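
A small worked example of the arithmetic now performed by VolumeUsage.getAvailable(): the reported value is the smaller of "capacity minus used space" and the free space reported by the underlying source, clamped at zero. The numbers below are invented purely for illustration.

/** Illustrative arithmetic only, not part of this change. */
public class VolumeUsageMathSketch {
  public static void main(String[] args) {
    long capacity = 500;     // source.getCapacity()
    long used = 180;         // source.getUsedSpace()
    long fsAvailable = 250;  // source.getAvailable()
    // Same expression as the new getAvailable() above:
    long available = Math.max(Math.min(capacity - used, fsAvailable), 0);
    System.out.println(available); // min(500 - 180, 250) = 250
  }
}
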
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocolPB/StorageContainerDatanodeProtocolPB.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocolPB/StorageContainerDatanodeProtocolPB.java
index 680f393..3e13968 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocolPB/StorageContainerDatanodeProtocolPB.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocolPB/StorageContainerDatanodeProtocolPB.java
@@ -16,11 +16,9 @@
  */
 package org.apache.hadoop.ozone.protocolPB;
 
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos
-    .StorageContainerDatanodeProtocolService;
+import org.apache.hadoop.hdds.DFSConfigKeysLegacy;
+import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.StorageContainerDatanodeProtocolService;
 import org.apache.hadoop.hdds.scm.ScmConfig;
-import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.ipc.ProtocolInfo;
 import org.apache.hadoop.security.KerberosInfo;
 
@@ -34,7 +32,7 @@ import org.apache.hadoop.security.KerberosInfo;
     protocolVersion = 1)
 @KerberosInfo(
     serverPrincipal = ScmConfig.ConfigStrings.HDDS_SCM_KERBEROS_PRINCIPAL_KEY,
-    clientPrincipal = DFSConfigKeys.DFS_DATANODE_KERBEROS_PRINCIPAL_KEY)
+    clientPrincipal = DFSConfigKeysLegacy.DFS_DATANODE_KERBEROS_PRINCIPAL_KEY)
 public interface StorageContainerDatanodeProtocolPB extends
     StorageContainerDatanodeProtocolService.BlockingInterface {
 }
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/TestHddsDatanodeService.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/TestHddsDatanodeService.java
index c60ac66..4aa4cab 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/TestHddsDatanodeService.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/TestHddsDatanodeService.java
@@ -17,19 +17,19 @@
  */
 package org.apache.hadoop.ozone;
 
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertNotNull;
-
 import java.io.File;
 import java.io.IOException;
 
 import org.apache.hadoop.fs.FileUtil;
+import org.apache.hadoop.hdds.DFSConfigKeysLegacy;
 import org.apache.hadoop.hdds.HddsConfigKeys;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.util.ServicePlugin;
+
 import org.junit.After;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNotNull;
 import org.junit.Before;
 import org.junit.Test;
 
@@ -51,7 +51,7 @@ public class TestHddsDatanodeService {
         ServicePlugin.class);
 
     String volumeDir = testDir + "/disk1";
-    conf.set(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY, volumeDir);
+    conf.set(DFSConfigKeysLegacy.DFS_DATANODE_DATA_DIR_KEY, volumeDir);
   }
 
   @After
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/TestHddsSecureDatanodeInit.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/TestHddsSecureDatanodeInit.java
index eeca6b2..a6cd862 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/TestHddsSecureDatanodeInit.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/TestHddsSecureDatanodeInit.java
@@ -16,8 +16,16 @@
  */
 package org.apache.hadoop.ozone;
 
-import org.apache.commons.io.FileUtils;
+import java.io.File;
+import java.nio.file.Paths;
+import java.security.KeyPair;
+import java.security.PrivateKey;
+import java.security.PublicKey;
+import java.security.cert.X509Certificate;
+import java.util.concurrent.Callable;
+
 import org.apache.hadoop.fs.FileUtil;
+import org.apache.hadoop.hdds.DFSConfigKeysLegacy;
 import org.apache.hadoop.hdds.HddsConfigKeys;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.security.x509.SecurityConfig;
@@ -25,11 +33,14 @@ import org.apache.hadoop.hdds.security.x509.certificate.client.CertificateClient
 import org.apache.hadoop.hdds.security.x509.certificate.client.DNCertificateClient;
 import org.apache.hadoop.hdds.security.x509.certificate.utils.CertificateCodec;
 import org.apache.hadoop.hdds.security.x509.keys.KeyCodec;
-import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.security.ssl.KeyStoreTestUtil;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.test.LambdaTestUtils;
 import org.apache.hadoop.util.ServicePlugin;
+
+import org.apache.commons.io.FileUtils;
+import static org.apache.hadoop.ozone.HddsDatanodeService.getLogger;
+import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_SECURITY_ENABLED_KEY;
 import org.bouncycastle.cert.X509CertificateHolder;
 import org.bouncycastle.pkcs.PKCS10CertificationRequest;
 import org.junit.AfterClass;
@@ -38,17 +49,6 @@ import org.junit.Before;
 import org.junit.BeforeClass;
 import org.junit.Test;
 
-import java.io.File;
-import java.nio.file.Paths;
-import java.security.KeyPair;
-import java.security.PrivateKey;
-import java.security.PublicKey;
-import java.security.cert.X509Certificate;
-import java.util.concurrent.Callable;
-
-import static org.apache.hadoop.ozone.HddsDatanodeService.getLogger;
-import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_SECURITY_ENABLED_KEY;
-
 /**
  * Test class for {@link HddsDatanodeService}.
  */
@@ -75,7 +75,7 @@ public class TestHddsSecureDatanodeInit {
     conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, testDir.getPath());
     //conf.set(ScmConfigKeys.OZONE_SCM_NAMES, "localhost");
     String volumeDir = testDir + "/disk1";
-    conf.set(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY, volumeDir);
+    conf.set(DFSConfigKeysLegacy.DFS_DATANODE_DATA_DIR_KEY, volumeDir);
 
     conf.setBoolean(OZONE_SECURITY_ENABLED_KEY, true);
     conf.setClass(OzoneConfigKeys.HDDS_DATANODE_PLUGINS_KEY,
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/SCMTestUtils.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/SCMTestUtils.java
index a9e7c0c..fb3c59f 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/SCMTestUtils.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/SCMTestUtils.java
@@ -22,6 +22,8 @@ import java.net.InetSocketAddress;
 import java.net.ServerSocket;
 
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdds.fs.SpaceUsageCheckFactory;
+import org.apache.hadoop.hdds.fs.MockSpaceUsageCheckFactory;
 import org.apache.hadoop.hdds.HddsConfigKeys;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
@@ -120,6 +122,9 @@ public final class SCMTestUtils {
         .getRandomizedTempPath());
     conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, GenericTestUtils
         .getRandomizedTempPath());
+    conf.setClass(SpaceUsageCheckFactory.Conf.configKeyForClassName(),
+        MockSpaceUsageCheckFactory.None.class,
+        SpaceUsageCheckFactory.class);
     return conf;
   }
 
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/TestStateContext.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/TestStateContext.java
new file mode 100644
index 0000000..f01e89a
--- /dev/null
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/TestStateContext.java
@@ -0,0 +1,117 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.ozone.container.common.statemachine;
+
+import static org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ContainerAction.Action.CLOSE;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+import static org.mockito.Mockito.mock;
+
+import java.util.List;
+
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ContainerAction;
+import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.PipelineAction;
+import org.apache.hadoop.ozone.container.common.statemachine.DatanodeStateMachine.DatanodeStates;
+import org.junit.Test;
+
+import com.google.protobuf.GeneratedMessage;
+
+/**
+ * Test class for Datanode StateContext.
+ */
+public class TestStateContext {
+
+  @Test
+  public void testReportAPIs() {
+    OzoneConfiguration conf = new OzoneConfiguration();
+    DatanodeStateMachine datanodeStateMachineMock =
+        mock(DatanodeStateMachine.class);
+    StateContext stateContext = new StateContext(conf,
+        DatanodeStates.getInitState(), datanodeStateMachineMock);
+
+    String scm1 = "scm1:9001";
+    String scm2 = "scm2:9001";
+
+    // Try to add report with endpoint. Should not be stored.
+    stateContext.addReport(mock(GeneratedMessage.class));
+    assertTrue(stateContext.getAllAvailableReports(scm1).isEmpty());
+
+    // Add 2 scm endpoints.
+    stateContext.addEndpoint(scm1);
+    stateContext.addEndpoint(scm2);
+
+    // Add report. Should be added to all endpoints.
+    stateContext.addReport(mock(GeneratedMessage.class));
+    List<GeneratedMessage> allAvailableReports =
+        stateContext.getAllAvailableReports(scm1);
+    assertEquals(1, allAvailableReports.size());
+    assertEquals(1, stateContext.getAllAvailableReports(scm2).size());
+
+    // Assert the reports are no longer available.
+    assertTrue(stateContext.getAllAvailableReports(scm1).isEmpty());
+
+    // Put back reports.
+    stateContext.putBackReports(allAvailableReports, scm1);
+    assertFalse(stateContext.getAllAvailableReports(scm1).isEmpty());
+  }
+
+  @Test
+  public void testActionAPIs() {
+    OzoneConfiguration conf = new OzoneConfiguration();
+    DatanodeStateMachine datanodeStateMachineMock =
+        mock(DatanodeStateMachine.class);
+    StateContext stateContext = new StateContext(conf,
+        DatanodeStates.getInitState(), datanodeStateMachineMock);
+
+    String scm1 = "scm1:9001";
+    String scm2 = "scm2:9001";
+
+    // Try to get containerActions for endpoint which is not yet added.
+    List<ContainerAction> containerActions =
+        stateContext.getPendingContainerAction(scm1, 10);
+    assertTrue(containerActions.isEmpty());
+
+    // Try to get pipelineActions for endpoint which is not yet added.
+    List<PipelineAction> pipelineActions =
+        stateContext.getPendingPipelineAction(scm1, 10);
+    assertTrue(pipelineActions.isEmpty());
+
+    // Add 2 scm endpoints.
+    stateContext.addEndpoint(scm1);
+    stateContext.addEndpoint(scm2);
+
+    // Add PipelineAction. Should be added to all endpoints.
+    stateContext.addPipelineActionIfAbsent(
+        PipelineAction.newBuilder().setAction(
+            PipelineAction.Action.CLOSE).build());
+
+    pipelineActions = stateContext.getPendingPipelineAction(scm1, 10);
+    assertEquals(1, pipelineActions.size());
+
+    // Add ContainerAction. Should be added to all endpoints.
+    stateContext.addContainerAction(ContainerAction.newBuilder()
+        .setAction(CLOSE).setContainerID(100L).build());
+
+    containerActions = stateContext.getPendingContainerAction(scm2, 10);
+    assertEquals(1, containerActions.size());
+  }
+
+}
\ No newline at end of file
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/states/endpoint/TestHeartbeatEndpointTask.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/states/endpoint/TestHeartbeatEndpointTask.java
index 606940b..c416442 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/states/endpoint/TestHeartbeatEndpointTask.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/states/endpoint/TestHeartbeatEndpointTask.java
@@ -55,6 +55,7 @@ import java.util.UUID;
  */
 public class TestHeartbeatEndpointTask {
 
+  private static final String TEST_SCM_ENDPOINT = "test-scm-1:9861";
 
   @Test
   public void testheartbeatWithoutReports() throws Exception {
@@ -102,6 +103,7 @@ public class TestHeartbeatEndpointTask {
 
     HeartbeatEndpointTask endpointTask = getHeartbeatEndpointTask(
         conf, context, scm);
+    context.addEndpoint(TEST_SCM_ENDPOINT);
     context.addReport(NodeReportProto.getDefaultInstance());
     endpointTask.call();
     SCMHeartbeatRequestProto heartbeat = argument.getValue();
@@ -133,6 +135,7 @@ public class TestHeartbeatEndpointTask {
 
     HeartbeatEndpointTask endpointTask = getHeartbeatEndpointTask(
         conf, context, scm);
+    context.addEndpoint(TEST_SCM_ENDPOINT);
     context.addReport(ContainerReportsProto.getDefaultInstance());
     endpointTask.call();
     SCMHeartbeatRequestProto heartbeat = argument.getValue();
@@ -164,6 +167,7 @@ public class TestHeartbeatEndpointTask {
 
     HeartbeatEndpointTask endpointTask = getHeartbeatEndpointTask(
         conf, context, scm);
+    context.addEndpoint(TEST_SCM_ENDPOINT);
     context.addReport(CommandStatusReportsProto.getDefaultInstance());
     endpointTask.call();
     SCMHeartbeatRequestProto heartbeat = argument.getValue();
@@ -195,6 +199,7 @@ public class TestHeartbeatEndpointTask {
 
     HeartbeatEndpointTask endpointTask = getHeartbeatEndpointTask(
         conf, context, scm);
+    context.addEndpoint(TEST_SCM_ENDPOINT);
     context.addContainerAction(getContainerAction());
     endpointTask.call();
     SCMHeartbeatRequestProto heartbeat = argument.getValue();
@@ -226,6 +231,7 @@ public class TestHeartbeatEndpointTask {
 
     HeartbeatEndpointTask endpointTask = getHeartbeatEndpointTask(
         conf, context, scm);
+    context.addEndpoint(TEST_SCM_ENDPOINT);
     context.addReport(NodeReportProto.getDefaultInstance());
     context.addReport(ContainerReportsProto.getDefaultInstance());
     context.addReport(CommandStatusReportsProto.getDefaultInstance());
@@ -277,6 +283,8 @@ public class TestHeartbeatEndpointTask {
     EndpointStateMachine endpointStateMachine = Mockito
         .mock(EndpointStateMachine.class);
     Mockito.when(endpointStateMachine.getEndPoint()).thenReturn(proxy);
+    Mockito.when(endpointStateMachine.getAddressString())
+        .thenReturn(TEST_SCM_ENDPOINT);
     return HeartbeatEndpointTask.newBuilder()
         .setConfig(conf)
         .setDatanodeDetails(datanodeDetails)
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestHddsVolume.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestHddsVolume.java
index fb2f29b..0d9c876 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestHddsVolume.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestHddsVolume.java
@@ -1,4 +1,4 @@
-/**
+/*
  * Licensed to the Apache Software Foundation (ASF) under one or more
  * contributor license agreements.  See the NOTICE file distributed with this
  * work for additional information regarding copyright ownership.  The ASF
@@ -18,22 +18,29 @@
 package org.apache.hadoop.ozone.container.common.volume;
 
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.GetSpaceUsed;
+import org.apache.hadoop.hdds.fs.SpaceUsageCheckFactory;
+import org.apache.hadoop.hdds.fs.SpaceUsagePersistence;
+import org.apache.hadoop.hdds.fs.SpaceUsageSource;
 import org.apache.hadoop.fs.StorageType;
+import org.apache.hadoop.hdds.fs.MockSpaceUsageCheckFactory;
 import org.apache.hadoop.ozone.container.common.helpers.DatanodeVersionFile;
 import org.apache.hadoop.ozone.container.common.utils.HddsVolumeUtil;
 import org.junit.Before;
 import org.junit.Rule;
 import org.junit.Test;
 import org.junit.rules.TemporaryFolder;
-import org.mockito.Mockito;
 
 import java.io.File;
+import java.time.Duration;
 import java.util.Properties;
 import java.util.UUID;
+import java.util.concurrent.atomic.AtomicLong;
 
+import static org.apache.hadoop.hdds.fs.MockSpaceUsagePersistence.inMemory;
+import static org.apache.hadoop.hdds.fs.MockSpaceUsageSource.fixed;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNull;
 import static org.junit.Assert.assertTrue;
 
 /**
@@ -44,30 +51,31 @@ public class TestHddsVolume {
   private static final String DATANODE_UUID = UUID.randomUUID().toString();
   private static final String CLUSTER_ID = UUID.randomUUID().toString();
   private static final Configuration CONF = new Configuration();
-  private static final String DU_CACHE_FILE = "scmUsed";
+
   @Rule
   public TemporaryFolder folder = new TemporaryFolder();
-  private File rootDir;
-  private HddsVolume volume;
+
+  private HddsVolume.Builder volumeBuilder;
   private File versionFile;
 
   @Before
   public void setup() throws Exception {
-    rootDir = new File(folder.getRoot(), HddsVolume.HDDS_VOLUME_DIR);
-    volume = new HddsVolume.Builder(folder.getRoot().getPath())
+    File rootDir = new File(folder.getRoot(), HddsVolume.HDDS_VOLUME_DIR);
+    volumeBuilder = new HddsVolume.Builder(folder.getRoot().getPath())
         .datanodeUuid(DATANODE_UUID)
         .conf(CONF)
-        .build();
+        .usageCheckFactory(MockSpaceUsageCheckFactory.NONE);
     versionFile = HddsVolumeUtil.getVersionFile(rootDir);
   }
 
   @Test
   public void testHddsVolumeInitialization() throws Exception {
+    HddsVolume volume = volumeBuilder.build();
 
     // The initial state of HddsVolume should be "NOT_FORMATTED" when
     // clusterID is not specified and the version file should not be written
     // to disk.
-    assertTrue(volume.getClusterID() == null);
+    assertNull(volume.getClusterID());
     assertEquals(StorageType.DEFAULT, volume.getStorageType());
     assertEquals(HddsVolume.VolumeState.NOT_FORMATTED,
         volume.getStorageState());
@@ -82,12 +90,14 @@ public class TestHddsVolume {
     // NORMAL and the version file should exist.
     assertTrue("Volume format should create Version file",
         versionFile.exists());
-    assertEquals(volume.getClusterID(), CLUSTER_ID);
+    assertEquals(CLUSTER_ID, volume.getClusterID());
     assertEquals(HddsVolume.VolumeState.NORMAL, volume.getStorageState());
   }
 
   @Test
   public void testReadPropertiesFromVersionFile() throws Exception {
+    HddsVolume volume = volumeBuilder.build();
+
     volume.format(CLUSTER_ID);
 
     Properties properties = DatanodeVersionFile.readFrom(versionFile);
@@ -111,14 +121,19 @@ public class TestHddsVolume {
 
   @Test
   public void testShutdown() throws Exception {
-    // Return dummy value > 0 for scmUsage so that scm cache file is written
-    // during shutdown.
-    GetSpaceUsed scmUsageMock = Mockito.mock(GetSpaceUsed.class);
-    volume.setScmUsageForTesting(scmUsageMock);
-    Mockito.when(scmUsageMock.getUsed()).thenReturn(Long.valueOf(100));
+    long initialUsedSpace = 250;
+    AtomicLong savedUsedSpace = new AtomicLong(initialUsedSpace);
+    SpaceUsagePersistence persistence = inMemory(savedUsedSpace);
+    SpaceUsageSource spaceUsage = fixed(500, 200);
+    long expectedUsedSpace = spaceUsage.getUsedSpace();
+    SpaceUsageCheckFactory factory = MockSpaceUsageCheckFactory.of(
+        spaceUsage, Duration.ZERO, persistence);
+    volumeBuilder.usageCheckFactory(factory);
+
+    HddsVolume volume = volumeBuilder.build();
 
-    assertTrue("Available volume should be positive",
-        volume.getAvailable() > 0);
+    assertEquals(initialUsedSpace, savedUsedSpace.get());
+    assertEquals(expectedUsedSpace, volume.getUsedSpace());
 
     // Shutdown the volume.
     volume.shutdown();
@@ -127,13 +142,11 @@ public class TestHddsVolume {
     assertEquals(HddsVolume.VolumeState.NON_EXISTENT, volume.getStorageState());
 
     // Volume should save scmUsed cache file once volume is shutdown
-    File scmUsedFile = new File(folder.getRoot(), DU_CACHE_FILE);
-    System.out.println("scmUsedFile: " + scmUsedFile);
-    assertTrue("scmUsed cache file should be saved on shutdown",
-        scmUsedFile.exists());
+    assertEquals(expectedUsedSpace, savedUsedSpace.get());
 
     // Volume.getAvailable() should succeed even when usage thread
     // is shutdown.
-    volume.getAvailable();
+    assertEquals(spaceUsage.getAvailable(), volume.getAvailable());
   }
+
 }
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestHddsVolumeChecker.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestHddsVolumeChecker.java
index 2e267be..47763c5 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestHddsVolumeChecker.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestHddsVolumeChecker.java
@@ -20,7 +20,8 @@ package org.apache.hadoop.ozone.container.common.volume;
 
 import com.google.common.util.concurrent.Futures;
 import com.google.common.util.concurrent.ListenableFuture;
-import org.apache.hadoop.hdfs.HdfsConfiguration;
+
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdfs.server.datanode.checker.Checkable;
 import org.apache.hadoop.hdfs.server.datanode.checker.VolumeCheckResult;
 import org.apache.hadoop.test.GenericTestUtils;
@@ -102,7 +103,7 @@ public class TestHddsVolumeChecker {
     LOG.info("Executing {}", testName.getMethodName());
     final HddsVolume volume = makeVolumes(1, expectedVolumeHealth).get(0);
     final HddsVolumeChecker checker =
-        new HddsVolumeChecker(new HdfsConfiguration(), new FakeTimer());
+        new HddsVolumeChecker(new OzoneConfiguration(), new FakeTimer());
     checker.setDelegateChecker(new DummyChecker());
     final AtomicLong numCallbackInvocations = new AtomicLong(0);
 
@@ -144,7 +145,7 @@ public class TestHddsVolumeChecker {
     final List<HddsVolume> volumes = makeVolumes(
         NUM_VOLUMES, expectedVolumeHealth);
     final HddsVolumeChecker checker =
-        new HddsVolumeChecker(new HdfsConfiguration(), new FakeTimer());
+        new HddsVolumeChecker(new OzoneConfiguration(), new FakeTimer());
     checker.setDelegateChecker(new DummyChecker());
 
     Set<HddsVolume> failedVolumes = checker.checkAllVolumes(volumes);
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestRoundRobinVolumeChoosingPolicy.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestRoundRobinVolumeChoosingPolicy.java
index d0fbf10..19ee54d 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestRoundRobinVolumeChoosingPolicy.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestRoundRobinVolumeChoosingPolicy.java
@@ -1,4 +1,4 @@
-/**
+/*
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements.  See the NOTICE file
  * distributed with this work for additional information
@@ -18,21 +18,24 @@
 
 package org.apache.hadoop.ozone.container.common.volume;
 
-import org.apache.hadoop.fs.GetSpaceUsed;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdfs.DFSConfigKeys;
-import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdds.fs.SpaceUsageCheckFactory;
+import org.apache.hadoop.hdds.fs.SpaceUsagePersistence;
+import org.apache.hadoop.hdds.fs.SpaceUsageSource;
+import org.apache.hadoop.hdds.fs.MockSpaceUsageCheckFactory;
+import org.apache.hadoop.hdds.fs.MockSpaceUsageSource;
 import org.apache.hadoop.util.DiskChecker.DiskOutOfSpaceException;
-import org.apache.hadoop.util.ReflectionUtils;
 import org.junit.After;
 import org.junit.Assert;
 import org.junit.Before;
 import org.junit.Test;
-import org.mockito.Mockito;
 
 import java.io.IOException;
+import java.time.Duration;
+import java.util.ArrayList;
 import java.util.List;
-import java.util.UUID;
+
+import static org.apache.hadoop.test.GenericTestUtils.getTestDir;
 
 /**
  * Tests {@link RoundRobinVolumeChoosingPolicy}.
@@ -40,32 +43,41 @@ import java.util.UUID;
 public class TestRoundRobinVolumeChoosingPolicy {
 
   private RoundRobinVolumeChoosingPolicy policy;
-  private List<HddsVolume> volumes;
-  private VolumeSet volumeSet;
-
-  private final String baseDir = MiniDFSCluster.getBaseDirectory();
-  private final String volume1 = baseDir + "disk1";
-  private final String volume2 = baseDir + "disk2";
+  private final List<HddsVolume> volumes = new ArrayList<>();
 
-  private static final String DUMMY_IP_ADDR = "0.0.0.0";
+  private static final Configuration CONF = new Configuration();
+  private static final String BASE_DIR =
+      getTestDir(TestRoundRobinVolumeChoosingPolicy.class.getSimpleName())
+          .getAbsolutePath();
+  private static final String VOLUME_1 = BASE_DIR + "disk1";
+  private static final String VOLUME_2 = BASE_DIR + "disk2";
 
   @Before
   public void setup() throws Exception {
-    OzoneConfiguration conf = new OzoneConfiguration();
-    String dataDirKey = volume1 + "," + volume2;
-    conf.set(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY, dataDirKey);
-    policy = ReflectionUtils.newInstance(
-        RoundRobinVolumeChoosingPolicy.class, null);
-    volumeSet = new VolumeSet(UUID.randomUUID().toString(), conf);
-    volumes = volumeSet.getVolumesList();
+    policy = new RoundRobinVolumeChoosingPolicy();
+
+    SpaceUsageSource source1 = MockSpaceUsageSource.fixed(500, 100);
+    SpaceUsageCheckFactory factory1 = MockSpaceUsageCheckFactory.of(
+        source1, Duration.ZERO, SpaceUsagePersistence.None.INSTANCE);
+    HddsVolume vol1 = new HddsVolume.Builder(VOLUME_1)
+        .conf(CONF)
+        .usageCheckFactory(factory1)
+        .build();
+    SpaceUsageSource source2 = MockSpaceUsageSource.fixed(500, 200);
+    SpaceUsageCheckFactory factory2 = MockSpaceUsageCheckFactory.of(
+        source2, Duration.ZERO, SpaceUsagePersistence.None.INSTANCE);
+    HddsVolume vol2 = new HddsVolume.Builder(VOLUME_2)
+        .conf(CONF)
+        .usageCheckFactory(factory2)
+        .build();
+
+    volumes.add(vol1);
+    volumes.add(vol2);
   }
 
   @After
   public void cleanUp() {
-    if (volumeSet != null) {
-      volumeSet.shutdown();
-      volumeSet = null;
-    }
+    volumes.forEach(HddsVolume::shutdown);
   }
 
   @Test
@@ -73,12 +85,6 @@ public class TestRoundRobinVolumeChoosingPolicy {
     HddsVolume hddsVolume1 = volumes.get(0);
     HddsVolume hddsVolume2 = volumes.get(1);
 
-    // Set available space in volume1 to 100L
-    setAvailableSpace(hddsVolume1, 100L);
-
-    // Set available space in volume1 to 200L
-    setAvailableSpace(hddsVolume2, 200L);
-
     Assert.assertEquals(100L, hddsVolume1.getAvailable());
     Assert.assertEquals(200L, hddsVolume2.getAvailable());
 
@@ -104,34 +110,16 @@ public class TestRoundRobinVolumeChoosingPolicy {
 
   @Test
   public void testRRPolicyExceptionMessage() throws Exception {
-    HddsVolume hddsVolume1 = volumes.get(0);
-    HddsVolume hddsVolume2 = volumes.get(1);
-
-    // Set available space in volume1 to 100L
-    setAvailableSpace(hddsVolume1, 100L);
-
-    // Set available space in volume1 to 200L
-    setAvailableSpace(hddsVolume2, 200L);
-
     int blockSize = 300;
     try {
       policy.chooseVolume(volumes, blockSize);
       Assert.fail("expected to throw DiskOutOfSpaceException");
     } catch(DiskOutOfSpaceException e) {
-      Assert.assertEquals("Not returnig the expected message",
+      Assert.assertEquals("Not returning the expected message",
           "Out of space: The volume with the most available space (=" + 200
               + " B) is less than the container size (=" + blockSize + " B).",
           e.getMessage());
     }
   }
 
-  private void setAvailableSpace(HddsVolume hddsVolume, long availableSpace)
-      throws IOException {
-    GetSpaceUsed scmUsageMock = Mockito.mock(GetSpaceUsed.class);
-    hddsVolume.setScmUsageForTesting(scmUsageMock);
-    // Set used space to capacity -requiredAvailableSpace so that
-    // getAvailable() returns us the specified availableSpace.
-    Mockito.when(scmUsageMock.getUsed()).thenReturn(
-        (hddsVolume.getCapacity() - availableSpace));
-  }
 }
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestVolumeSet.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestVolumeSet.java
index e3571e5..07bafa7 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestVolumeSet.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestVolumeSet.java
@@ -22,8 +22,8 @@ import java.io.IOException;
 import org.apache.commons.io.FileUtils;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.fs.FileUtil;
+import org.apache.hadoop.hdds.DFSConfigKeysLegacy;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.ozone.container.common.utils.HddsVolumeUtil;
 import org.apache.hadoop.test.GenericTestUtils;
@@ -76,7 +76,7 @@ public class TestVolumeSet {
     String dataDirKey = volume1 + "," + volume2;
     volumes.add(volume1);
     volumes.add(volume2);
-    conf.set(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY, dataDirKey);
+    conf.set(DFSConfigKeysLegacy.DFS_DATANODE_DATA_DIR_KEY, dataDirKey);
     initializeVolumeSet();
   }
 
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestVolumeSetDiskChecks.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestVolumeSetDiskChecks.java
index c5deff0..6759233 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestVolumeSetDiskChecks.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestVolumeSetDiskChecks.java
@@ -27,6 +27,7 @@ import java.util.Set;
 import java.util.UUID;
 
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdds.DFSConfigKeysLegacy;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.util.DiskChecker.DiskErrorException;
@@ -35,10 +36,8 @@ import org.apache.hadoop.util.Timer;
 import com.google.common.collect.Iterables;
 import org.apache.commons.io.FileUtils;
 import org.apache.curator.shaded.com.google.common.collect.ImmutableSet;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY;
 import static org.hamcrest.CoreMatchers.is;
 import org.junit.After;
-
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertThat;
 import static org.junit.Assert.assertTrue;
@@ -72,7 +71,7 @@ public class TestVolumeSetDiskChecks {
   @After
   public void cleanup() {
     final Collection<String> dirs = conf.getTrimmedStringCollection(
-        DFS_DATANODE_DATA_DIR_KEY);
+        DFSConfigKeysLegacy.DFS_DATANODE_DATA_DIR_KEY);
 
     for (String d: dirs) {
       FileUtils.deleteQuietly(new File(d));
@@ -96,7 +95,7 @@ public class TestVolumeSetDiskChecks {
 
     // Verify that the Ozone dirs were created during initialization.
     Collection<String> dirs = conf.getTrimmedStringCollection(
-        DFS_DATANODE_DATA_DIR_KEY);
+        DFSConfigKeysLegacy.DFS_DATANODE_DATA_DIR_KEY);
     for (String d : dirs) {
       assertTrue(new File(d).isDirectory());
     }
@@ -163,7 +162,8 @@ public class TestVolumeSetDiskChecks {
     for (int i = 0; i < numDirs; ++i) {
       dirs.add(GenericTestUtils.getRandomizedTestDir().getPath());
     }
-    ozoneConf.set(DFS_DATANODE_DATA_DIR_KEY, String.join(",", dirs));
+    ozoneConf.set(DFSConfigKeysLegacy.DFS_DATANODE_DATA_DIR_KEY,
+        String.join(",", dirs));
     return ozoneConf;
   }
 
diff --git a/hadoop-hdds/container-service/src/test/resources/ozone-site.xml b/hadoop-hdds/container-service/src/test/resources/ozone-site.xml
new file mode 100644
index 0000000..ea24fbb
--- /dev/null
+++ b/hadoop-hdds/container-service/src/test/resources/ozone-site.xml
@@ -0,0 +1,30 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+
+<!-- Put site-specific property overrides in this file. -->
+
+<configuration>
+
+  <property>
+    <name>hdds.datanode.du.factory.classname</name>
+    <value>org.apache.hadoop.hdds.fs.MockSpaceUsageCheckFactory$None</value>
+  </property>
+
+
+</configuration>
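
For reference, SCMTestUtils in this patch applies a similar override programmatically instead of via XML; a minimal sketch of that form, with the wrapper class name invented for illustration:

import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.hdds.fs.MockSpaceUsageCheckFactory;
import org.apache.hadoop.hdds.fs.SpaceUsageCheckFactory;

/** Illustrative sketch only, not part of this change. */
public class DuFactoryOverrideSketch {
  static OzoneConfiguration confWithMockedDiskUsage() {
    OzoneConfiguration conf = new OzoneConfiguration();
    conf.setClass(SpaceUsageCheckFactory.Conf.configKeyForClassName(),
        MockSpaceUsageCheckFactory.None.class,
        SpaceUsageCheckFactory.class);
    return conf;
  }
}
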
diff --git a/hadoop-hdds/docs/content/shell/_index.zh.md b/hadoop-hdds/docs/content/interface/_index.zh.md
similarity index 80%
copy from hadoop-hdds/docs/content/shell/_index.zh.md
copy to hadoop-hdds/docs/content/interface/_index.zh.md
index 613a8f0..31617ca 100644
--- a/hadoop-hdds/docs/content/shell/_index.zh.md
+++ b/hadoop-hdds/docs/content/interface/_index.zh.md
@@ -1,8 +1,8 @@
 ---
-title: Command Line Interface (测试)
+title: "编程接口"
 menu:
    main:
-      weight: 3
+      weight: 4
 ---
 <!---
   Licensed to the Apache Software Foundation (ASF) under one or more
@@ -21,4 +21,6 @@ menu:
   limitations under the License.
 -->
 
-测试页面
+{{<jumbotron title="多协议支持">}}
+Ozone 是一个多协议文件系统,用户可以通过多种协议访问 Ozone 中的数据。
+{{</jumbotron>}}
diff --git a/hadoop-hdds/docs/content/shell/BucketCommands.md b/hadoop-hdds/docs/content/shell/BucketCommands.md
index e817349..fa63ad7 100644
--- a/hadoop-hdds/docs/content/shell/BucketCommands.md
+++ b/hadoop-hdds/docs/content/shell/BucketCommands.md
@@ -36,6 +36,7 @@ The `bucket create` command allows users to create a bucket.
 | Arguments                      |  Comment                                |
 |--------------------------------|-----------------------------------------|
 | -g, \-\-enforcegdpr            | Optional, if set to true it creates a GDPR compliant bucket, if not specified or set to false, it creates an ordinary bucket.
+| -k, \-\-bucketKey              | Optional, if a bucket encryption key name from the configured KMS server is specified, the files in the bucket will be transparently encrypted. Instructions on KMS configuration can be found in the Hadoop KMS documentation.
 |  Uri                           | The name of the bucket in **/volume/bucket** format.
 
 
diff --git a/hadoop-hdds/docs/content/shell/BucketCommands.zh.md b/hadoop-hdds/docs/content/shell/BucketCommands.zh.md
new file mode 100644
index 0000000..9afd280
--- /dev/null
+++ b/hadoop-hdds/docs/content/shell/BucketCommands.zh.md
@@ -0,0 +1,98 @@
+---
+title: 桶命令
+summary: 用桶命令管理桶的生命周期
+weight: 3
+---
+<!---
+  Licensed to the Apache Software Foundation (ASF) under one or more
+  contributor license agreements.  See the NOTICE file distributed with
+  this work for additional information regarding copyright ownership.
+  The ASF licenses this file to You under the Apache License, Version 2.0
+  (the "License"); you may not use this file except in compliance with
+  the License.  You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+-->
+
+Ozone shell 提供以下桶命令:
+
+  * [创建](#创建)
+  * [删除](#删除)
+  * [查看](#查看)
+  * [列举](#列举)
+
+### 创建
+
+用户使用 `bucket create` 命令来创建桶。
+
+***参数:***
+
+| 参数名                      |  说明                                |
+|--------------------------------|-----------------------------------------|
+| -g, \-\-enforcegdpr            | 可选,如果设置为 true 则创建符合 GDPR 规范的桶,设置为 false 或不指定则创建普通的桶|
+| -k, \-\-bucketKey              | 可选,如果指定了 KMS 服务器中的桶加密密钥名,该桶中的文件都会被自动加密,KMS 的配置说明可以参考 Hadoop KMS 文档。  
+|  Uri                           | 桶名,格式为 **/volume/bucket** |
+
+
+{{< highlight bash >}}
+ozone sh bucket create /hive/jan
+{{< /highlight >}}
+
+上述命令会在 _hive_ 卷中创建一个名为 _jan_ 的桶,因为没有指定 scheme,默认使用 O3(RPC)协议。
+
+### 删除 
+
+用户使用 `bucket delete` 命令来删除桶,如果桶不为空,此命令将失败。
+
+***参数:***
+
+| 参数名                      |  说明                                |
+|--------------------------------|-----------------------------------------|
+|  Uri                           | 桶名 |
+
+{{< highlight bash >}}
+ozone sh bucket delete /hive/jan
+{{< /highlight >}}
+
+如果 _jan_ 桶不为空,上述命令会将其删除。
+
+### 查看
+
+`bucket info` 命令返回桶的信息。
+
+***参数:***
+
+| 参数名                      |  说明                                |
+|--------------------------------|-----------------------------------------|
+|  Uri                           | 桶名 | 
+
+{{< highlight bash >}}
+ozone sh bucket info /hive/jan
+{{< /highlight >}}
+
+上述命令会打印出 _jan_ 桶的有关信息。
+
+### 列举
+
+用户通过 `bucket list` 命令列举一个卷下的所有桶。
+
+***参数:***
+
+| 参数                      |  说明                                |
+|--------------------------------|-----------------------------------------|
+| -l, \-\-length                   | 返回结果的最大数量,默认为 100
+| -p, \-\-prefix                   | 可选,只有匹配指定前缀的桶会被返回
+| -s, \-\-start                    | 从指定键开始列举
+|  Uri                           | 卷名
+
+{{< highlight bash >}}
+ozone sh bucket list /hive
+{{< /highlight >}}
+
+此命令会列出 _hive_ 卷中的所有桶。
diff --git a/hadoop-hdds/docs/content/shell/Format.md b/hadoop-hdds/docs/content/shell/Format.md
index 72174c9..d6c9d2f 100644
--- a/hadoop-hdds/docs/content/shell/Format.md
+++ b/hadoop-hdds/docs/content/shell/Format.md
@@ -53,12 +53,12 @@ create, list, delete etc.
 
 Ozone URL can point to a volume, bucket or keys in the following format:
 
-_\[scheme\]\[server:port\]/volume/bucket/key_
+_\[schema\]\[server:port\]/volume/bucket/key_
 
 
 Where,
 
-1. **Scheme** - This should be `o3` which is the native RPC protocol to access
+1. **Schema** - This should be `o3` which is the native RPC protocol to access
   Ozone API. The usage of the schema is optional.
 
 2. **Server:Port** - This is the address of the Ozone Manager. If the port is
diff --git a/hadoop-hdds/docs/content/shell/Format.zh.md b/hadoop-hdds/docs/content/shell/Format.zh.md
new file mode 100644
index 0000000..edfcbdc
--- /dev/null
+++ b/hadoop-hdds/docs/content/shell/Format.zh.md
@@ -0,0 +1,65 @@
+---
+title: Shell 概述
+summary: shell 命令的语法介绍。
+weight: 1
+---
+<!---
+  Licensed to the Apache Software Foundation (ASF) under one or more
+  contributor license agreements.  See the NOTICE file distributed with
+  this work for additional information regarding copyright ownership.
+  The ASF licenses this file to You under the Apache License, Version 2.0
+  (the "License"); you may not use this file except in compliance with
+  the License.  You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+-->
+
+Ozone shell 的帮助命令既可以在 _对象_ 级别调用,也可以在 _操作_ 级别调用。
+比如:
+
+{{< highlight bash >}}
+ozone sh volume --help
+{{< /highlight >}}
+
+此命令会列出所有对卷的可能操作。
+
+你也可以通过它查看特定操作的帮助,比如:
+
+{{< highlight bash >}}
+ozone sh volume create --help
+{{< /highlight >}}
+
+这条命令会给出 create 命令的命令行选项。
+
+
+### 通用命令格式
+
+Ozone shell 命令都遵照以下格式:
+
+> _ozone sh object action url_
+
+**ozone** 脚本用来调用所有 Ozone 子命令,ozone shell 通过 ```sh``` 子命令调用。
+
+对象可以是卷、桶或键,操作一般是各种动词,比如 create、list、delete 等等。
+
+
+Ozone URL 可以指向卷、桶或键,格式如下:
+
+_\[schema\]\[server:port\]/volume/bucket/key_
+
+
+其中,
+
+1. **Schema** - 可选,默认为 `o3`,表示使用原生 RPC 协议来访问 Ozone API。
+
+2. **Server:Port** - OM 的地址,如果省略了端口, 则使用 ozone-site.xml 中的默认端口。
+
+根据具体的命令不同,卷名、桶名和键名将用来构成 URL,卷、桶和键命令的文档有更多具体的说明。
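+
+举例来说,下面两条命令访问的是同一个卷(om-host:9862 为假设的 OM 地址和端口,仅作演示,请按实际环境替换):
+
+{{< highlight bash >}}
+# 省略 schema 和地址,使用默认的 o3 协议和 ozone-site.xml 中配置的 OM 地址
+ozone sh volume info /hive
+# 显式指定 schema 和 OM 地址
+ozone sh volume info o3://om-host:9862/hive
+{{< /highlight >}}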
diff --git a/hadoop-hdds/docs/content/shell/KeyCommands.md b/hadoop-hdds/docs/content/shell/KeyCommands.md
index b4a38c8..4dcfb8c 100644
--- a/hadoop-hdds/docs/content/shell/KeyCommands.md
+++ b/hadoop-hdds/docs/content/shell/KeyCommands.md
@@ -110,8 +110,8 @@ The `key list` command allows user to list all keys in a bucket.
 
 | Arguments                      |  Comment                                |
 |--------------------------------|-----------------------------------------|
-| -l, \-\-length                   | Maximum number of results to return. Default: 1000
-| -p, \-\-prefix                   | Optional, Only buckets that match this prefix will be returned.
+| -l, \-\-length                   | Maximum number of results to return. Default: 100
+| -p, \-\-prefix                   | Optional, Only keys that match this prefix will be returned.
 | -s, \-\-start                    | The listing will start from key after the start key.
 |  Uri                           | The name of the _volume_.
 
diff --git a/hadoop-hdds/docs/content/shell/KeyCommands.zh.md b/hadoop-hdds/docs/content/shell/KeyCommands.zh.md
new file mode 100644
index 0000000..dc19678
--- /dev/null
+++ b/hadoop-hdds/docs/content/shell/KeyCommands.zh.md
@@ -0,0 +1,138 @@
+---
+title: 键命令
+summary: 用键命令管理键/对象的生命周期
+weight: 4
+---
+<!---
+  Licensed to the Apache Software Foundation (ASF) under one or more
+  contributor license agreements.  See the NOTICE file distributed with
+  this work for additional information regarding copyright ownership.
+  The ASF licenses this file to You under the Apache License, Version 2.0
+  (the "License"); you may not use this file except in compliance with
+  the License.  You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+-->
+
+
+Ozone shell 提供以下键命令:
+
+  * [下载](#下载)
+  * [上传](#上传)
+  * [删除](#删除)
+  * [查看](#查看)
+  * [列举](#列举)
+  * [重命名](#重命名)
+
+
+### 下载
+
+`key get` 命令从 Ozone 集群下载一个键到本地文件系统。
+
+***参数:***
+
+| 参数名                      |  说明                                |
+|--------------------------------|-----------------------------------------|
+|  Uri                           | 键名,格式为 **/volume/bucket/key**
+|  FileName                      | 下载到本地后的文件名
+
+
+{{< highlight bash >}}
+ozone sh key get /hive/jan/sales.orc sales.orc
+{{< /highlight >}}
+
+从 _/hive/jan_ 桶中下载 sales.orc 文件,写入到本地名为 sales.orc 的文件。
+
+### 上传
+
+`key put` 命令从本地文件系统上传一个文件到指定的桶。
+
+***参数:***
+
+| 参数名                      |  说明                                 |
+|--------------------------------|-----------------------------------------|
+|  Uri                           | 键名,格式为 **/volume/bucket/key**
+|  FileName                      | 待上传的本地文件
+| -r, \-\-replication              | 可选,上传后的副本数,合法值为 ONE 或者 THREE,如果不设置,将采用集群配置中的默认值。
+
+{{< highlight bash >}}
+ozone sh key put /hive/jan/corrected-sales.orc sales.orc
+{{< /highlight >}}
+
+上述命令将 sales.orc 文件作为新键上传到 _/hive/jan/corrected-sales.orc_ 。
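+
+如果需要覆盖集群默认的副本数,可以加上上表中的 `--replication` 选项,示例如下(仅作演示,具体语法请以 `--help` 的输出为准):
+
+{{< highlight bash >}}
+ozone sh key put --replication=THREE /hive/jan/corrected-sales.orc sales.orc
+{{< /highlight >}}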
+
+### 删除
+
+`key delete` 命令用来从桶中删除指定键。
+
+***参数:***
+
+| 参数名                      |  说明                                |
+|--------------------------------|-----------------------------------------|
+|  Uri                           | 键名
+
+{{< highlight bash >}}
+ozone sh key delete /hive/jan/corrected-sales.orc
+{{< /highlight >}}
+
+上述命令会将 _/hive/jan/corrected-sales.orc_ 这个键删除。
+
+
+### 查看
+
+`key info` 命令返回指定键的信息。
+
+***参数:***
+
+| 参数名                      |  说明                                |
+|--------------------------------|-----------------------------------------|
+|  Uri                           | 键名
+
+{{< highlight bash >}}
+ozone sh key info /hive/jan/sales.orc
+{{< /highlight >}}
+
+上述命令会打印出 _/hive/jan/sales.orc_ 键的相关信息。
+
+### 列举
+
+用户通过 `key list` 命令列出一个桶中的所有键。
+
+***参数:***
+
+| 参数名                      |  说明                                |
+|--------------------------------|-----------------------------------------|
+| -l, \-\-length                   | 返回结果的最大数量,默认值为 100
+| -p, \-\-prefix                   | 可选,只有匹配指定前缀的键会被返回
+| -s, \-\-start                    | 从指定键开始列举
+|  Uri                           | 桶名
+
+{{< highlight bash >}}
+ozone sh key list /hive/jan
+{{< /highlight >}}
+
+此命令会列出 _/hive/jan_ 桶中的所有键。
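+
+与桶的列举类似,键的列举也支持限制数量和前缀过滤,下面是一个仅作演示的组合(前缀 sales 为假设值,具体语法请以 `--help` 的输出为准):
+
+{{< highlight bash >}}
+# 最多列出 50 个名字以 sales 开头的键
+ozone sh key list --length=50 --prefix=sales /hive/jan
+{{< /highlight >}}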
+
+### 重命名
+
+`key rename` 命令用来修改指定桶中的已有键的键名。
+
+***参数:***
+
+| 参数名                      |  说明                                |
+|--------------------------------|-----------------------------------------|
+|  Uri                           | 桶名,格式为 **/volume/bucket**
+|  FromKey                       | 旧的键名
+|  ToKey                         | 新的键名
+
+{{< highlight bash >}}
+ozone sh key rename /hive/jan sales.orc new_name.orc
+{{< /highlight >}}
+
+上述命令会将 _/hive/jan_ 桶中的 _sales.orc_ 重命名为 _new\_name.orc_ 。
diff --git a/hadoop-hdds/docs/content/shell/VolumeCommands.zh.md b/hadoop-hdds/docs/content/shell/VolumeCommands.zh.md
new file mode 100644
index 0000000..b4a4c28
--- /dev/null
+++ b/hadoop-hdds/docs/content/shell/VolumeCommands.zh.md
@@ -0,0 +1,107 @@
+---
+title: 卷命令
+weight: 2
+summary: 用卷命令管理卷的生命周期
+---
+<!---
+  Licensed to the Apache Software Foundation (ASF) under one or more
+  contributor license agreements.  See the NOTICE file distributed with
+  this work for additional information regarding copyright ownership.
+  The ASF licenses this file to You under the Apache License, Version 2.0
+  (the "License"); you may not use this file except in compliance with
+  the License.  You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+-->
+
+卷命令通常需要管理员权限,ozone shell 支持以下卷命令:
+
+  * [创建](#创建)
+  * [删除](#删除)
+  * [查看](#查看)
+  * [列举](#列举)
+  * [更新](#更新)
+
+### 创建
+
+管理员可以通过 `volume create` 命令创建一个卷并分配给一个用户。
+
+***参数:***
+
+| 参数名                      |  说明                                |
+|--------------------------------|-----------------------------------------|
+| -q, \-\-quota                    | 可选,指明该卷在 Ozone 集群所能使用的最大空间,即限额。         |
+| -u, \-\-user                     |  必需,指明该卷的所有者,此用户可以在该卷中创建桶和键。    |
+|  Uri                           | 卷名                                       |
+
+{{< highlight bash >}}
+ozone sh volume create --quota=1TB --user=bilbo /hive
+{{< /highlight >}}
+
+上述命令会在 ozone 集群中创建名为 _hive_ 的卷,卷的限额为 1TB,所有者为 _bilbo_ 。
+
+### 删除
+
+管理员可以通过 `volume delete` 命令删除一个卷,如果卷不为空,此命令将失败。
+
+***参数:***
+
+| 参数名                      |  说明                                |
+|--------------------------------|-----------------------------------------|
+|  Uri                           | 卷名 |
+
+{{< highlight bash >}}
+ozone sh volume delete /hive
+{{< /highlight >}}
+
+如果 hive 卷中不包含任何桶,上述命令将删除 hive 卷。
+
+### 查看
+
+通过 `volume info` 命令可以获取卷的限额和所有者信息。
+
+***参数:***
+
+| 参数名                     |  说明                                |
+|--------------------------------|-----------------------------------------|
+|  Uri                           | 卷名     | 
+
+{{< highlight bash >}}
+ozone sh volume info /hive
+{{< /highlight >}}
+
+上述命令会打印出 hive 卷的相关信息。
+
+### 列举
+
+`volume list` 命令用来列举一个用户拥有的所有卷。
+
+{{< highlight bash >}}
+ozone sh volume list --user hadoop
+{{< /highlight >}}
+
+上述命令会打印出 hadoop 用户拥有的所有卷。
+
+### 更新
+
+`volume update` 命令用来修改卷的所有者和限额。
+
+***参数:***
+
+| 参数名                      |  说明                                |
+|--------------------------------|-----------------------------------------|
+| -q, \-\-quota                    | 可选,重新指定该卷在 Ozone 集群中的限额。  |
+| -u, \-\-user                     | 可选,重新指定该卷的所有者 |
+|  Uri                           | 卷名                                        |
+
+{{< highlight bash >}}
+ozone sh volume update --quota=10TB /hive
+{{< /highlight >}}
+
+上述命令将 hive 卷的限额更新为 10TB。
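+
+类似地,`--user` 选项可以用来变更卷的所有者,下面的示例把 hive 卷转交给 hadoop 用户(用户名仅为演示):
+
+{{< highlight bash >}}
+ozone sh volume update --user=hadoop /hive
+{{< /highlight >}}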
diff --git a/hadoop-hdds/docs/content/shell/_index.zh.md b/hadoop-hdds/docs/content/shell/_index.zh.md
index 613a8f0..0f6220b 100644
--- a/hadoop-hdds/docs/content/shell/_index.zh.md
+++ b/hadoop-hdds/docs/content/shell/_index.zh.md
@@ -1,5 +1,5 @@
 ---
-title: Command Line Interface (测试)
+title: 命令行接口
 menu:
    main:
       weight: 3
@@ -21,4 +21,7 @@ menu:
   limitations under the License.
 -->
 
-测试页面
+
+{{<jumbotron title="OzoneShell">}}
+    Ozone shell 是用户与 Ozone 进行交互的主要接口,它提供了操作 Ozone 的命令行接口。
+{{</jumbotron>}}
diff --git a/hadoop-hdds/docs/content/start/FromSource.zh.md b/hadoop-hdds/docs/content/start/FromSource.zh.md
index 9ce0cc4..a1b9f37 100644
--- a/hadoop-hdds/docs/content/start/FromSource.zh.md
+++ b/hadoop-hdds/docs/content/start/FromSource.zh.md
@@ -1,5 +1,5 @@
 ---
-title: From Source
+title: 从源码构建 Ozone
 weight: 30
 ---
 <!---
@@ -25,44 +25,35 @@ weight: 30
  * Protoc (2.5)
 {{< /requirements >}}
 
-<div class="alert alert-info" role="alert">This is a guide on how to build the ozone sources.  If you are <font
-color="red">not</font>
-planning to build sources yourself, you can safely skip this page.</div>
+<div class="alert alert-info" role="alert">本文档是关于从源码构建 Ozone 的指南,如果你<font
+color="red">不</font>打算亲自这么做,你大可放心地跳过本页。</div>
 
-If you are a Hadoop ninja, and wise in the ways of Apache, you already know
-that a real Apache release is a source release.
+如果你十分了解 Hadoop,并且熟悉 Apache 之道,那你应当知道 Apache 发行包的精髓在于源代码。
 
-If you want to build from sources, Please untar the source tarball and run
-the ozone build command. This instruction assumes that you have all the
-dependencies to build Hadoop on your build machine. If you need instructions
-on how to build Hadoop, please look at the Apache Hadoop Website.
+从源码构建 ozone 只需要解压源码压缩包然后运行构建命令即可,下面这条命令假设你的机器上拥有构建 Hadoop 所需的所有环境,如果你需要构建 Hadoop 的指南,请查看 Apache Hadoop 网站。
 
 ```bash
 mvn clean package -DskipTests=true
 ```
 
-This will build an ozone-\<version\>.tar.gz in your `hadoop-ozone/dist/target` directory.
+命令执行完成后,`hadoop-ozone/dist/target` 目录下会生成一个 ozone-\<version\>.tar.gz 文件。
 
-You can copy this tarball and use this instead of binary artifacts that are
-provided along with the official release.
+你可以拷贝和使用这个压缩包来替代官方发行的二进制包。
 
-## How to test the build
+## 构建结果测试
 
-You can run the acceptance tests in the hadoop-ozone directory to make sure
-that  your build is functional. To launch the acceptance tests, please follow
- the instructions in the **README.md** in the `smoketest` directory.
+为了确保从源码构建出的二进制包可用,你可以运行 hadoop-ozone 目录下的验收测试集,测试方法请参照 `smoketest` 目录下的 **README.md** 说明。
 
 ```bash
 cd smoketest
 ./test.sh
 ```
 
- You can also execute only a minimal subset of the tests:
+你也可以只执行最基本的验收测试:
 
 ```bash
 cd smoketest
 ./test.sh --env ozone basic
 ```
 
-Acceptance tests will start a small ozone cluster and verify that ozone shell and ozone file
- system is fully functional.
+验收测试会启动一个基于 docker-compose 的小型 ozone 集群,然后验证 ozone shell 和文件系统是否完全可用。
\ No newline at end of file
diff --git a/hadoop-hdds/docs/content/start/Kubernetes.zh.md b/hadoop-hdds/docs/content/start/Kubernetes.zh.md
index ad85534..21bb704 100644
--- a/hadoop-hdds/docs/content/start/Kubernetes.zh.md
+++ b/hadoop-hdds/docs/content/start/Kubernetes.zh.md
@@ -1,5 +1,5 @@
 ---
-title: Ozone on Kubernetes
+title: 在 Kubernetes 集群上部署 Ozone
 weight: 22
 ---
 <!---
@@ -21,31 +21,32 @@ weight: 22
 
 
 {{< requirements >}}
- * Working kubernetes cluster (LoadBalancer, PersistentVolume are not required)
+ * 可用的 kubernetes 集群(LoadBalancer 和 PersistentVolume 非必需)
  * kubectl
 {{< /requirements >}}
 
 
-As the _apache/ozone_ docker images are available from the dockerhub the deployment process is very similar to Minikube deployment. The only big difference is that we have dedicated set of k8s files for hosted clusters (for example we can use one datanode per host)
-Deploy to kubernetes
+由于 _apache/ozone_ 镜像可以从 Docker Hub 获取到,K8s 上的部署过程和 Minikube 上的部署过程十分相似,唯一的区别是我们为 K8s 部署准备了专门的配置
+文件(比如,我们可以在每个 K8s 节点上部署一个 Datanode)。
 
-`kubernetes/examples` folder of the ozone distribution contains kubernetes deployment resource files for multiple use cases.
 
-To deploy to a hosted cluster use the ozone subdirectory:
+ozone 安装包中的 `kubernetes/examples` 目录包含了为不同用例设计的 Kubernetes 部署资源文件。
+
+使用 ozone 子目录进行部署:
 
 ```
 cd kubernetes/examples/ozone
 kubectl apply -f .
 ```
 
-And you can check the results with
+用下面的命令检查结果:
 
 ```
 kubectl get pod
-Access the services
+访问 ozone 服务
 ```
 
-Now you can access any of the services. By default the services are not published but you can access them with port-foward rules.
+现在你可以访问 ozone 的各个服务,默认情况下它们的端口并没有向外开放,不过你可以通过设置端口转发规则来开放外部访问:
 
 ```
 kubectl port-forward s3g-0 9878:9878
diff --git a/hadoop-hdds/docs/content/start/Minikube.zh.md b/hadoop-hdds/docs/content/start/Minikube.zh.md
index ebb249d..9408316 100644
--- a/hadoop-hdds/docs/content/start/Minikube.zh.md
+++ b/hadoop-hdds/docs/content/start/Minikube.zh.md
@@ -1,5 +1,5 @@
 ---
-title: Minikube & Ozone
+title: 在 Minikube 中运行 Ozone
 weight: 21
 ---
 <!---
@@ -21,33 +21,33 @@ weight: 21
 
 
 {{< requirements >}}
- * Working minikube setup
+ * minikube
  * kubectl
 {{< /requirements >}}
 
-`kubernetes/examples` folder of the ozone distribution contains kubernetes deployment resource files for multiple use cases. By default the kubernetes resource files are configured to use `apache/ozone` image from the dockerhub.
+ozone 发行包中的 `kubernetes/examples` 文件夹下包含了多种用途的 kubernetes 部署资源文件,这些资源文件默认使用 Docker Hub 上的 `apache/ozone` 镜像。
 
-To deploy it to minikube use the minikube configuration set:
+使用 minikube 资源集在 minikube 上进行部署:
 
 ```
 cd kubernetes/examples/minikube
 kubectl apply -f .
 ```
 
-And you can check the results with
+使用下面的命令检查结果:
 
 ```
 kubectl get pod
 ```
 
-Note: the kubernetes/examples/minikube resource set is optimized for minikube usage:
+注意:kubernetes/examples/minikube 资源集为 minikube 部署进行了如下优化:
 
- * You can have multiple datanodes even if you have only one host (in a real production cluster usually you need one datanode per physical host)
- * The services are published with node port
+ * 即使你只有一个主机,也可以运行多个 Datanode(在实际的生产集群中,每个物理主机上通常只运行一个 Datanode)
+ * Ozone 通过不同的节点端口提供服务
 
-## Access the services
+## 访问服务
 
-Now you can access any of the services. For each web endpoint an additional NodeType service is defined in the minikube k8s resource set. NodeType services are available via a generated port of any of the host nodes:
+现在你可以访问 Ozone 的各个服务,minikube 资源集为每个 web 端点额外定义了一个 NodePort 服务,NodePort 服务可以通过指定端口从任意节点访问:
 
 ```bash
 kubectl get svc
@@ -62,9 +62,9 @@ scm          ClusterIP   None            <none>        9876/TCP         27s
 scm-public   NodePort    10.105.231.28   <none>        9876:32171/TCP   27s
 ```
 
-Minikube contains a convenience command to access any of the NodePort services:
+Minikube 为访问任意的 NodePort 服务提供了一个方便的命令:
 
 ```
 minikube service s3g-public
-Opening kubernetes service default/s3g-public in default browser...
+# 此命令会在默认浏览器中打开 default/s3g-public 服务的页面...
 ```
\ No newline at end of file
diff --git a/hadoop-hdds/docs/content/start/OnPrem.zh.md b/hadoop-hdds/docs/content/start/OnPrem.zh.md
index 243743a..2e367b8 100644
--- a/hadoop-hdds/docs/content/start/OnPrem.zh.md
+++ b/hadoop-hdds/docs/content/start/OnPrem.zh.md
@@ -1,5 +1,5 @@
 ---
-title: Ozone On Premise Installation
+title: 物理集群上 Ozone 的安装 
 weight: 20
 
 ---
@@ -20,44 +20,30 @@ weight: 20
   limitations under the License.
 -->
 
-If you are feeling adventurous, you can setup ozone in a real cluster.
-Setting up a real cluster requires us to understand the components of Ozone.
-Ozone is designed to work concurrently with HDFS. However, Ozone is also
-capable of running independently. The components of ozone are the same in both approaches.
+如果你想要有点挑战性,你可以在物理集群上安装 ozone。搭建一个 Ozone 集群需要了解它的各个组件,Ozone 既能和现有的 HDFS 集群并存运行,也可以独立运行。在这两种模式下,需要运行的 Ozone 组件是相同的。
 
-## Ozone Components
+## Ozone 组件 
 
-1. Ozone Manager - Is the server that is in charge of the namespace of Ozone. Ozone Manager is responsible for all volume, bucket and key operations.
-2. Storage Container Manager - Acts as the block manager. Ozone Manager
-requests blocks from SCM, to which clients can write data.
-3. Datanodes - Ozone data node code runs inside the HDFS datanode or in the independent deployment case runs an ozone datanode daemon.
+1. Ozone Manager - 管理 Ozone 命名空间的服务,负责所有对卷、桶和键的操作。
+2. Storage Container Manager - Ozone 中块的管理者,Ozone Manager 从 SCM 请求块,然后用户向块写入数据。
+3. Datanodes - Ozone 的 Datanode 代码既可以运行在 HDFS 的 Datanode 内,也可以独立部署成单独的进程。
 
-## Setting up an Ozone only cluster
+## 搭建一个独立 Ozone 集群
 
-* Please untar the ozone-\<version\> to the directory where you are going
-to run Ozone from. We need Ozone jars on all machines in the cluster. So you
-need to do this on all machines in the cluster.
+* 将 ozone-\<version\> 安装包解压到目标目录,因为 Ozone 的 jar 包需要部署到集群的所有机器上,所以你需要在所有机器上进行此操作。
 
-* Ozone relies on a configuration file called ```ozone-site.xml```. To
-generate a template that you can replace with proper values, please run the
-following command. This will generate a template called ```ozone-site.xml``` at
-the specified path (directory).
+* Ozone 依赖名为 ```ozone-site.xml``` 的配置文件, 运行下面的命令可以在指定目录生成名为 ```ozone-site.xml``` 的配置文件模板,然后你可以将参数替换为合适的值。
 
 {{< highlight bash >}}
 ozone genconf <path>
 {{< /highlight >}}
 
-Let us look at the settings inside the generated file (ozone-site.xml) and
-how they control ozone. Once the right values are defined, this file
-needs to be copied to ```ozone directory/etc/hadoop```.
+我们来看看生成的文件(ozone-site.xml)中都有哪些参数,以及它们是如何影响 ozone 的。当各个参数都配置了合适的值之后,需要把该文件拷贝到 ```ozone directory/etc/hadoop```。
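+
+比如(下面的安装路径 /opt/ozone 仅为假设值,请按实际解压目录替换):
+
+{{< highlight bash >}}
+# 在 /tmp 下生成配置模板,编辑后拷贝到 Ozone 的配置目录
+ozone genconf /tmp
+cp /tmp/ozone-site.xml /opt/ozone/etc/hadoop/
+{{< /highlight >}}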
 
-* **ozone.metadata.dirs** Allows Administrators to specify where the
- metadata must reside. Usually you pick your fastest disk (SSD if
- you have them on your nodes). OzoneManager, SCM and datanode will  write the
- metadata to this path. This is a required setting, if this is missing Ozone
- will fail to come up.
-
-  Here is an example,
+* **ozone.metadata.dirs** 管理员通过此参数指定元数据的存储位置,通常应该选择最快的磁盘(比如 SSD,如果节点上有的话),OM、SCM 和 Datanode 
+会将元数据写入此路径。这是个必需的参数,如果不配置它,Ozone 会启动失败。
+ 
+示例如下:
 
 {{< highlight xml >}}
    <property>
@@ -66,26 +52,23 @@ needs to be copied to ```ozone directory/etc/hadoop```.
    </property>
 {{< /highlight >}}
 
-*  **ozone.scm.names**  Storage container manager(SCM) is a distributed block
-  service which is used by ozone. This property allows data nodes to discover
-   SCM's address. Data nodes send heartbeat to SCM.
-   Until HA  feature is  complete, we configure ozone.scm.names to be a
-   single machine.
-
-  Here is an example,
-
+*  **ozone.scm.names**  Storage container manager(SCM) 提供 ozone 使用的分布式块服务,Datanode 通过这个参数来连接 SCM 并向 SCM 发送心跳。Ozone
+ 目前尚未支持 SCM 的 HA,ozone.scm.names 只需配置单个 SCM 地址即可。
+  
+  示例如下:
+  
   {{< highlight xml >}}
-      <property>
+    <property>
         <name>ozone.scm.names</name>
-        <value>scm.hadoop.apache.org</value>
+      <value>scm.hadoop.apache.org</value>
       </property>
   {{< /highlight >}}
+  
+ * **ozone.scm.datanode.id.dir** 每个 Datanode 会生成一个唯一 ID,叫做 Datanode ID。Datanode ID 会被写入此参数所指定路径下名为 datanode.id
+  的文件中,如果该路径不存在,Datanode 会自动创建。
 
- * **ozone.scm.datanode.id.dir** Data nodes generate a Unique ID called Datanode
- ID. This identity is written to the file datanode.id in a directory specified by this path. *Data nodes
-    will create this path if it doesn't exist already.*
+示例如下:
 
-Here is an  example,
 {{< highlight xml >}}
    <property>
       <name>ozone.scm.datanode.id.dir</name>
@@ -93,10 +76,10 @@ Here is an  example,
    </property>
 {{< /highlight >}}
 
-* **ozone.om.address** OM server address. This is used by OzoneClient and
-Ozone File System.
+* **ozone.om.address** OM 服务地址,OzoneClient 和 Ozone 文件系统需要使用此地址。
+
+示例如下:
 
-Here is an  example,
 {{< highlight xml >}}
     <property>
        <name>ozone.om.address</name>
@@ -105,67 +88,67 @@ Here is an  example,
 {{< /highlight >}}
 
 
-## Ozone Settings Summary
+## Ozone 参数汇总
 
 | Setting                        | Value                        | Comment |
 |--------------------------------|------------------------------|------------------------------------------------------------------|
-| ozone.metadata.dirs            | file path                    | The metadata will be stored here.                                |
-| ozone.scm.names                | SCM server name              | Hostname:port or IP:port address of SCM.                      |
-| ozone.scm.block.client.address | SCM server name and port     | Used by services like OM                                         |
-| ozone.scm.client.address       | SCM server name and port     | Used by client-side                                              |
-| ozone.scm.datanode.address     | SCM server name and port     | Used by datanode to talk to SCM                                  |
-| ozone.om.address               | OM server name               | Used by Ozone handler and Ozone file system.                     |
+| ozone.metadata.dirs            | 文件路径                | 元数据存储位置                    |
+| ozone.scm.names                | SCM 服务地址            | SCM 的主机名:端口,或者 IP:端口  |
+| ozone.scm.block.client.address | SCM 服务地址和端口 | Ozone 内部服务使用(如 OM)                                |
+| ozone.scm.client.address       | SCM 服务地址和端口 | 客户端使用                                        |
+| ozone.scm.datanode.address     | SCM 服务地址和端口 | Datanode 使用                            |
+| ozone.om.address               | OM 服务地址           | Ozone handler 和 Ozone 文件系统使用             |
 
 
-## Startup the cluster
+## 启动集群
 
-Before we boot up the Ozone cluster, we need to initialize both SCM and Ozone Manager.
+在启动 Ozone 集群之前,需要依次初始化 SCM 和 OM。
 
 {{< highlight bash >}}
 ozone scm --init
 {{< /highlight >}}
-This allows SCM to create the cluster Identity and initialize its state.
-The ```init``` command is similar to Namenode format. Init command is executed only once, that allows SCM to create all the required on-disk structures to work correctly.
+
+这条命令会使 SCM 创建集群 ID 并初始化它的状态。
+```init``` 命令和 Namenode 的 ```format``` 命令类似,只需要执行一次,SCM 就可以在磁盘上准备好正常运行所需的数据结构。
+
 {{< highlight bash >}}
 ozone --daemon start scm
 {{< /highlight >}}
 
-Once we know SCM is up and running, we can create an Object Store for our use. This is done by running the following command.
+SCM 启动之后,我们就可以创建对象存储空间,命令如下:
 
 {{< highlight bash >}}
 ozone om --init
 {{< /highlight >}}
 
 
-Once Ozone manager is initialized, we are ready to run the name service.
+OM 初始化完成之后,就可以启动 OM 服务了:
 
 {{< highlight bash >}}
 ozone --daemon start om
 {{< /highlight >}}
 
-At this point Ozone's name services, the Ozone manager, and the block service  SCM is both running.\
-**Please note**: If SCM is not running
-```om --init``` command will fail. SCM start will fail if on-disk data structures are missing. So please make sure you have done both ```scm --init``` and ```om --init``` commands.
+此时 Ozone 的命名服务 OM 和块服务 SCM 都已运行。\
+**注意**: 如果 SCM 未启动,```om --init``` 命令会失败,同样,如果磁盘上的元数据缺失,SCM 也无法启动,所以请确保 ```scm --init``` 和 ```om --init``` 两条命令都成功执行了。
+
+接下来启动 Datanode,在每个 Datanode 上运行下面的命令:
 
-Now we need to start the data nodes. Please run the following command on each datanode.
 {{< highlight bash >}}
 ozone --daemon start datanode
 {{< /highlight >}}
 
-At this point SCM, Ozone Manager and data nodes are up and running.
+现在 SCM、OM 和所有的 Datanode 都已启动并运行。
+
+***恭喜!你成功地搭建了一个完整的 ozone 集群。***
 
-***Congratulations!, You have set up a functional ozone cluster.***
+## 捷径
 
-## Shortcut
+如果你想简化操作,可以直接运行:
 
-If you want to make your life simpler, you can just run
 {{< highlight bash >}}
 ozone scm --init
 ozone om --init
 start-ozone.sh
 {{< /highlight >}}
 
-This assumes that you have set up the slaves file correctly and ssh
-configuration that allows ssh-ing to all data nodes. This is the same as the
-HDFS configuration, so please refer to HDFS documentation on how to set this
-up.
+这么做的前提是,slaves 文件已经正确编写,并且配置好了到各个 Datanode 的 ssh,这和 HDFS 的配置方式相同,具体方法请查看 HDFS 文档。
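+
+一个最简单的示意如下(文件名与路径以 HDFS/Ozone 的实际文档为准,主机名均为假设值),每行写一个 Datanode 的主机名:
+
+{{< highlight bash >}}
+# workers(旧版本中称为 slaves)文件内容示意,每行一个 Datanode 主机名
+datanode1.example.com
+datanode2.example.com
+datanode3.example.com
+{{< /highlight >}}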
diff --git a/hadoop-hdds/docs/content/start/RunningViaDocker.zh.md b/hadoop-hdds/docs/content/start/RunningViaDocker.zh.md
index 9e1e361..be40c24 100644
--- a/hadoop-hdds/docs/content/start/RunningViaDocker.zh.md
+++ b/hadoop-hdds/docs/content/start/RunningViaDocker.zh.md
@@ -1,5 +1,5 @@
 ---
-title: Pseudo-cluster
+title: 伪集群部署 Ozone
 weight: 23
 
 ---
@@ -21,14 +21,12 @@ weight: 23
 -->
 
 {{< requirements >}}
- * docker and docker-compose
+ * docker 和 docker-compose
 {{< /requirements >}}
 
-* Download the Ozone binary tarball and untar it.
+* 下载 Ozone 二进制压缩包并解压。
 
-* Go to the directory where the docker compose files exist and tell
-`docker-compose` to start Ozone in the background. This will start a small
-ozone instance on your machine.
+* 进入 docker compose 文件所在的目录,执行 `docker-compose` 命令,你的机器会启动一个运行在后台的 ozone 伪集群。
 
 {{< highlight bash >}}
 cd compose/ozone/
@@ -36,25 +34,18 @@ cd compose/ozone/
 docker-compose up -d
 {{< /highlight >}}
 
-To verify that ozone is working as expected, let us log into a data node and
-run _freon_, the load generator for Ozone. The ```exec datanode bash``` command
-will open a bash shell on the datanode.
-
-The `ozone freon` command is executed within the datanode container. You can quit freon via CTRL-C any time. The
-```rk``` profile instructs freon to generate random keys.
+为了验证 Ozone 正常运行,我们可以登录到 Datanode 并运行 Ozone 的负载生成工具 _freon_。```exec datanode bash``` 命令会在 Datanode 上启动一个 bash,`ozone freon` 命令在 Datanode 所在的容器内执行。命令行选项 ```rk``` 会让 freon 生成随机的键,你随时可以通过 CTRL-C 退出 freon。
 
 {{< highlight bash >}}
 docker-compose exec datanode bash
 ozone freon rk
 {{< /highlight >}}
 
-You can check out the **OzoneManager UI** at http://localhost:9874/ to see the
-activity generated by freon.
-While you are there, please don't forget to check out the ozone configuration explorer.
+你可以通过 http://localhost:9874/ 访问 **OzoneManager UI** 来查看服务端处理 freon 负载的情况,以及浏览 ozone 的配置。
 
-***Congratulations, You have just run your first ozone cluster.***
+***恭喜,你成功运行了你的第一个 ozone 集群。***
 
-To shutdown the cluster, please run
+关闭集群的命令为:
 {{< highlight bash >}}
 docker-compose down
 {{< /highlight >}}
diff --git a/hadoop-hdds/docs/content/start/StartFromDockerHub.zh.md b/hadoop-hdds/docs/content/start/StartFromDockerHub.zh.md
index e3e7d41..c7e484d 100644
--- a/hadoop-hdds/docs/content/start/StartFromDockerHub.zh.md
+++ b/hadoop-hdds/docs/content/start/StartFromDockerHub.zh.md
@@ -1,5 +1,5 @@
 ---
-title: Simple Single Ozone
+title: 简易 Ozone
 weight: 10
 
 ---
@@ -21,91 +21,76 @@ weight: 10
 -->
 
 {{< requirements >}}
- * Working docker setup
- * AWS CLI (optional)
+ * docker
+ * AWS CLI(可选)
 {{< /requirements >}}
 
-# Ozone in a Single Container
+# 所有 Ozone 服务在单个容器
 
-The easiest way to start up an all-in-one ozone container is to use the latest
-docker image from docker hub:
+启动一个 all-in-one 的 ozone 容器最简单的方法就是使用 Docker Hub 最新的 docker 镜像:
 
 ```bash
 docker run -p 9878:9878 -p 9876:9876 apache/ozone
 ```
-This command will pull down the ozone image from docker hub and start all
-ozone services in a single container. <br>
-This container will run the required metadata servers (Ozone Manager, Storage
-Container Manager) one data node  and the S3 compatible REST server
-(S3 Gateway).
+这个命令会从 Docker Hub 拉取 ozone 镜像并在一个容器中启动所有 ozone 服务,包括必要的元数据服务(Ozone Manager,Storage Container Manager)、一个数据节点和兼容 S3
+ 的 REST 服务(S3 网关)。
 
-# Local multi-container cluster
+# Ozone 服务在多个独立的容器
 
-If you would like to use a more realistic pseudo-cluster where each components
-run in own containers, you can start it with a docker-compose file.
+如果你需要一个更类似生产环境的集群,使用 Ozone 发行包自带的 docker-compose 配置文件可以让 Ozone 服务组件在各自独立的容器中运行。
 
-We have shipped a docker-compose and an enviorment file as part of the
-container image  that is uploaded to docker hub.
+docker-compose 配置文件和一个 environment 文件已经包含在 Docker Hub 的镜像中。
 
-The following commands can be used to extract these files from the image in the docker hub.
+下面的命令可以从镜像中获取到这两个文件:
 ```bash
 docker run apache/ozone cat docker-compose.yaml > docker-compose.yaml
 docker run apache/ozone cat docker-config > docker-config
 ```
 
- Now you can start the cluster with docker-compose:
+现在你可以用 docker-compose 命令来启动集群:
 
 ```bash
 docker-compose up -d
 ```
 
-If you need multiple datanodes, we can just scale it up:
+如果你需要多个数据节点,可以通过下面的命令增加:
 
 ```bash
  docker-compose scale datanode=3
  ```
-# Running S3 Clients
+# 运行 S3 客户端
 
-Once the cluster is booted up and ready, you can verify its status by
-connecting to the SCM's UI at [http://localhost:9876](http://localhost:9876).
+集群启动就绪后,你可以连接 SCM 的 UI 来验证它的状态,地址为([http://localhost:9876](http://localhost:9876))。
 
-The S3 gateway endpoint will be exposed at port 9878. You can use Ozone's S3
-support as if you are working against the real S3.
+S3 网关的服务端口为 9878,你可以像访问真实的 S3 一样使用 Ozone 的 S3 支持。
 
 
-Here is how you create buckets from command line:
+从命令行创建桶的命令为:
 
 ```bash
 aws s3api --endpoint http://localhost:9878/ create-bucket --bucket=bucket1
 ```
 
-Only notable difference in the above command line is the fact that you have
-to tell the _endpoint_ address to the aws s3api command.
+唯一的区别在于你需要在运行 aws s3api 命令的时候用 --endpoint 选项指定 ozone S3 网关的地址。
 
-Now let us put a simple file into the S3 Bucket hosted by Ozone. We will
-start by creating a temporary file that we can upload to Ozone via S3 support.
+下面我们来把一个简单的文件存入 Ozone 的 S3 桶中,首先创建一个用来上传的临时文件:
 ```bash
 ls -1 > /tmp/testfile
  ```
- This command creates a temporary file that
- we can upload to Ozone. The next command actually uploads to Ozone's S3
- bucket using the standard aws s3 command line interface.
+ 这个命令创建了一个用来上传到 Ozone 的临时文件,下面的命令用标准的 aws s3 命令行接口把这个文件上传到了 Ozone 的 S3 桶中:
 
 ```bash
 aws s3 --endpoint http://localhost:9878 cp --storage-class REDUCED_REDUNDANCY  /tmp/testfile  s3://bucket1/testfile
 ```
 <div class="alert alert-info" role="alert">
-Note: REDUCED_REDUNDANCY is required for the single container ozone, since it
- has a single datanode. </div>
-We can now verify that file got uploaded by running the list command against
-our bucket.
+注意:对于单容器 ozone 来说,REDUCED_REDUNDANCY 参数是必需的,因为它只有一个数据节点。</div>
+我们可以对桶运行 list 命令来验证文件是否上传成功:
 
 ```bash
 aws s3 --endpoint http://localhost:9878 ls s3://bucket1/testfile
 ```
 
-<div class="alert alert-info" role="alert"> You can also check the internal
-bucket browser supported by Ozone S3 interface by clicking on the below link.
+<div class="alert alert-info" role="alert"> 你也可以点击下面的链接,通过 Ozone S3 网关自带的浏览器去查看桶内的文件。
 <br>
 </div>
 http://localhost:9878/bucket1?browser
diff --git a/hadoop-hdds/docs/content/start/_index.zh.md b/hadoop-hdds/docs/content/start/_index.zh.md
index d2bbf1d..6f9288b 100644
--- a/hadoop-hdds/docs/content/start/_index.zh.md
+++ b/hadoop-hdds/docs/content/start/_index.zh.md
@@ -36,8 +36,7 @@ Ozone 的安装和运行有多种方式,支持从简单的本地节点 docker
 
 你可以通过 Docker Hub 来运行 Ozone,无需下载官方发行包,这让探索 Ozone 十分容易。
 <br />
-  {{<card title="在 Docker 中启动 ozone" link="start/StartFromDockerHub.zh.md" link-text="Ozone In Docker" image="start
-  /docker.png">}}
+  {{<card title="在 Docker 中启动 ozone" link="start/StartFromDockerHub.zh.md" link-text="Ozone In Docker" image="start/docker.png">}}
   启动一个 ozone 集群来探索其功能的最简易的方式就是通过 docker 来启动 ozone。
   {{</card>}}
 
diff --git a/hadoop-hdds/docs/dev-support/bin/generate-site.sh b/hadoop-hdds/docs/dev-support/bin/generate-site.sh
index d8b5d48..4dfbebc 100755
--- a/hadoop-hdds/docs/dev-support/bin/generate-site.sh
+++ b/hadoop-hdds/docs/dev-support/bin/generate-site.sh
@@ -14,6 +14,8 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
+set -eu
+
 DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
 DOCDIR="$DIR/../.."
 
diff --git a/hadoop-hdds/framework/pom.xml b/hadoop-hdds/framework/pom.xml
index 40574e3..fa6cf28 100644
--- a/hadoop-hdds/framework/pom.xml
+++ b/hadoop-hdds/framework/pom.xml
@@ -49,5 +49,30 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd">
       <artifactId>mockito-all</artifactId>
       <scope>test</scope>
     </dependency>
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-common</artifactId>
+      <version>${hadoop.version}</version>
+      <scope>test</scope>
+      <type>test-jar</type>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-hdfs-client</artifactId>
+      <version>${hadoop.version}</version>
+      <exclusions>
+        <exclusion>
+          <groupId>com.squareup.okhttp</groupId>
+          <artifactId>okhttp</artifactId>
+        </exclusion>
+      </exclusions>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-hdfs</artifactId>
+      <version>${hadoop.version}</version>
+      <scope>test</scope>
+      <type>test-jar</type>
+    </dependency>
   </dependencies>
 </project>
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/BaseHttpServer.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/BaseHttpServer.java
index b6ba4f6..a7b458f 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/BaseHttpServer.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/BaseHttpServer.java
@@ -17,29 +17,30 @@
 
 package org.apache.hadoop.hdds.server;
 
+import javax.servlet.http.HttpServlet;
+import java.io.IOException;
+import java.net.InetSocketAddress;
+import java.util.Optional;
+import java.util.OptionalInt;
+
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdds.DFSConfigKeysLegacy;
 import org.apache.hadoop.hdds.HddsConfigKeys;
-import org.apache.hadoop.hdfs.DFSConfigKeys;
-import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdds.conf.HddsConfServlet;
+import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.http.HttpConfig;
 import org.apache.hadoop.http.HttpServer2;
 import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
 import org.apache.hadoop.net.NetUtils;
+import org.apache.hadoop.ozone.OzoneConfigKeys;
 
+import org.apache.commons.lang3.StringUtils;
+import static org.apache.hadoop.hdds.HddsUtils.getHostNameFromConfigKeys;
+import static org.apache.hadoop.hdds.HddsUtils.getPortNumberFromConfigKeys;
 import org.eclipse.jetty.webapp.WebAppContext;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import javax.servlet.http.HttpServlet;
-import java.io.IOException;
-import java.net.InetSocketAddress;
-import java.util.Optional;
-import java.util.OptionalInt;
-
-import static org.apache.hadoop.hdds.HddsUtils.getHostNameFromConfigKeys;
-import static org.apache.hadoop.hdds.HddsUtils.getPortNumberFromConfigKeys;
-
 /**
  * Base class for HTTP server of the Ozone related components.
  */
@@ -48,6 +49,8 @@ public abstract class BaseHttpServer {
   private static final Logger LOG =
       LoggerFactory.getLogger(BaseHttpServer.class);
   protected static final String PROMETHEUS_SINK = "PROMETHEUS_SINK";
+  protected static final String JETTY_BASETMPDIR =
+      "org.eclipse.jetty.webapp.basetempdir";
 
   private HttpServer2 httpServer;
   private final Configuration conf;
@@ -82,12 +85,12 @@ public abstract class BaseHttpServer {
           name, getSpnegoPrincipal(), getKeytabFile());
 
       final boolean xFrameEnabled = conf.getBoolean(
-          DFSConfigKeys.DFS_XFRAME_OPTION_ENABLED,
-          DFSConfigKeys.DFS_XFRAME_OPTION_ENABLED_DEFAULT);
+          DFSConfigKeysLegacy.DFS_XFRAME_OPTION_ENABLED,
+          DFSConfigKeysLegacy.DFS_XFRAME_OPTION_ENABLED_DEFAULT);
 
       final String xFrameOptionValue = conf.getTrimmed(
-          DFSConfigKeys.DFS_XFRAME_OPTION_VALUE,
-          DFSConfigKeys.DFS_XFRAME_OPTION_VALUE_DEFAULT);
+          DFSConfigKeysLegacy.DFS_XFRAME_OPTION_VALUE,
+          DFSConfigKeysLegacy.DFS_XFRAME_OPTION_VALUE_DEFAULT);
 
       builder.configureXFrame(xFrameEnabled).setXFrameOption(xFrameOptionValue);
 
@@ -114,8 +117,13 @@ public abstract class BaseHttpServer {
                 + "production!");
         httpServer.addServlet("profile", "/prof", ProfileServlet.class);
       }
-    }
 
+      String baseDir = conf.get(OzoneConfigKeys.OZONE_HTTP_BASEDIR);
+      if (!StringUtils.isEmpty(baseDir)) {
+        httpServer.getWebAppContext().setAttribute(JETTY_BASETMPDIR, baseDir);
+        LOG.info("HTTP server of {} uses base directory {}", name, baseDir);
+      }
+    }
   }
 
   /**
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/events/EventQueue.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/events/EventQueue.java
index e8ce3f9..6991554 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/events/EventQueue.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/events/EventQueue.java
@@ -61,6 +61,8 @@ public class EventQueue implements EventPublisher, AutoCloseable {
 
   private static final Gson TRACING_SERIALIZER = new GsonBuilder().create();
 
+  private boolean isSilent = false;
+
   public <PAYLOAD, EVENT_TYPE extends Event<PAYLOAD>> void addHandler(
       EVENT_TYPE event, EventHandler<PAYLOAD> handler) {
     this.addHandler(event, handler, generateHandlerName(handler));
@@ -180,7 +182,9 @@ public class EventQueue implements EventPublisher, AutoCloseable {
       }
 
     } else {
-      LOG.warn("No event handler registered for event {}", event);
+      if (!isSilent) {
+        LOG.warn("No event handler registered for event {}", event);
+      }
     }
 
   }
@@ -258,4 +262,11 @@ public class EventQueue implements EventPublisher, AutoCloseable {
     });
   }
 
+  /**
+   * Don't log messages when there are no consumers of a message.
+   * @param silent flag.
+   */
+  public void setSilent(boolean silent) {
+    isSilent = silent;
+  }
 }
diff --git a/hadoop-hdds/pom.xml b/hadoop-hdds/pom.xml
index 17bcab4..faa419b 100644
--- a/hadoop-hdds/pom.xml
+++ b/hadoop-hdds/pom.xml
@@ -197,56 +197,6 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd">
   </dependencyManagement>
   <dependencies>
     <dependency>
-      <groupId>org.apache.hadoop</groupId>
-      <artifactId>hadoop-common</artifactId>
-      <version>${hadoop.version}</version>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.hadoop</groupId>
-      <artifactId>hadoop-hdfs</artifactId>
-      <version>${hadoop.version}</version>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.hadoop</groupId>
-      <artifactId>hadoop-hdfs-client</artifactId>
-      <version>${hadoop.version}</version>
-      <exclusions>
-        <exclusion>
-          <groupId>com.squareup.okhttp</groupId>
-          <artifactId>okhttp</artifactId>
-        </exclusion>
-      </exclusions>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.hadoop</groupId>
-      <artifactId>hadoop-common</artifactId>
-      <version>${hadoop.version}</version>
-      <scope>test</scope>
-      <type>test-jar</type>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.hadoop</groupId>
-      <artifactId>hadoop-hdfs</artifactId>
-      <version>${hadoop.version}</version>
-      <scope>test</scope>
-      <type>test-jar</type>
-    </dependency>
-    <dependency>
-      <groupId>info.picocli</groupId>
-      <artifactId>picocli</artifactId>
-      <version>3.9.6</version>
-    </dependency>
-    <dependency>
-      <groupId>com.google.protobuf</groupId>
-      <artifactId>protobuf-java</artifactId>
-      <scope>compile</scope>
-    </dependency>
-    <dependency>
-      <groupId>com.google.guava</groupId>
-      <artifactId>guava</artifactId>
-      <scope>compile</scope>
-    </dependency>
-    <dependency>
       <groupId>junit</groupId>
       <artifactId>junit</artifactId>
       <scope>test</scope>
diff --git a/hadoop-hdds/server-scm/pom.xml b/hadoop-hdds/server-scm/pom.xml
index 68a5cd8..a591c00 100644
--- a/hadoop-hdds/server-scm/pom.xml
+++ b/hadoop-hdds/server-scm/pom.xml
@@ -53,6 +53,27 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd">
       <groupId>org.apache.hadoop</groupId>
       <artifactId>hadoop-hdds-docs</artifactId>
     </dependency>
+    <dependency>
+      <groupId>io.dropwizard.metrics</groupId>
+      <artifactId>metrics-core</artifactId>
+    </dependency>
+
+    <dependency>
+      <groupId>com.github.spotbugs</groupId>
+      <artifactId>spotbugs</artifactId>
+      <scope>provided</scope>
+    </dependency>
+
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-hdfs-client</artifactId>
+      <exclusions>
+        <exclusion>
+          <groupId>com.squareup.okhttp</groupId>
+          <artifactId>okhttp</artifactId>
+        </exclusion>
+      </exclusions>
+    </dependency>
 
     <dependency>
       <groupId>org.apache.hadoop</groupId>
@@ -67,10 +88,7 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd">
       <version>1.3</version>
       <scope>test</scope>
     </dependency>
-    <dependency>
-      <groupId>io.dropwizard.metrics</groupId>
-      <artifactId>metrics-core</artifactId>
-    </dependency>
+
     <dependency>
       <groupId>org.assertj</groupId>
       <artifactId>assertj-core</artifactId>
@@ -95,15 +113,26 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd">
       <groupId>org.hamcrest</groupId>
       <artifactId>hamcrest-all</artifactId>
       <version>1.3</version>
+      <scope>test</scope>
     </dependency>
     <dependency>
       <groupId>org.bouncycastle</groupId>
       <artifactId>bcprov-jdk15on</artifactId>
     </dependency>
+
     <dependency>
-      <groupId>com.github.spotbugs</groupId>
-      <artifactId>spotbugs</artifactId>
-      <scope>provided</scope>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-common</artifactId>
+      <version>${hadoop.version}</version>
+      <scope>test</scope>
+      <type>test-jar</type>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-hdfs</artifactId>
+      <version>${hadoop.version}</version>
+      <scope>test</scope>
+      <type>test-jar</type>
     </dependency>
   </dependencies>
   <build>
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/SCMContainerManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/SCMContainerManager.java
index f0411d2..e08dd8c 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/SCMContainerManager.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/SCMContainerManager.java
@@ -87,8 +87,7 @@ public class SCMContainerManager implements ContainerManager {
   public SCMContainerManager(final Configuration conf,
       PipelineManager pipelineManager) throws IOException {
 
-    final File metaDir = ServerUtils.getScmDbDir(conf);
-    final File containerDBPath = new File(metaDir, SCM_CONTAINER_DB);
+    final File containerDBPath = getContainerDBPath(conf);
     final int cacheSize = conf.getInt(OZONE_SCM_DB_CACHE_SIZE_MB,
         OZONE_SCM_DB_CACHE_SIZE_DEFAULT);
 
@@ -579,4 +578,9 @@ public class SCMContainerManager implements ContainerManager {
       }
     }
   }
+
+  protected File getContainerDBPath(Configuration conf) {
+    File metaDir = ServerUtils.getScmDbDir(conf);
+    return new File(metaDir, SCM_CONTAINER_DB);
+  }
 }
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeManager.java
index f077e72..d84b75b 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeManager.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeManager.java
@@ -22,16 +22,16 @@ import java.io.IOException;
 import java.net.InetAddress;
 import java.util.ArrayList;
 import java.util.HashMap;
+import java.util.LinkedList;
 import java.util.List;
 import java.util.Map;
 import java.util.Set;
-import java.util.LinkedList;
 import java.util.UUID;
 import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.ScheduledFuture;
 import java.util.stream.Collectors;
 
-import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
+import org.apache.hadoop.hdds.DFSConfigKeysLegacy;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState;
@@ -52,7 +52,6 @@ import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
 import org.apache.hadoop.hdds.scm.pipeline.PipelineID;
 import org.apache.hadoop.hdds.scm.server.SCMStorageConfig;
 import org.apache.hadoop.hdds.server.events.EventPublisher;
-import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.ipc.Server;
 import org.apache.hadoop.metrics2.util.MBeans;
 import org.apache.hadoop.net.CachedDNSToSwitchMapping;
@@ -68,6 +67,7 @@ import org.apache.hadoop.util.ReflectionUtils;
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Preconditions;
 import com.google.common.base.Strings;
+import edu.umd.cs.findbugs.annotations.SuppressFBWarnings;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -118,7 +118,8 @@ public class SCMNodeManager implements NodeManager {
     this.metrics = SCMNodeMetrics.create(this);
     this.clusterMap = networkTopology;
     Class<? extends DNSToSwitchMapping> dnsToSwitchMappingClass =
-        conf.getClass(DFSConfigKeys.NET_TOPOLOGY_NODE_SWITCH_MAPPING_IMPL_KEY,
+        conf.getClass(
+            DFSConfigKeysLegacy.NET_TOPOLOGY_NODE_SWITCH_MAPPING_IMPL_KEY,
             TableMapping.class, DNSToSwitchMapping.class);
     DNSToSwitchMapping newInstance = ReflectionUtils.newInstance(
         dnsToSwitchMappingClass, conf);
@@ -126,8 +127,8 @@ public class SCMNodeManager implements NodeManager {
         ((newInstance instanceof CachedDNSToSwitchMapping) ? newInstance
             : new CachedDNSToSwitchMapping(newInstance));
     this.useHostname = conf.getBoolean(
-        DFSConfigKeys.DFS_DATANODE_USE_DN_HOSTNAME,
-        DFSConfigKeys.DFS_DATANODE_USE_DN_HOSTNAME_DEFAULT);
+        DFSConfigKeysLegacy.DFS_DATANODE_USE_DN_HOSTNAME,
+        DFSConfigKeysLegacy.DFS_DATANODE_USE_DN_HOSTNAME_DEFAULT);
   }
 
   private void registerMXBean() {
@@ -282,11 +283,12 @@ public class SCMNodeManager implements NodeManager {
    * Add an entry to the dnsToUuidMap, which maps hostname / IP to the DNs
    * running on that host. As each address can have many DNs running on it,
    * this is a one to many mapping.
+   *
    * @param dnsName String representing the hostname or IP of the node
-   * @param uuid String representing the UUID of the registered node.
+   * @param uuid    String representing the UUID of the registered node.
    */
-  @SuppressFBWarnings(value="AT_OPERATION_SEQUENCE_ON_CONCURRENT_ABSTRACTION",
-      justification="The method is synchronized and this is the only place "+
+  @SuppressFBWarnings(value = "AT_OPERATION_SEQUENCE_ON_CONCURRENT_ABSTRACTION",
+      justification = "The method is synchronized and this is the only place " +
           "dnsToUuidMap is modified")
   private synchronized void addEntryTodnsToUuidMap(
       String dnsName, String uuid) {
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineFactory.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineFactory.java
index 86ad5ee..58a8fd7 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineFactory.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineFactory.java
@@ -35,7 +35,7 @@ import java.util.Map;
 /**
  * Creates pipeline based on replication type.
  */
-public final class PipelineFactory {
+public class PipelineFactory {
 
   private Map<ReplicationType, PipelineProvider> providers;
 
@@ -49,6 +49,9 @@ public final class PipelineFactory {
             eventPublisher));
   }
 
+  protected PipelineFactory() {
+  }
+
   @VisibleForTesting
   void setProvider(ReplicationType replicationType,
                      PipelineProvider provider) {
@@ -73,4 +76,14 @@ public final class PipelineFactory {
   public void shutdown() {
     providers.values().forEach(provider -> provider.shutdown());
   }
+
+  @VisibleForTesting
+  public Map<ReplicationType, PipelineProvider> getProviders() {
+    return providers;
+  }
+
+  protected void setProviders(
+      Map<ReplicationType, PipelineProvider> providers) {
+    this.providers = providers;
+  }
 }
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineManager.java
index 779008f..0855295 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineManager.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineManager.java
@@ -42,6 +42,8 @@ public interface PipelineManager extends Closeable, PipelineManagerMXBean {
 
   Pipeline getPipeline(PipelineID pipelineID) throws PipelineNotFoundException;
 
+  boolean containsPipeline(PipelineID pipelineID);
+
   List<Pipeline> getPipelines();
 
   List<Pipeline> getPipelines(ReplicationType type);
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineReportHandler.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineReportHandler.java
index a7e2bf1..9df5f4e 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineReportHandler.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineReportHandler.java
@@ -29,7 +29,7 @@ import org.apache.hadoop.hdds.protocol.proto
 import org.apache.hadoop.hdds.protocol.proto
     .StorageContainerDatanodeProtocolProtos.PipelineReportsProto;
 import org.apache.hadoop.hdds.scm.events.SCMEvents;
-import org.apache.hadoop.hdds.scm.safemode.SCMSafeModeManager;
+import org.apache.hadoop.hdds.scm.safemode.SafeModeManager;
 import org.apache.hadoop.hdds.scm.server
     .SCMDatanodeHeartbeatDispatcher.PipelineReportFromDatanode;
 import org.apache.hadoop.hdds.server.events.EventHandler;
@@ -51,10 +51,10 @@ public class PipelineReportHandler implements
       PipelineReportHandler.class);
   private final PipelineManager pipelineManager;
   private final Configuration conf;
-  private final SCMSafeModeManager scmSafeModeManager;
+  private final SafeModeManager scmSafeModeManager;
   private final boolean pipelineAvailabilityCheck;
 
-  public PipelineReportHandler(SCMSafeModeManager scmSafeModeManager,
+  public PipelineReportHandler(SafeModeManager scmSafeModeManager,
       PipelineManager pipelineManager, Configuration conf) {
     Preconditions.checkNotNull(pipelineManager);
     this.scmSafeModeManager = scmSafeModeManager;
@@ -87,8 +87,8 @@ public class PipelineReportHandler implements
     }
   }
 
-  private void processPipelineReport(PipelineReport report, DatanodeDetails dn,
-      EventPublisher publisher) throws IOException {
+  protected void processPipelineReport(PipelineReport report,
+      DatanodeDetails dn, EventPublisher publisher) throws IOException {
     PipelineID pipelineID = PipelineID.getFromProtobuf(report.getPipelineID());
     Pipeline pipeline;
     try {
@@ -102,12 +102,8 @@ public class PipelineReportHandler implements
       return;
     }
 
-    pipeline.reportDatanode(dn);
-    // ONE replica pipeline doesn't have leader flag
-    if (report.getIsLeader() ||
-        pipeline.getFactor() == HddsProtos.ReplicationFactor.ONE) {
-      pipeline.setLeaderId(dn.getUuid());
-    }
+    setReportedDatanode(pipeline, dn);
+    setPipelineLeaderId(report, pipeline, dn);
 
     if (pipeline.getPipelineState() == Pipeline.PipelineState.ALLOCATED) {
       LOGGER.info("Pipeline {} {} reported by {}", pipeline.getFactor(),
@@ -120,4 +116,24 @@ public class PipelineReportHandler implements
       }
     }
   }
+
+
+  protected void setReportedDatanode(Pipeline pipeline, DatanodeDetails dn)
+      throws IOException {
+    pipeline.reportDatanode(dn);
+  }
+
+  protected void setPipelineLeaderId(PipelineReport report,
+                                     Pipeline pipeline,
+                                     DatanodeDetails dn) {
+    // ONE replica pipeline doesn't have leader flag
+    if (report.getIsLeader() ||
+        pipeline.getFactor() == HddsProtos.ReplicationFactor.ONE) {
+      pipeline.setLeaderId(dn.getUuid());
+    }
+  }
+
+  protected PipelineManager getPipelineManager() {
+    return pipelineManager;
+  }
 }
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineStateManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineStateManager.java
index 1842a8d..bb56a03 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineStateManager.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineStateManager.java
@@ -38,18 +38,18 @@ import java.util.NavigableSet;
  * state. All the read and write operations in PipelineStateMap are protected
  * by a read write lock.
  */
-class PipelineStateManager {
+public class PipelineStateManager {
 
   private static final Logger LOG =
       LoggerFactory.getLogger(PipelineStateManager.class);
 
   private final PipelineStateMap pipelineStateMap;
 
-  PipelineStateManager() {
+  public PipelineStateManager() {
     this.pipelineStateMap = new PipelineStateMap();
   }
 
-  void addPipeline(Pipeline pipeline) throws IOException {
+  public void addPipeline(Pipeline pipeline) throws IOException {
     pipelineStateMap.addPipeline(pipeline);
     LOG.info("Created pipeline {}", pipeline);
   }
@@ -158,4 +158,9 @@ class PipelineStateManager {
     pipelineStateMap
         .updatePipelineState(pipelineID, PipelineState.DORMANT);
   }
+
+  public void updatePipelineState(PipelineID id, PipelineState newState)
+      throws PipelineNotFoundException {
+    pipelineStateMap.updatePipelineState(id, newState);
+  }
 }
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/SCMPipelineManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/SCMPipelineManager.java
index dc13804..32aa7b6 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/SCMPipelineManager.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/SCMPipelineManager.java
@@ -72,8 +72,8 @@ public class SCMPipelineManager implements PipelineManager {
       LoggerFactory.getLogger(SCMPipelineManager.class);
 
   private final ReadWriteLock lock;
-  private final PipelineFactory pipelineFactory;
-  private final PipelineStateManager stateManager;
+  private PipelineFactory pipelineFactory;
+  private PipelineStateManager stateManager;
   private final BackgroundPipelineCreator backgroundPipelineCreator;
   private Scheduler scheduler;
   private MetadataStore pipelineStore;
@@ -89,19 +89,29 @@ public class SCMPipelineManager implements PipelineManager {
   public SCMPipelineManager(Configuration conf, NodeManager nodeManager,
       EventPublisher eventPublisher)
       throws IOException {
+    this(conf, nodeManager, eventPublisher, null, null);
... 13213 lines suppressed ...


---------------------------------------------------------------------
To unsubscribe, e-mail: ozone-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: ozone-commits-help@hadoop.apache.org