You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@ozone.apache.org by so...@apache.org on 2020/01/10 16:53:50 UTC
[hadoop-ozone] 01/01: Merge branch 'master' into HDDS-1880-Decom
This is an automated email from the ASF dual-hosted git repository.
sodonnell pushed a commit to branch HDDS-1880-Decom
in repository https://gitbox.apache.org/repos/asf/hadoop-ozone.git
commit 64e25a9fdc26b2cc6b1a933e358d8078cf3d5701
Merge: 692420f 25b4baf
Author: S O'Donnell <so...@cloudera.com>
AuthorDate: Fri Jan 10 16:52:19 2020 +0000
Merge branch 'master' into HDDS-1880-Decom
.github/workflows/post-commit.yml | 6 -
.github/workflows/pr.yml | 6 -
LICENSE.txt | 53 +-
NOTICE.txt | 5 +-
.../hadoop/hdds/scm/XceiverClientManager.java | 60 +-
.../hadoop/hdds/scm/storage/BlockInputStream.java | 34 +-
.../hadoop/hdds/scm/storage/CommitWatcher.java | 23 +-
.../hdds/scm/storage/TestBlockInputStream.java | 73 +-
.../org/apache/hadoop/hdds/HddsConfigKeys.java | 9 +-
.../java/org/apache/hadoop/hdds/HddsUtils.java | 26 +
.../hadoop/hdds/conf/OzoneConfiguration.java | 103 +-
.../org/apache/hadoop/hdds/ratis/RatisHelper.java | 26 +-
.../apache/hadoop/hdds/recon/ReconConfigKeys.java | 45 +-
.../org/apache/hadoop/hdds/recon/package-info.java | 18 +-
.../hadoop/hdds/scm/ByteStringConversion.java | 16 +-
.../java/org/apache/hadoop/hdds/scm/ScmConfig.java | 30 +-
.../org/apache/hadoop/hdds/scm/ScmConfigKeys.java | 7 -
.../apache/hadoop/hdds/scm/XceiverClientSpi.java | 23 +-
.../apache/hadoop/hdds/scm/pipeline/Pipeline.java | 25 +-
.../hdds/scm/storage/ContainerProtocolCalls.java | 5 +
.../hadoop/hdds/security/x509/SecurityConfig.java | 49 +-
.../certificate/authority/DefaultCAServer.java | 4 +-
.../client/DefaultCertificateClient.java | 2 +-
.../security/x509/certificate/utils/CRLCodec.java | 197 +
.../hadoop/hdds/utils/BackgroundService.java | 10 +-
.../java/org/apache/hadoop/hdds/utils/Cache.java | 31 +-
.../hadoop/hdds/utils/ResourceLimitCache.java | 91 +
.../hadoop/hdds/utils/ResourceSemaphore.java | 170 +
.../hadoop/hdds/utils/RocksDBStoreMBean.java | 2 +-
.../org/apache/hadoop/hdds/utils/Scheduler.java | 31 +-
.../hadoop/hdds/utils/db/ByteArrayCodec.java | 39 +-
.../apache/hadoop/hdds/utils/db/CodecRegistry.java | 1 +
.../hadoop/hdds/utils/db/RocksDBConfiguration.java | 35 +-
.../org/apache/hadoop/ozone/OzoneConfigKeys.java | 13 +-
.../apache/hadoop/ozone/common/ChunkBuffer.java | 47 +-
.../common/ChunkBufferImplWithByteBuffer.java | 35 +-
.../common/ChunkBufferImplWithByteBufferList.java | 215 ++
.../ozone/common/IncrementalChunkBuffer.java | 29 +-
hadoop-hdds/common/src/main/proto/hdds.proto | 1 +
.../common/src/main/resources/ozone-default.xml | 19 +-
.../hadoop/hdds/conf/SimpleConfiguration.java | 47 +-
.../hdds/conf/SimpleConfigurationParent.java | 26 +-
.../hadoop/hdds/conf/TestOzoneConfiguration.java | 19 +-
.../hadoop/hdds/protocol/MockDatanodeDetails.java | 125 +
.../hadoop/hdds/scm/pipeline/MockPipeline.java | 74 +
.../x509/certificate/utils/TestCRLCodec.java | 163 +
.../hadoop/hdds/utils/MockGatheringChannel.java | 74 +
.../hadoop/hdds/utils/TestResourceLimitCache.java | 87 +
.../hadoop/hdds/utils/TestResourceSemaphore.java | 76 +
.../hdds/utils/db/TestTypedRDBTableStore.java | 20 +-
.../hadoop/ozone/audit/TestOzoneAuditLogger.java | 10 +-
.../apache/hadoop/ozone/audit/package-info.java | 2 +-
.../hadoop/ozone/common/TestChunkBuffer.java | 90 +-
.../TestChunkBufferImplWithByteBufferList.java | 80 +
.../{log4j2.properties => auditlog.properties} | 0
.../java/org/apache/hadoop/hdds/conf/Config.java | 2 +-
.../hadoop/hdds/conf/ConfigFileGenerator.java | 77 +-
.../hadoop/hdds/conf/ConfigurationExample.java | 26 +-
.../hdds/conf/ConfigurationExampleGrandParent.java | 37 +-
.../hdds/conf/ConfigurationExampleParent.java | 38 +-
.../hadoop/hdds/conf/TestConfigFileGenerator.java | 60 +
hadoop-hdds/container-service/pom.xml | 22 +-
.../org/apache/hadoop/hdds/scm/HddsServerUtil.java | 22 +
.../common/statemachine/DatanodeConfiguration.java | 65 +-
.../common/statemachine/DatanodeStateMachine.java | 6 +-
.../common/statemachine/EndpointStateMachine.java | 42 +-
.../common/statemachine/SCMConnectionManager.java | 26 +-
.../commandhandler/CommandDispatcher.java | 2 +-
.../CreatePipelineCommandHandler.java | 46 +-
.../common/states/datanode/InitDatanodeState.java | 7 +-
.../states/endpoint/VersionEndpointTask.java | 65 +-
.../server/ratis/ContainerStateMachine.java | 62 +-
.../server/ratis/RatisServerConfiguration.java | 4 +-
.../transport/server/ratis/XceiverServerRatis.java | 32 +-
.../container/common/volume/HddsVolumeChecker.java | 2 +-
.../ozone/container/keyvalue/KeyValueHandler.java | 30 +-
.../container/keyvalue/helpers/ChunkUtils.java | 10 +-
.../keyvalue/impl/ChunkManagerDummyImpl.java | 7 +-
.../container/keyvalue/impl/ChunkManagerImpl.java | 13 +-
.../keyvalue/interfaces/ChunkManager.java | 13 +-
.../ozoneimpl/ContainerScrubberConfiguration.java | 58 +-
.../ozone/container/ContainerTestHelper.java | 87 +-
.../common/impl/TestContainerPersistence.java | 33 +-
.../TestCreatePipelineCommandHandler.java | 159 +
.../container/keyvalue/TestChunkManagerImpl.java | 18 +-
.../container/keyvalue/helpers/TestChunkUtils.java | 9 +-
hadoop-hdds/docs/config.yaml | 13 +-
hadoop-hdds/docs/content/_index.md | 10 +-
hadoop-hdds/docs/content/_index.zh.md | 38 +
hadoop-hdds/docs/content/beyond/Containers.md | 4 +-
.../index.html => content/concept/Datanodes.zh.md} | 27 +-
hadoop-hdds/docs/content/concept/Overview.md | 2 +-
.../index.html => content/concept/_index.zh.md} | 29 +-
.../index.html => content/interface/JavaApi.zh.md} | 27 +-
.../index.html => content/interface/S3.zh.md} | 26 +-
hadoop-hdds/docs/content/security/SecureOzone.md | 6 +-
.../index.html => content/shell/_index.zh.md} | 27 +-
hadoop-hdds/docs/content/start/FromSource.zh.md | 68 +
hadoop-hdds/docs/content/start/Kubernetes.zh.md | 53 +
hadoop-hdds/docs/content/start/Minikube.zh.md | 70 +
hadoop-hdds/docs/content/start/OnPrem.zh.md | 171 +
.../docs/content/start/RunningViaDocker.zh.md | 61 +
.../docs/content/start/StartFromDockerHub.zh.md | 111 +
hadoop-hdds/docs/content/start/_index.zh.md | 85 +
.../themes/ozonedoc/layouts/_default/section.html | 4 +
.../themes/ozonedoc/layouts/_default/single.html | 8 +-
.../docs/themes/ozonedoc/layouts/index.html | 4 +-
.../{index.html => partials/languages.html} | 27 +-
.../themes/ozonedoc/layouts/partials/navbar.html | 2 +-
.../org/apache/hadoop/hdds/server/ServerUtils.java | 2 +-
hadoop-hdds/pom.xml | 7 +
.../hdds/scm/container/ReplicationManager.java | 34 +-
.../hdds/scm/container/SCMContainerManager.java | 48 +-
.../scm/container/states/ContainerStateMap.java | 2 +-
.../apache/hadoop/hdds/scm/metadata/LongCodec.java | 45 -
.../hdds/scm/metadata/SCMMetadataStoreRDBImpl.java | 1 -
.../scm/pipeline/BackgroundPipelineCreator.java | 2 +-
.../hdds/scm/pipeline/PipelineActionHandler.java | 2 +-
.../hdds/scm/pipeline/PipelineStateManager.java | 7 +-
.../hdds/scm/pipeline/RatisPipelineProvider.java | 11 +-
.../hdds/scm/pipeline/SCMPipelineManager.java | 2 +-
.../scm/server/OzoneStorageContainerManager.java | 41 +-
.../hdds/scm/server/SCMDatanodeProtocolServer.java | 18 +-
.../hdds/scm/server/SCMHTTPServerConfig.java | 21 +-
.../hdds/scm/server/SCMSecurityProtocolServer.java | 2 +-
.../hadoop/hdds/scm/server/SCMStorageConfig.java | 6 +
.../hdds/scm/server/StorageContainerManager.java | 5 +-
.../org/apache/hadoop/hdds/scm/HddsTestUtils.java | 5 +-
.../java/org/apache/hadoop/hdds/scm/TestUtils.java | 74 +-
.../command/TestCommandStatusReportHandler.java | 3 +-
.../hadoop/hdds/scm/container/MockNodeManager.java | 4 +-
.../container/TestCloseContainerEventHandler.java | 4 +-
.../scm/container/TestContainerActionsHandler.java | 4 +-
.../scm/container/TestContainerStateManager.java | 12 +-
.../TestIncrementalContainerReportHandler.java | 20 +-
.../hdds/scm/container/TestReplicationManager.java | 4 +-
.../scm/container/TestSCMContainerManager.java | 5 +-
.../algorithms/TestContainerPlacementFactory.java | 4 +-
.../TestSCMContainerPlacementCapacity.java | 4 +-
.../TestSCMContainerPlacementRackAware.java | 6 +-
.../TestSCMContainerPlacementRandom.java | 4 +-
.../states/TestContainerReplicaCount.java | 4 +-
.../hdds/scm/node/TestContainerPlacement.java | 3 +-
.../hdds/scm/node/TestDatanodeAdminMonitor.java | 26 +-
.../scm/node/TestDatanodeAdminNodeDetails.java | 8 +-
.../hadoop/hdds/scm/node/TestDeadNodeHandler.java | 19 +-
.../hdds/scm/node/TestNodeDecommissionManager.java | 4 +-
.../hdds/scm/node/TestNodeReportHandler.java | 3 +-
.../hadoop/hdds/scm/node/TestSCMNodeManager.java | 10 +-
.../hadoop/hdds/scm/node/TestStatisticsUpdate.java | 5 +-
.../scm/pipeline/TestPipelineStateManager.java | 9 +-
.../scm/pipeline/TestRatisPipelineProvider.java | 6 +-
.../scm/pipeline/TestSimplePipelineProvider.java | 7 +-
.../scm/server/TestSCMBlockProtocolServer.java | 4 +-
.../server/TestSCMDatanodeHeartbeatDispatcher.java | 8 +-
.../ozone/container/common/TestEndPoint.java | 21 +-
.../hdds/scm/cli/container/ListSubcommand.java | 4 +-
.../apache/hadoop/ozone/client/ObjectStore.java | 5 +-
.../ozone/client/io/BlockOutputStreamEntry.java | 9 +-
.../client/io/BlockOutputStreamEntryPool.java | 51 +-
.../hadoop/ozone/client/io/KeyInputStream.java | 52 +-
.../hadoop/ozone/client/io/KeyOutputStream.java | 152 +-
.../ozone/client/protocol/ClientProtocol.java | 13 +
.../apache/hadoop/ozone/client/rpc/RpcClient.java | 46 +-
.../main/java/org/apache/hadoop/ozone/OmUtils.java | 3 +-
.../org/apache/hadoop/ozone/audit/OMAction.java | 1 +
.../org/apache/hadoop/ozone/om/OMConfigKeys.java | 7 -
.../hadoop/ozone/om/exceptions/OMException.java | 4 +-
.../hadoop/ozone/om/helpers/OzoneFileStatus.java | 14 +-
.../ozone/om/protocol/OzoneManagerProtocol.java | 15 +-
...OzoneManagerProtocolClientSideTranslatorPB.java | 39 +
.../OzoneDelegationTokenSecretManager.java | 10 +-
.../ozone/security/OzoneTokenIdentifier.java | 8 +-
.../hadoop/ozone/security/acl/OzoneAclConfig.java | 25 +-
.../src/main/proto/OzoneManagerProtocol.proto | 25 +-
.../java/org/apache/hadoop/ozone/TestOmUtils.java | 35 +
.../org/apache/hadoop/ozone/csi/CsiServer.java | 57 +-
hadoop-ozone/dev-support/checks/integration.sh | 2 +-
hadoop-ozone/dev-support/checks/unit.sh | 2 +-
.../dist/dev-support/bin/dist-tar-stitching | 6 +-
.../dist/src/main/assemblies/ozone-src.xml | 4 +-
.../docker-image/docker-krb5/Dockerfile-krb5 | 2 +-
.../docker-image/docker-krb5/README.md | 0
.../docker-image/docker-krb5/kadm5.acl | 0
.../docker-image/docker-krb5/krb5.conf | 0
.../docker-image/docker-krb5/launcher.sh | 0
.../dist/src/main/compose/ozone-hdfs/docker-config | 2 +
.../main/compose/ozone-mr/hadoop27/docker-config | 2 +
.../src/main/compose/ozone-mr/hadoop27/test.sh | 4 +-
.../main/compose/ozone-mr/hadoop31/docker-config | 2 +
.../src/main/compose/ozone-mr/hadoop31/test.sh | 8 +-
.../main/compose/ozone-mr/hadoop32/docker-config | 2 +
.../src/main/compose/ozone-om-ha-s3/docker-config | 2 +
.../src/main/compose/ozone-om-ha/docker-config | 2 +
.../src/main/compose/ozone-topology/docker-config | 2 +
.../dist/src/main/compose/ozone/docker-config | 2 +
.../src/main/compose/ozoneblockade/docker-config | 2 +
.../src/main/compose/ozones3-haproxy/docker-config | 2 +
.../src/main/compose/ozonescripts/docker-config | 2 +
.../compose/ozonesecure-mr/docker-compose.yaml | 5 +-
.../src/main/compose/ozonesecure-mr/docker-config | 2 +
.../docker-image/docker-krb5/kadm5.acl | 20 -
.../docker-image/docker-krb5/krb5.conf | 41 -
.../docker-image/docker-krb5/launcher.sh | 25 -
.../main/compose/ozonesecure/docker-compose.yaml | 5 +-
.../src/main/compose/ozonesecure/docker-config | 2 +
.../docker-image/docker-krb5/Dockerfile-krb5 | 34 -
.../ozonesecure/docker-image/docker-krb5/README.md | 34 -
hadoop-ozone/dist/src/main/compose/testlib.sh | 13 +-
hadoop-ozone/dist/src/main/license/src/LICENSE.txt | 239 --
hadoop-ozone/dist/src/main/license/src/NOTICE.txt | 33 -
.../src/main/smoketest/basic/ozone-shell.robot | 18 +-
.../dist/src/main/smoketest/gdpr/gdpr.robot | 16 +-
.../dist/src/main/smoketest/omha/testOMHA.robot | 4 +-
.../dist/src/main/smoketest/ozonefs/ozonefs.robot | 18 +-
.../src/main/smoketest/s3/MultipartUpload.robot | 2 +-
.../dist/src/main/smoketest/s3/__init__.robot | 2 +-
.../dist/src/main/smoketest/s3/bucketcreate.robot | 2 +-
.../dist/src/main/smoketest/s3/buckethead.robot | 2 +-
.../dist/src/main/smoketest/s3/bucketlist.robot | 2 +-
.../dist/src/main/smoketest/s3/commonawslib.robot | 4 +-
.../dist/src/main/smoketest/s3/objectcopy.robot | 2 +-
.../dist/src/main/smoketest/s3/objectdelete.robot | 2 +-
.../src/main/smoketest/s3/objectmultidelete.robot | 2 +-
.../dist/src/main/smoketest/s3/objectputget.robot | 2 +-
.../mini-chaos-tests/src/test/bin/start-chaos.sh | 16 +-
.../hadoop/ozone/MiniOzoneLoadGenerator.java | 103 +-
.../hadoop/ozone/TestMiniChaosOzoneCluster.java | 20 +-
.../org/apache/hadoop/ozone/utils/LoadBucket.java | 224 +-
.../hadoop/ozone/insight/BaseInsightPoint.java | 5 +-
.../ozone/insight/BaseInsightSubCommand.java | 2 +
.../ozone/insight/ConfigurationSubCommand.java | 10 +-
.../apache/hadoop/ozone/insight/InsightPoint.java | 3 +-
.../apache/hadoop/ozone/insight/LogSubcommand.java | 2 +-
.../ozone/insight/datanode/RatisInsight.java | 44 +-
.../hadoop/ozone/insight/om/KeyManagerInsight.java | 4 +-
.../hadoop/ozone/insight/om/OmProtocolInsight.java | 3 +-
.../ozone/insight/scm/EventQueueInsight.java | 4 +-
.../ozone/insight/scm/NodeManagerInsight.java | 4 +-
.../ozone/insight/scm/ReplicaManagerInsight.java | 4 +-
.../scm/ScmProtocolBlockLocationInsight.java | 3 +-
.../scm/ScmProtocolContainerLocationInsight.java | 3 +-
.../insight/scm/ScmProtocolDatanodeInsight.java | 3 +-
.../insight/scm/ScmProtocolSecurityInsight.java | 3 +-
hadoop-ozone/integration-test/pom.xml | 42 +
.../hadoop/fs/ozone/TestOzoneFSInputStream.java | 72 +-
.../hadoop/fs/ozone/TestOzoneFileInterfaces.java | 21 +
.../hadoop/fs/ozone/TestOzoneFileSystem.java | 0
.../apache/hadoop/fs/ozone/TestOzoneFsHAURLs.java | 0
.../hadoop/fs/ozone/TestOzoneFsRenameDir.java | 16 +-
.../ozone/contract/ITestOzoneContractCreate.java | 0
.../ozone/contract/ITestOzoneContractDelete.java | 0
.../ozone/contract/ITestOzoneContractDistCp.java | 0
.../contract/ITestOzoneContractGetFileStatus.java | 0
.../fs/ozone/contract/ITestOzoneContractMkdir.java | 0
.../fs/ozone/contract/ITestOzoneContractOpen.java | 0
.../ozone/contract/ITestOzoneContractRename.java | 0
.../ozone/contract/ITestOzoneContractRootDir.java | 0
.../fs/ozone/contract/ITestOzoneContractSeek.java | 0
.../hadoop/fs/ozone/contract/OzoneContract.java | 0
.../org/apache/hadoop/fs/ozone}/package-info.java | 17 +-
.../hadoop/hdds/scm/TestRatisPipelineLeader.java | 129 +
.../scm/pipeline/TestRatisPipelineProvider.java | 11 +-
.../org/apache/hadoop/ozone/MiniOzoneCluster.java | 13 +
.../apache/hadoop/ozone/MiniOzoneClusterImpl.java | 94 +-
.../apache/hadoop/ozone/TestMiniOzoneCluster.java | 18 +-
.../hadoop/ozone/TestOzoneConfigurationFields.java | 2 +-
.../ozone/container/TestContainerReplication.java | 3 +-
.../transport/server/ratis/TestCSMMetrics.java | 4 +-
.../container/metrics/TestContainerMetrics.java | 7 +-
.../container/ozoneimpl/TestOzoneContainer.java | 15 +-
.../ozoneimpl/TestOzoneContainerRatis.java | 4 +-
.../ozoneimpl/TestOzoneContainerWithTLS.java | 7 +-
.../ozoneimpl/TestSecureOzoneContainer.java | 7 +-
.../container/server/TestContainerServer.java | 11 +-
.../server/TestSecureContainerServer.java | 8 +-
.../hadoop/ozone/freon/TestDataValidate.java | 0
.../freon/TestDataValidateWithDummyContainers.java | 0
.../TestDataValidateWithSafeByteOperations.java | 0
.../TestDataValidateWithUnsafeByteOperations.java | 0
.../freon/TestFreonWithDatanodeFastRestart.java | 0
.../ozone/freon/TestFreonWithDatanodeRestart.java | 0
.../ozone/freon/TestFreonWithPipelineDestroy.java | 0
.../ozone/freon/TestOzoneClientKeyGenerator.java | 95 +
.../hadoop/ozone/freon/TestRandomKeyGenerator.java | 0
.../hadoop/ozone/fsck/TestContainerMapper.java | 0
.../org/apache/hadoop/ozone/om/TestOmMetrics.java | 16 +-
.../org/apache/hadoop/ozone/om/TestOmSQLCli.java | 0
.../org/apache/hadoop/ozone/recon/TestRecon.java | 408 +++
.../hadoop/ozone/scm/node/TestSCMNodeMetrics.java | 5 +-
.../src/test/resources/contract/ozone.xml | 0
hadoop-ozone/ozone-manager/pom.xml | 1 -
.../org/apache/hadoop/ozone/om/KeyManagerImpl.java | 59 +-
.../java/org/apache/hadoop/ozone/om/OMMetrics.java | 20 +
.../hadoop/ozone/om/OmMetadataManagerImpl.java | 41 +-
.../org/apache/hadoop/ozone/om/OzoneManager.java | 17 +-
.../ozone/om/ratis/OzoneManagerDoubleBuffer.java | 7 +-
.../ozone/om/ratis/OzoneManagerRatisServer.java | 55 +-
.../ozone/om/ratis/OzoneManagerRatisSnapshot.java | 10 +-
.../ozone/om/ratis/OzoneManagerStateMachine.java | 154 +-
.../om/ratis/utils/OzoneManagerRatisUtils.java | 16 +-
.../hadoop/ozone/om/request/OMClientRequest.java | 23 +
.../request/s3/bucket/S3BucketCreateRequest.java | 18 +-
.../om/request/volume/OMVolumeCreateRequest.java | 26 +-
.../om/request/volume/OMVolumeDeleteRequest.java | 51 +-
.../ozone/om/request/volume/OMVolumeRequest.java | 20 +
.../om/request/volume/OMVolumeSetOwnerRequest.java | 24 +-
.../om/request/volume/OMVolumeSetQuotaRequest.java | 24 +-
.../hadoop/ozone/om/response/OMClientResponse.java | 29 +-
.../response/file/OMDirectoryCreateResponse.java | 28 +-
.../response/s3/bucket/S3BucketCreateResponse.java | 6 +-
.../S3MultipartUploadCommitPartResponse.java | 71 +-
.../om/response/volume/OMVolumeAclOpResponse.java | 2 -
.../om/response/volume/OMVolumeCreateResponse.java | 38 +-
.../om/response/volume/OMVolumeDeleteResponse.java | 42 +-
.../response/volume/OMVolumeSetOwnerResponse.java | 56 +-
.../response/volume/OMVolumeSetQuotaResponse.java | 26 +-
.../protocolPB/OzoneManagerRequestHandler.java | 2 +
.../web/ozShell/bucket/ListBucketHandler.java | 3 +-
.../web/ozShell/volume/ListVolumeHandler.java | 3 +-
.../ozone/om/ScmBlockLocationTestingClient.java | 4 +-
.../apache/hadoop/ozone/om/TestKeyManagerUnit.java | 14 +-
.../hadoop/ozone/om/TestOmMetadataManager.java | 34 +
.../apache/hadoop/ozone/om/TestTrashService.java | 125 +
...tOzoneManagerDoubleBufferWithDummyResponse.java | 2 +-
...TestOzoneManagerDoubleBufferWithOMResponse.java | 3 +-
.../om/ratis/TestOzoneManagerStateMachine.java | 215 ++
.../ozone/om/request/TestOMRequestUtils.java | 23 +-
.../request/volume/TestOMVolumeCreateRequest.java | 51 +-
.../request/volume/TestOMVolumeDeleteRequest.java | 46 +-
.../om/request/volume/TestOMVolumeRequest.java | 30 +
.../volume/TestOMVolumeSetOwnerRequest.java | 31 +
.../volume/TestOMVolumeSetQuotaRequest.java | 38 +-
.../ozone/om/response/TestOMResponseUtils.java | 2 +-
.../volume/TestOMVolumeCreateResponse.java | 7 +-
.../volume/TestOMVolumeDeleteResponse.java | 13 +-
.../volume/TestOMVolumeSetOwnerResponse.java | 11 +-
.../volume/TestOMVolumeSetQuotaResponse.java | 7 +-
.../TestOzoneDelegationTokenSecretManager.java | 10 +-
hadoop-ozone/ozonefs/pom.xml | 47 -
.../fs/ozone/BasicOzoneClientAdapterImpl.java | 71 +-
.../hadoop/fs/ozone/BasicOzoneFileSystem.java | 58 +-
.../apache/hadoop/fs/ozone/FileStatusAdapter.java | 12 +-
.../hadoop/fs/ozone/FilteredClassLoader.java | 1 +
.../hadoop/fs/ozone/OzoneClientAdapterFactory.java | 1 -
.../apache/hadoop/fs/ozone/OzoneFSInputStream.java | 55 +-
.../hadoop/fs/ozone/TestOzoneFSInputStream.java | 204 +-
.../hadoop/fs/ozone/TestReadWriteStatistics.java | 452 +++
hadoop-ozone/pom.xml | 2 +-
hadoop-ozone/recon/pom.xml | 5 +-
.../hadoop/ozone/recon/ConfigurationProvider.java | 17 +-
.../hadoop/ozone/recon/ReconControllerModule.java | 11 +-
.../org/apache/hadoop/ozone/recon/ReconServer.java | 38 +-
.../hadoop/ozone/recon/ReconServerConfigKeys.java | 5 +
.../org/apache/hadoop/ozone/recon/ReconUtils.java | 5 +
.../recon/recovery/ReconOmMetadataManagerImpl.java | 2 +
.../recon/scm/ReconDatanodeProtocolServer.java | 85 +
.../hadoop/ozone/recon/scm/ReconNodeManager.java} | 40 +-
.../ozone/recon/scm/ReconStorageConfig.java} | 39 +-
.../recon/scm/ReconStorageContainerManager.java | 146 +
.../hadoop/ozone/recon/scm/package-info.java} | 18 +-
.../recon/spi/ContainerDBServiceProvider.java | 10 +
.../spi/impl/ContainerDBServiceProviderImpl.java | 13 +
.../recon/spi/impl/ReconContainerDBProvider.java | 2 -
.../ozone/recon/tasks/ContainerKeyMapperTask.java | 8 +-
.../ozone/recon/tasks/FileSizeCountTask.java | 12 +-
.../ozone/recon/tasks/OMDBUpdatesHandler.java | 79 +-
.../webapps/recon/ozone-recon-web/LICENSE | 3781 ++++++++++----------
.../webapps/recon/ozone-recon-web/README.md | 8 +
.../webapps/recon/ozone-recon-web/api/db.json | 231 ++
.../webapps/recon/ozone-recon-web/api/routes.json | 3 +
.../webapps/recon/ozone-recon-web/package.json | 16 +-
.../recon/ozone-recon-web/public/index.html | 12 +-
.../webapps/recon/ozone-recon-web/src/App.less | 12 +
.../webapps/recon/ozone-recon-web/src/App.tsx | 64 +-
.../src/components/Breadcrumbs/Breadcrumbs.tsx | 12 +-
.../src/components/NavBar/NavBar.less | 21 +-
.../src/components/NavBar/NavBar.tsx | 28 +-
.../OverviewCard/OverviewCard.less} | 26 +-
.../src/components/OverviewCard/OverviewCard.tsx | 94 +
.../src/constants/breadcrumbs.constants.tsx | 5 +-
.../webapps/recon/ozone-recon-web/src/routes.tsx | 18 +-
.../breadcrumbs.constants.tsx => utils/common.tsx} | 9 +-
.../{App.less => views/Datanodes/Datanodes.less} | 63 +-
.../src/views/Datanodes/Datanodes.tsx | 187 +
.../src/views/NotFound/NotFound.tsx | 2 +-
.../Overview/Overview.less} | 15 +-
.../src/views/Overview/Overview.tsx | 115 +
.../webapps/recon/ozone-recon-web/yarn.lock | 3307 ++++++++++-------
hadoop-ozone/s3gateway/pom.xml | 1 -
.../apache/hadoop/ozone/s3/AWSV4AuthParser.java | 8 +-
.../hadoop/ozone/s3/OzoneClientProducer.java | 12 +-
.../hadoop/ozone/s3/exception/OS3Exception.java | 2 +-
.../hadoop/ozone/s3/exception/S3ErrorTable.java | 4 +-
.../s3/header/AuthenticationHeaderParser.java | 4 +
.../ozone/s3/header/AuthorizationHeaderV4.java | 18 +-
.../hadoop/ozone/s3/endpoint/TestBucketPut.java | 114 +
hadoop-ozone/tools/pom.xml | 7 -
.../hadoop/ozone/audit/parser/AuditParser.java | 7 +-
.../audit/parser/handler/LoadCommandHandler.java | 4 +-
.../audit/parser/handler/QueryCommandHandler.java | 4 +-
.../parser/handler/TemplateCommandHandler.java | 9 +-
.../hadoop/ozone/freon/DatanodeChunkGenerator.java | 68 +-
.../hadoop/ozone/freon/RandomKeyGenerator.java | 64 +-
.../apache/hadoop/ozone/genesis/GenesisUtil.java | 22 +-
hadoop-ozone/upgrade/pom.xml | 1 -
pom.xml | 9 +-
407 files changed, 12874 insertions(+), 6136 deletions(-)
diff --cc hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ReplicationManager.java
index dfb5961,bfa411d..6c7f89c
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ReplicationManager.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ReplicationManager.java
@@@ -895,23 -815,18 +883,35 @@@ public class ReplicationManager impleme
description = "Timeout for the container replication/deletion commands "
+ "sent to datanodes. After this timeout the command will be "
+ "retried.")
+ private long eventTimeout = 10 * 60 * 1000;
+
-
+ public void setInterval(long interval) {
+ this.interval = interval;
+ }
+
-
public void setEventTimeout(long eventTimeout) {
this.eventTimeout = eventTimeout;
}
++ /**
++ * The number of container replica which must be available for a node to
++ * enter maintenance.
++ */
+ @Config(key = "maintenance.replica.minimum",
+ type = ConfigType.INT,
+ defaultValue = "2",
+ tags = {SCM, OZONE},
+ description = "The minimum number of container replicas which must " +
+ " be available for a node to enter maintenance. If putting a " +
+ " node into maintenance reduces the available replicas for any " +
+ " container below this level, the node will remain in the " +
+ " entering maintenance state until a new replica is created.")
++ private int maintenanceReplicaMinimum = 2;
++
+ public void setMaintenanceReplicaMinimum(int replicaCount) {
+ this.maintenanceReplicaMinimum = replicaCount;
+ }
+
public long getInterval() {
return interval;
}
diff --cc hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestContainerPlacementFactory.java
index 54c4080,b685ba9..05ea5be
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestContainerPlacementFactory.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestContainerPlacementFactory.java
@@@ -19,8 -19,9 +19,8 @@@ package org.apache.hadoop.hdds.scm.cont
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.hdds.protocol.DatanodeDetails;
+ import org.apache.hadoop.hdds.protocol.MockDatanodeDetails;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState;
import org.apache.hadoop.hdds.scm.ScmConfigKeys;
- import org.apache.hadoop.hdds.scm.TestUtils;
import org.apache.hadoop.hdds.scm.container.placement.metrics.SCMNodeMetric;
import org.apache.hadoop.hdds.scm.exceptions.SCMException;
import org.apache.hadoop.hdds.scm.net.NetworkTopology;
diff --cc hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestSCMContainerPlacementCapacity.java
index fcb4f44,ddca0fa..ac6f6c2
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestSCMContainerPlacementCapacity.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestSCMContainerPlacementCapacity.java
@@@ -24,7 -24,8 +24,7 @@@ import java.util.Map
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.hdds.protocol.DatanodeDetails;
- import org.apache.hadoop.hdds.scm.TestUtils;
+ import org.apache.hadoop.hdds.protocol.MockDatanodeDetails;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState;
import org.apache.hadoop.hdds.scm.container.placement.metrics.SCMNodeMetric;
import org.apache.hadoop.hdds.scm.exceptions.SCMException;
import org.apache.hadoop.hdds.scm.node.NodeManager;
diff --cc hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestSCMContainerPlacementRackAware.java
index abf7f9f,992f1c5..699c947
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestSCMContainerPlacementRackAware.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestSCMContainerPlacementRackAware.java
@@@ -24,7 -24,8 +24,7 @@@ import java.util.List
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.hdds.protocol.DatanodeDetails;
- import org.apache.hadoop.hdds.scm.TestUtils;
+ import org.apache.hadoop.hdds.protocol.MockDatanodeDetails;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState;
import org.apache.hadoop.hdds.scm.container.placement.metrics.SCMNodeMetric;
import org.apache.hadoop.hdds.scm.exceptions.SCMException;
import org.apache.hadoop.hdds.scm.net.NetConstants;
diff --cc hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestSCMContainerPlacementRandom.java
index 5edb25f,91509a0..9e8c336
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestSCMContainerPlacementRandom.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestSCMContainerPlacementRandom.java
@@@ -22,7 -22,8 +22,7 @@@ import java.util.List
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.hdds.protocol.DatanodeDetails;
- import org.apache.hadoop.hdds.scm.TestUtils;
+ import org.apache.hadoop.hdds.protocol.MockDatanodeDetails;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState;
import org.apache.hadoop.hdds.scm.container.placement.metrics.SCMNodeMetric;
import org.apache.hadoop.hdds.scm.exceptions.SCMException;
import org.apache.hadoop.hdds.scm.node.NodeManager;
diff --cc hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/states/TestContainerReplicaCount.java
index 6c239c3,0000000..f6e2187
mode 100644,000000..100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/states/TestContainerReplicaCount.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/states/TestContainerReplicaCount.java
@@@ -1,442 -1,0 +1,442 @@@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdds.scm.container.states;
+
+import org.apache.hadoop.hdds.protocol.DatanodeDetails;
++import org.apache.hadoop.hdds.protocol.MockDatanodeDetails;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
+import org.apache.hadoop.hdds.protocol.proto
+ .StorageContainerDatanodeProtocolProtos.ContainerReplicaProto;
- import org.apache.hadoop.hdds.scm.TestUtils;
+import org.apache.hadoop.hdds.scm.container.ContainerID;
+import org.apache.hadoop.hdds.scm.container.ContainerInfo;
+import org.apache.hadoop.hdds.scm.container.ContainerReplica;
+import org.apache.hadoop.hdds.scm.container.ContainerReplicaCount;
+import org.junit.Before;
+import org.junit.Test;
+import java.util.*;
+import static junit.framework.TestCase.assertEquals;
+import static junit.framework.TestCase.assertTrue;
+import static org.apache.hadoop.hdds.protocol.proto
+ .StorageContainerDatanodeProtocolProtos.ContainerReplicaProto.State.CLOSED;
+import static org.apache.hadoop.hdds.protocol.proto
+ .StorageContainerDatanodeProtocolProtos.ContainerReplicaProto.State.OPEN;
+import static org.apache.hadoop.hdds.protocol.proto
+ .StorageContainerDatanodeProtocolProtos.ContainerReplicaProto.State
+ .DECOMMISSIONED;
+import static org.apache.hadoop.hdds.protocol.proto
+ .StorageContainerDatanodeProtocolProtos.ContainerReplicaProto.State
+ .MAINTENANCE;
+import static org.junit.Assert.assertFalse;
+
+/**
+ * Class used to test the ContainerReplicaCount class.
+ */
+public class TestContainerReplicaCount {
+
+ @Before
+ public void setup() {
+ }
+
+ @Test
+ public void testThreeHealthyReplica() {
+ Set<ContainerReplica> replica = registerNodes(CLOSED, CLOSED, CLOSED);
+ ContainerInfo container = createContainer(HddsProtos.LifeCycleState.CLOSED);
+ ContainerReplicaCount rcnt =
+ new ContainerReplicaCount(container, replica, 0, 0, 3, 2);
+ validate(rcnt, true, 0, false);
+ }
+
+ @Test
+ public void testTwoHealthyReplica() {
+ Set<ContainerReplica> replica = registerNodes(CLOSED, CLOSED);
+ ContainerInfo container = createContainer(HddsProtos.LifeCycleState.CLOSED);
+ ContainerReplicaCount rcnt =
+ new ContainerReplicaCount(container, replica, 0, 0, 3, 2);
+ validate(rcnt, false, 1, false);
+ }
+
+ @Test
+ public void testOneHealthyReplica() {
+ Set<ContainerReplica> replica = registerNodes(CLOSED);
+ ContainerInfo container = createContainer(HddsProtos.LifeCycleState.CLOSED);
+ ContainerReplicaCount rcnt =
+ new ContainerReplicaCount(container, replica, 0, 0, 3, 2);
+ validate(rcnt, false, 2, false);
+ }
+
+ @Test
+ public void testTwoHealthyAndInflightAdd() {
+
+ Set<ContainerReplica> replica = registerNodes(CLOSED, CLOSED);
+ ContainerInfo container = createContainer(HddsProtos.LifeCycleState.CLOSED);
+ ContainerReplicaCount rcnt =
+ new ContainerReplicaCount(container, replica, 1, 0, 3, 2);
+ validate(rcnt, false, 0, false);
+ }
+
+ @Test
+ /**
+ * This does not schedule a container to be removed, as the inFlight add may
+ * fail and then the delete would make things under-replicated. Once the add
+ * completes there will be 4 healthy and it will get taken care of then.
+ */
+ public void testThreeHealthyAndInflightAdd() {
+ Set<ContainerReplica> replica = registerNodes(CLOSED, CLOSED, CLOSED);
+ ContainerInfo container = createContainer(HddsProtos.LifeCycleState.CLOSED);
+ ContainerReplicaCount rcnt =
+ new ContainerReplicaCount(container, replica, 1, 0, 3, 2);
+ validate(rcnt, true, 0, false);
+ }
+
+ @Test
+ /**
+ * As the inflight delete may fail, but as it will make the the container
+ * under replicated, we go ahead and schedule another replica to be added.
+ */
+ public void testThreeHealthyAndInflightDelete() {
+ Set<ContainerReplica> replica = registerNodes(CLOSED, CLOSED, CLOSED);
+ ContainerInfo container = createContainer(HddsProtos.LifeCycleState.CLOSED);
+ ContainerReplicaCount rcnt =
+ new ContainerReplicaCount(container, replica, 0, 1, 3, 2);
+ validate(rcnt, false, 1, false);
+ }
+
+ @Test
+ /**
+ * This is NOT sufficiently replicated as the inflight add may fail and the
+ * inflight del could succeed, leaving only 2 healthy replicas.
+ */
+ public void testThreeHealthyAndInflightAddAndInFlightDelete() {
+ Set<ContainerReplica> replica = registerNodes(CLOSED, CLOSED, CLOSED);
+ ContainerInfo container = createContainer(HddsProtos.LifeCycleState.CLOSED);
+ ContainerReplicaCount rcnt =
+ new ContainerReplicaCount(container, replica, 1, 1, 3, 2);
+ validate(rcnt, false, 0, false);
+ }
+
+ @Test
+ public void testFourHealthyReplicas() {
+ Set<ContainerReplica> replica =
+ registerNodes(CLOSED, CLOSED, CLOSED, CLOSED);
+ ContainerInfo container = createContainer(HddsProtos.LifeCycleState.CLOSED);
+ ContainerReplicaCount rcnt =
+ new ContainerReplicaCount(container, replica, 0, 0, 3, 2);
+ validate(rcnt, true, -1, true);
+ }
+
+ @Test
+ public void testFourHealthyReplicasAndInFlightDelete() {
+ Set<ContainerReplica> replica =
+ registerNodes(CLOSED, CLOSED, CLOSED, CLOSED);
+ ContainerInfo container = createContainer(HddsProtos.LifeCycleState.CLOSED);
+ ContainerReplicaCount rcnt =
+ new ContainerReplicaCount(container, replica, 0, 1, 3, 2);
+ validate(rcnt, true, 0, false);
+ }
+
+ @Test
+ public void testFourHealthyReplicasAndTwoInFlightDelete() {
+ Set<ContainerReplica> replica =
+ registerNodes(CLOSED, CLOSED, CLOSED, CLOSED);
+ ContainerInfo container = createContainer(HddsProtos.LifeCycleState.CLOSED);
+ ContainerReplicaCount rcnt =
+ new ContainerReplicaCount(container, replica, 0, 2, 3, 2);
+ validate(rcnt, false, 1, false);
+ }
+
+ @Test
+ public void testOneHealthyReplicaRepFactorOne() {
+ Set<ContainerReplica> replica = registerNodes(CLOSED);
+ ContainerInfo container = createContainer(HddsProtos.LifeCycleState.CLOSED);
+ ContainerReplicaCount rcnt =
+ new ContainerReplicaCount(container, replica, 0, 0, 1, 2);
+ validate(rcnt, true, 0, false);
+ }
+
+ @Test
+ public void testOneHealthyReplicaRepFactorOneInFlightDelete() {
+ Set<ContainerReplica> replica = registerNodes(CLOSED);
+ ContainerInfo container = createContainer(HddsProtos.LifeCycleState.CLOSED);
+ ContainerReplicaCount rcnt =
+ new ContainerReplicaCount(container, replica, 0, 1, 1, 2);
+ validate(rcnt, false, 1, false);
+ }
+
+ @Test
+ public void testTwoHealthyReplicaTwoInflightAdd() {
+ Set<ContainerReplica> replica = registerNodes(CLOSED, CLOSED);
+ ContainerInfo container = createContainer(HddsProtos.LifeCycleState.CLOSED);
+ ContainerReplicaCount rcnt =
+ new ContainerReplicaCount(container, replica, 2, 0, 3, 2);
+ validate(rcnt, false, 0, false);
+ }
+
+ /**
+ * From here consider decommission replicas.
+ */
+
+ @Test
+ public void testThreeHealthyAndTwoDecommission() {
+ Set<ContainerReplica> replica = registerNodes(CLOSED, CLOSED, CLOSED,
+ DECOMMISSIONED, DECOMMISSIONED);
+ ContainerInfo container = createContainer(HddsProtos.LifeCycleState.CLOSED);
+ ContainerReplicaCount rcnt =
+ new ContainerReplicaCount(container, replica, 0, 0, 3, 2);
+ validate(rcnt, true, 0, false);
+ }
+
+ @Test
+ public void testOneDecommissionedReplica() {
+ Set<ContainerReplica> replica =
+ registerNodes(CLOSED, CLOSED, DECOMMISSIONED);
+ ContainerInfo container = createContainer(HddsProtos.LifeCycleState.CLOSED);
+ ContainerReplicaCount rcnt =
+ new ContainerReplicaCount(container, replica, 0, 0, 3, 2);
+ validate(rcnt, false, 1, false);
+ }
+
+ @Test
+ public void testTwoHealthyOneDecommissionedneInFlightAdd() {
+ Set<ContainerReplica> replica =
+ registerNodes(CLOSED, CLOSED, DECOMMISSIONED);
+ ContainerInfo container = createContainer(HddsProtos.LifeCycleState.CLOSED);
+ ContainerReplicaCount rcnt =
+ new ContainerReplicaCount(container, replica, 1, 0, 3, 2);
+ validate(rcnt, false, 0, false);
+ }
+
+ @Test
+ public void testAllDecommissioned() {
+ Set<ContainerReplica> replica =
+ registerNodes(DECOMMISSIONED, DECOMMISSIONED, DECOMMISSIONED);
+ ContainerInfo container = createContainer(HddsProtos.LifeCycleState.CLOSED);
+ ContainerReplicaCount rcnt =
+ new ContainerReplicaCount(container, replica, 0, 0, 3, 2);
+ validate(rcnt, false, 3, false);
+ }
+
+ @Test
+ public void testAllDecommissionedRepFactorOne() {
+ Set<ContainerReplica> replica = registerNodes(DECOMMISSIONED);
+ ContainerInfo container = createContainer(HddsProtos.LifeCycleState.CLOSED);
+ ContainerReplicaCount rcnt =
+ new ContainerReplicaCount(container, replica, 0, 0, 1, 2);
+ validate(rcnt, false, 1, false);
+
+ }
+
+ @Test
+ public void testAllDecommissionedRepFactorOneInFlightAdd() {
+ Set<ContainerReplica> replica = registerNodes(DECOMMISSIONED);
+ ContainerInfo container = createContainer(HddsProtos.LifeCycleState.CLOSED);
+ ContainerReplicaCount rcnt =
+ new ContainerReplicaCount(container, replica, 1, 0, 1, 2);
+ validate(rcnt, false, 0, false);
+ }
+
+ @Test
+ public void testOneHealthyOneDecommissioningRepFactorOne() {
+ Set<ContainerReplica> replica = registerNodes(DECOMMISSIONED, CLOSED);
+ ContainerInfo container = createContainer(HddsProtos.LifeCycleState.CLOSED);
+ ContainerReplicaCount rcnt =
+ new ContainerReplicaCount(container, replica, 0, 0, 1, 2);
+ validate(rcnt, true, 0, false);
+ }
+
+ /**
+ * Maintenance tests from here.
+ */
+
+ @Test
+ public void testOneHealthyTwoMaintenanceMinRepOfTwo() {
+ Set<ContainerReplica> replica =
+ registerNodes(CLOSED, MAINTENANCE, MAINTENANCE);
+ ContainerInfo container = createContainer(HddsProtos.LifeCycleState.CLOSED);
+ ContainerReplicaCount rcnt =
+ new ContainerReplicaCount(container, replica, 0, 0, 3, 2);
+ validate(rcnt, false, 1, false);
+ }
+
+ @Test
+ public void testOneHealthyThreeMaintenanceMinRepOfTwo() {
+ Set<ContainerReplica> replica = registerNodes(CLOSED,
+ MAINTENANCE, MAINTENANCE, MAINTENANCE);
+ ContainerInfo container = createContainer(HddsProtos.LifeCycleState.CLOSED);
+ ContainerReplicaCount rcnt =
+ new ContainerReplicaCount(container, replica, 0, 0, 3, 2);
+ validate(rcnt, false, 1, false);
+ }
+
+ @Test
+ public void testOneHealthyTwoMaintenanceMinRepOfOne() {
+ Set<ContainerReplica> replica =
+ registerNodes(CLOSED, MAINTENANCE, MAINTENANCE);
+ ContainerInfo container = createContainer(HddsProtos.LifeCycleState.CLOSED);
+ ContainerReplicaCount rcnt =
+ new ContainerReplicaCount(container, replica, 0, 0, 3, 1);
+ validate(rcnt, true, 0, false);
+ }
+
+ @Test
+ public void testOneHealthyThreeMaintenanceMinRepOfTwoInFlightAdd() {
+ Set<ContainerReplica> replica = registerNodes(CLOSED,
+ MAINTENANCE, MAINTENANCE, MAINTENANCE);
+ ContainerInfo container = createContainer(HddsProtos.LifeCycleState.CLOSED);
+ ContainerReplicaCount rcnt =
+ new ContainerReplicaCount(container, replica, 1, 0, 3, 2);
+ validate(rcnt, false, 0, false);
+ }
+
+ @Test
+ public void testAllMaintenance() {
+ Set<ContainerReplica> replica =
+ registerNodes(MAINTENANCE, MAINTENANCE, MAINTENANCE);
+ ContainerInfo container = createContainer(HddsProtos.LifeCycleState.CLOSED);
+ ContainerReplicaCount rcnt =
+ new ContainerReplicaCount(container, replica, 0, 0, 3, 2);
+ validate(rcnt, false, 2, false);
+ }
+
+ @Test
+ /**
+ * As we have exactly 3 healthy, but then an excess of maintenance copies
+ * we ignore the over-replication caused by the maintenance copies until they
+ * come back online, and then deal with them.
+ */
+ public void testThreeHealthyTwoInMaintenance() {
+ Set<ContainerReplica> replica = registerNodes(CLOSED, CLOSED, CLOSED,
+ MAINTENANCE, MAINTENANCE);
+ ContainerInfo container = createContainer(HddsProtos.LifeCycleState.CLOSED);
+ ContainerReplicaCount rcnt =
+ new ContainerReplicaCount(container, replica, 0, 0, 3, 2);
+ validate(rcnt, true, 0, false);
+ }
+
+ @Test
+ /**
+ * This is somewhat similar to testThreeHealthyTwoInMaintenance() except now
+ * one of the maintenance copies has become healthy and we will need to remove
+ * the over-replicated healthy container.
+ */
+ public void testFourHealthyOneInMaintenance() {
+ Set<ContainerReplica> replica =
+ registerNodes(CLOSED, CLOSED, CLOSED, CLOSED, MAINTENANCE);
+ ContainerInfo container = createContainer(HddsProtos.LifeCycleState.CLOSED);
+ ContainerReplicaCount rcnt =
+ new ContainerReplicaCount(container, replica, 0, 0, 3, 2);
+ validate(rcnt, true, -1, true);
+ }
+
+ @Test
+ public void testOneMaintenanceMinRepOfTwoRepFactorOne() {
+ Set<ContainerReplica> replica = registerNodes(MAINTENANCE);
+ ContainerInfo container = createContainer(HddsProtos.LifeCycleState.CLOSED);
+ ContainerReplicaCount rcnt =
+ new ContainerReplicaCount(container, replica, 0, 0, 1, 2);
+ validate(rcnt, false, 1, false);
+ }
+
+ @Test
+ public void testOneMaintenanceMinRepOfTwoRepFactorOneInFlightAdd() {
+ Set<ContainerReplica> replica = registerNodes(MAINTENANCE);
+ ContainerInfo container = createContainer(HddsProtos.LifeCycleState.CLOSED);
+ ContainerReplicaCount rcnt =
+ new ContainerReplicaCount(container, replica, 1, 0, 1, 2);
+ validate(rcnt, false, 0, false);
+ }
+
+ @Test
+ public void testOneHealthyOneMaintenanceRepFactorOne() {
+ Set<ContainerReplica> replica = registerNodes(MAINTENANCE, CLOSED);
+ ContainerInfo container = createContainer(HddsProtos.LifeCycleState.CLOSED);
+ ContainerReplicaCount rcnt =
+ new ContainerReplicaCount(container, replica, 0, 0, 1, 2);
+ validate(rcnt, true, 0, false);
+ }
+
+ @Test
+ public void testTwoDecomTwoMaintenanceOneInflightAdd() {
+ Set<ContainerReplica> replica =
+ registerNodes(DECOMMISSIONED, DECOMMISSIONED, MAINTENANCE, MAINTENANCE);
+ ContainerInfo container = createContainer(HddsProtos.LifeCycleState.CLOSED);
+ ContainerReplicaCount rcnt =
+ new ContainerReplicaCount(container, replica, 1, 0, 3, 2);
+ validate(rcnt, false, 1, false);
+ }
+
+ @Test
+ public void testHealthyContainerIsHealthy() {
+ Set<ContainerReplica> replica =
+ registerNodes(CLOSED, CLOSED, CLOSED);
+ ContainerInfo container = createContainer(HddsProtos.LifeCycleState.CLOSED);
+ ContainerReplicaCount rcnt =
+ new ContainerReplicaCount(container, replica, 0, 0, 3, 2);
+ assertTrue(rcnt.isHealthy());
+ }
+
+ @Test
+ public void testIsHealthyWithDifferentReplicaStateNotHealthy() {
+ Set<ContainerReplica> replica =
+ registerNodes(CLOSED, CLOSED, OPEN);
+ ContainerInfo container = createContainer(HddsProtos.LifeCycleState.CLOSED);
+ ContainerReplicaCount rcnt =
+ new ContainerReplicaCount(container, replica, 0, 0, 3, 2);
+ assertFalse(rcnt.isHealthy());
+ }
+
+ @Test
+ public void testIsHealthyWithMaintReplicaIsHealthy() {
+ Set<ContainerReplica> replica =
+ registerNodes(CLOSED, CLOSED, MAINTENANCE, MAINTENANCE);
+ ContainerInfo container = createContainer(HddsProtos.LifeCycleState.CLOSED);
+ ContainerReplicaCount rcnt =
+ new ContainerReplicaCount(container, replica, 0, 0, 3, 2);
+ assertTrue(rcnt.isHealthy());
+ }
+
+ private void validate(ContainerReplicaCount rcnt,
+ boolean sufficientlyReplicated, int replicaDelta,
+ boolean overReplicated) {
+ assertEquals(sufficientlyReplicated, rcnt.isSufficientlyReplicated());
+ assertEquals(overReplicated, rcnt.isOverReplicated());
+ assertEquals(replicaDelta, rcnt.additionalReplicaNeeded());
+ }
+
+ private Set<ContainerReplica> registerNodes(
+ ContainerReplicaProto.State... states) {
+ Set<ContainerReplica> replica = new HashSet<>();
+ for (ContainerReplicaProto.State s : states) {
- DatanodeDetails dn = TestUtils.randomDatanodeDetails();
++ DatanodeDetails dn = MockDatanodeDetails.randomDatanodeDetails();
+ replica.add(new ContainerReplica.ContainerReplicaBuilder()
+ .setContainerID(new ContainerID(1))
+ .setContainerState(s)
+ .setDatanodeDetails(dn)
+ .setOriginNodeId(dn.getUuid())
+ .setSequenceId(1)
+ .build());
+ }
+ return replica;
+ }
+
+ private ContainerInfo createContainer(HddsProtos.LifeCycleState state) {
+ return new ContainerInfo.Builder()
+ .setContainerID(new ContainerID(1).getId())
+ .setState(state)
+ .build();
+ }
+}
diff --cc hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestDatanodeAdminMonitor.java
index f3b7d8f,0000000..cb78703
mode 100644,000000..100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestDatanodeAdminMonitor.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestDatanodeAdminMonitor.java
@@@ -1,532 -1,0 +1,532 @@@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdds.scm.node;
+
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.protocol.DatanodeDetails;
++import org.apache.hadoop.hdds.protocol.MockDatanodeDetails;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
+import org.apache.hadoop.hdds.protocol.proto
+ .StorageContainerDatanodeProtocolProtos.ContainerReplicaProto;
- import org.apache.hadoop.hdds.scm.TestUtils;
+import org.apache.hadoop.hdds.scm.container.*;
+import org.apache.hadoop.hdds.scm.events.SCMEvents;
+import org.apache.hadoop.hdds.scm.node.states.NodeNotFoundException;
+import org.apache.hadoop.hdds.server.events.EventHandler;
+import org.apache.hadoop.hdds.server.events.EventPublisher;
+import org.apache.hadoop.hdds.server.events.EventQueue;
+import org.apache.hadoop.security.authentication.client.AuthenticationException;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+import org.mockito.Mockito;
+import java.io.IOException;
+import java.util.HashSet;
+import java.util.Set;
+import java.util.concurrent.atomic.AtomicInteger;
+
+import static junit.framework.TestCase.assertEquals;
+import static junit.framework.TestCase.assertTrue;
+import static org.mockito.Mockito.reset;
+
+/**
+ * Tests to ensure the DatanodeAdminMonitor is working correctly. This class
+ * uses mocks or basic implementations of the key classes outside of the
+ * datanodeAdminMonitor to allow it to be tested in isolation.
+ */
+public class TestDatanodeAdminMonitor {
+
+ private SimpleMockNodeManager nodeManager;
+ private OzoneConfiguration conf;
+ private DatanodeAdminMonitorImpl monitor;
+ private DatanodeAdminHandler startAdminHandler;
+ private ReplicationManager repManager;
+ private EventQueue eventQueue;
+
+ @Before
+ public void setup() throws IOException, AuthenticationException {
+ conf = new OzoneConfiguration();
+
+ eventQueue = new EventQueue();
+ startAdminHandler = new DatanodeAdminHandler();
+ eventQueue.addHandler(SCMEvents.START_ADMIN_ON_NODE, startAdminHandler);
+
+ nodeManager = new SimpleMockNodeManager();
+
+ repManager = Mockito.mock(ReplicationManager.class);
+
+ monitor =
+ new DatanodeAdminMonitorImpl(conf, eventQueue, nodeManager, repManager);
+ }
+
+ @After
+ public void teardown() {
+ }
+
+ @Test
+ public void testNodeCanBeQueuedAndCancelled() {
- DatanodeDetails dn = TestUtils.randomDatanodeDetails();
++ DatanodeDetails dn = MockDatanodeDetails.randomDatanodeDetails();
+ monitor.startMonitoring(dn, 0);
+ assertEquals(1, monitor.getPendingCount());
+
+ monitor.stopMonitoring(dn);
+ assertEquals(0, monitor.getPendingCount());
+ assertEquals(1, monitor.getCancelledCount());
+
+ monitor.startMonitoring(dn, 0);
+ assertEquals(1, monitor.getPendingCount());
+ assertEquals(0, monitor.getCancelledCount());
+ }
+
+ /**
+ * In this test we ensure there are some pipelines for the node being
+ * decommissioned, but there are no containers. Therefore the workflow
+ * must wait until the pipelines have closed before completing the flow.
+ */
+ @Test
+ public void testClosePipelinesEventFiredWhenAdminStarted()
+ throws NodeNotFoundException{
- DatanodeDetails dn1 = TestUtils.randomDatanodeDetails();
++ DatanodeDetails dn1 = MockDatanodeDetails.randomDatanodeDetails();
+ nodeManager.register(dn1,
+ new NodeStatus(HddsProtos.NodeOperationalState.DECOMMISSIONING,
+ HddsProtos.NodeState.HEALTHY));
+ // Ensure the node has some pipelines
+ nodeManager.setPipelines(dn1, 2);
+ // Add the node to the monitor
+ monitor.startMonitoring(dn1, 0);
+ monitor.run();
+ // Ensure a StartAdmin event was fired
+ eventQueue.processAll(20000);
+ assertEquals(1, startAdminHandler.getInvocation());
+ // Ensure a node is now tracked for decommission
+ assertEquals(1, monitor.getTrackedNodeCount());
+ // Ensure the node remains decommissioning
+ assertEquals(HddsProtos.NodeOperationalState.DECOMMISSIONING,
+ nodeManager.getNodeStatus(dn1).getOperationalState());
+ // Run the monitor again, and it should remain decommissioning
+ monitor.run();
+ assertEquals(HddsProtos.NodeOperationalState.DECOMMISSIONING,
+ nodeManager.getNodeStatus(dn1).getOperationalState());
+
+ // Clear the pipelines and the node should transition to DECOMMISSIONED
+ nodeManager.setPipelines(dn1, 0);
+ monitor.run();
+ assertEquals(0, monitor.getTrackedNodeCount());
+ assertEquals(HddsProtos.NodeOperationalState.DECOMMISSIONED,
+ nodeManager.getNodeStatus(dn1).getOperationalState());
+ }
+
+ /**
+ * In this test, there are no open pipelines and no containers on the node.
+ * Therefore, we expect the decommission flow to finish on the first run
+ * on the monitor, leaving zero nodes tracked and the node in DECOMMISSIONED
+ * state.
+ */
+ @Test
+ public void testDecommissionNodeTransitionsToCompleteWhenNoContainers()
+ throws NodeNotFoundException {
- DatanodeDetails dn1 = TestUtils.randomDatanodeDetails();
++ DatanodeDetails dn1 = MockDatanodeDetails.randomDatanodeDetails();
+ nodeManager.register(dn1,
+ new NodeStatus(HddsProtos.NodeOperationalState.DECOMMISSIONING,
+ HddsProtos.NodeState.HEALTHY));
+
+ // Add the node to the monitor. By default we have zero pipelines and
+ // zero containers in the test setup, so the node should immediately
+ // transition to COMPLETED state
+ monitor.startMonitoring(dn1, 0);
+ monitor.run();
+ assertEquals(0, monitor.getTrackedNodeCount());
+ NodeStatus newStatus = nodeManager.getNodeStatus(dn1);
+ assertEquals(HddsProtos.NodeOperationalState.DECOMMISSIONED,
+ newStatus.getOperationalState());
+ }
+
+ @Test
+ public void testDecommissionNodeWaitsForContainersToReplicate()
+ throws NodeNotFoundException, ContainerNotFoundException {
- DatanodeDetails dn1 = TestUtils.randomDatanodeDetails();
++ DatanodeDetails dn1 = MockDatanodeDetails.randomDatanodeDetails();
+ nodeManager.register(dn1,
+ new NodeStatus(HddsProtos.NodeOperationalState.DECOMMISSIONING,
+ HddsProtos.NodeState.HEALTHY));
+
+ nodeManager.setContainers(dn1, generateContainers(3));
+ // Mock Replication Manager to return ContainerReplicaCount's which
+ // always have a DECOMMISSIONED replica.
+ mockGetContainerReplicaCount(
+ HddsProtos.LifeCycleState.CLOSED,
+ ContainerReplicaProto.State.DECOMMISSIONED,
+ ContainerReplicaProto.State.CLOSED,
+ ContainerReplicaProto.State.CLOSED);
+
+ // Run the monitor for the first time and the node will transition to
+ // REPLICATE_CONTAINERS as there are no pipelines to close.
+ monitor.startMonitoring(dn1, 0);
+ monitor.run();
+ DatanodeAdminNodeDetails node = getFirstTrackedNode();
+ assertEquals(1, monitor.getTrackedNodeCount());
+ assertEquals(HddsProtos.NodeOperationalState.DECOMMISSIONING,
+ nodeManager.getNodeStatus(dn1).getOperationalState());
+
+ // Running the monitor again causes it to remain DECOMMISSIONING
+ // as nothing has changed.
+ monitor.run();
+ assertEquals(HddsProtos.NodeOperationalState.DECOMMISSIONING,
+ nodeManager.getNodeStatus(dn1).getOperationalState());
+ assertEquals(0, node.getSufficientlyReplicatedContainers());
+ assertEquals(0, node.getUnHealthyContainers());
+ assertEquals(3, node.getUnderReplicatedContainers());
+
+ // Now change the replicationManager mock to return 3 CLOSED replicas
+ // and the node should complete the REPLICATE_CONTAINERS step, moving to
+ // complete which will end the decommission workflow
+ mockGetContainerReplicaCount(
+ HddsProtos.LifeCycleState.CLOSED,
+ ContainerReplicaProto.State.CLOSED,
+ ContainerReplicaProto.State.CLOSED,
+ ContainerReplicaProto.State.CLOSED);
+
+ monitor.run();
+
+ assertEquals(0, monitor.getTrackedNodeCount());
+ assertEquals(3, node.getSufficientlyReplicatedContainers());
+ assertEquals(0, node.getUnHealthyContainers());
+ assertEquals(0, node.getUnderReplicatedContainers());
+ assertEquals(HddsProtos.NodeOperationalState.DECOMMISSIONED,
+ nodeManager.getNodeStatus(dn1).getOperationalState());
+ }
+
+ @Test
+ public void testDecommissionAbortedWhenNodeInUnexpectedState()
+ throws NodeNotFoundException, ContainerNotFoundException {
- DatanodeDetails dn1 = TestUtils.randomDatanodeDetails();
++ DatanodeDetails dn1 = MockDatanodeDetails.randomDatanodeDetails();
+ nodeManager.register(dn1,
+ new NodeStatus(HddsProtos.NodeOperationalState.DECOMMISSIONING,
+ HddsProtos.NodeState.HEALTHY));
+
+ nodeManager.setContainers(dn1, generateContainers(3));
+ mockGetContainerReplicaCount(
+ HddsProtos.LifeCycleState.CLOSED,
+ ContainerReplicaProto.State.DECOMMISSIONED,
+ ContainerReplicaProto.State.CLOSED,
+ ContainerReplicaProto.State.CLOSED);
+
+ // Add the node to the monitor, it should have 3 under-replicated containers
+ // after the first run
+ monitor.startMonitoring(dn1, 0);
+ monitor.run();
+ assertEquals(1, monitor.getTrackedNodeCount());
+ DatanodeAdminNodeDetails node = getFirstTrackedNode();
+ assertEquals(HddsProtos.NodeOperationalState.DECOMMISSIONING,
+ nodeManager.getNodeStatus(dn1).getOperationalState());
+ assertEquals(3, node.getUnderReplicatedContainers());
+
+ // Set the node to dead, and then the workflow should get aborted, setting
+ // the node state back to IN_SERVICE on the next run.
+ nodeManager.setNodeStatus(dn1,
+ new NodeStatus(HddsProtos.NodeOperationalState.IN_SERVICE,
+ HddsProtos.NodeState.HEALTHY));
+ monitor.run();
+ assertEquals(0, monitor.getTrackedNodeCount());
+ assertEquals(HddsProtos.NodeOperationalState.IN_SERVICE,
+ nodeManager.getNodeStatus(dn1).getOperationalState());
+ }
+
+ @Test
+ public void testDecommissionAbortedWhenNodeGoesDead()
+ throws NodeNotFoundException, ContainerNotFoundException {
- DatanodeDetails dn1 = TestUtils.randomDatanodeDetails();
++ DatanodeDetails dn1 = MockDatanodeDetails.randomDatanodeDetails();
+ nodeManager.register(dn1,
+ new NodeStatus(HddsProtos.NodeOperationalState.DECOMMISSIONING,
+ HddsProtos.NodeState.HEALTHY));
+
+ nodeManager.setContainers(dn1, generateContainers(3));
+ mockGetContainerReplicaCount(
+ HddsProtos.LifeCycleState.CLOSED,
+ ContainerReplicaProto.State.DECOMMISSIONED,
+ ContainerReplicaProto.State.CLOSED,
+ ContainerReplicaProto.State.CLOSED);
+
+ // Add the node to the monitor, it should have 3 under-replicated containers
+ // after the first run
+ monitor.startMonitoring(dn1, 0);
+ monitor.run();
+ assertEquals(1, monitor.getTrackedNodeCount());
+ DatanodeAdminNodeDetails node = getFirstTrackedNode();
+ assertEquals(HddsProtos.NodeOperationalState.DECOMMISSIONING,
+ nodeManager.getNodeStatus(dn1).getOperationalState());
+ assertEquals(3, node.getUnderReplicatedContainers());
+
+ // Set the node to dead, and then the workflow should get aborted, setting
+ // the node state back to IN_SERVICE.
+ nodeManager.setNodeStatus(dn1,
+ new NodeStatus(HddsProtos.NodeOperationalState.DECOMMISSIONING,
+ HddsProtos.NodeState.DEAD));
+ monitor.run();
+ assertEquals(0, monitor.getTrackedNodeCount());
+ assertEquals(HddsProtos.NodeOperationalState.IN_SERVICE,
+ nodeManager.getNodeStatus(dn1).getOperationalState());
+ }
+
+ @Test
+ public void testMaintenanceWaitsForMaintenanceToComplete()
+ throws NodeNotFoundException {
- DatanodeDetails dn1 = TestUtils.randomDatanodeDetails();
++ DatanodeDetails dn1 = MockDatanodeDetails.randomDatanodeDetails();
+ nodeManager.register(dn1,
+ new NodeStatus(HddsProtos.NodeOperationalState.ENTERING_MAINTENANCE,
+ HddsProtos.NodeState.HEALTHY));
+
+ // Add the node to the monitor, it should transiting to
+ // IN_MAINTENANCE as there are no containers to replicate.
+ monitor.startMonitoring(dn1, 1);
+ monitor.run();
+ assertEquals(1, monitor.getTrackedNodeCount());
+ DatanodeAdminNodeDetails node = getFirstTrackedNode();
+ assertEquals(0, node.getUnderReplicatedContainers());
+ assertTrue(nodeManager.getNodeStatus(dn1).isInMaintenance());
+
+ // Running the monitor again causes the node to remain in maintenance
+ monitor.run();
+ assertEquals(1, monitor.getTrackedNodeCount());
+ assertTrue(nodeManager.getNodeStatus(dn1).isInMaintenance());
+
+ // Set the maintenance end time to a time in the past and then the node
+ // should complete the workflow and transition to IN_SERVICE
+ node.setMaintenanceEnd(-1);
+ monitor.run();
+ assertEquals(0, monitor.getTrackedNodeCount());
+ assertEquals(HddsProtos.NodeOperationalState.IN_SERVICE,
+ nodeManager.getNodeStatus(dn1).getOperationalState());
+ }
+
+ @Test
+ public void testMaintenanceEndsClosingPipelines()
+ throws NodeNotFoundException {
- DatanodeDetails dn1 = TestUtils.randomDatanodeDetails();
++ DatanodeDetails dn1 = MockDatanodeDetails.randomDatanodeDetails();
+ nodeManager.register(dn1,
+ new NodeStatus(HddsProtos.NodeOperationalState.ENTERING_MAINTENANCE,
+ HddsProtos.NodeState.HEALTHY));
+ // Ensure the node has some pipelines
+ nodeManager.setPipelines(dn1, 2);
+ // Add the node to the monitor
+ monitor.startMonitoring(dn1, 1);
+ monitor.run();
+ DatanodeAdminNodeDetails node = getFirstTrackedNode();
+ assertEquals(1, monitor.getTrackedNodeCount());
+ assertTrue(nodeManager.getNodeStatus(dn1).isEnteringMaintenance());
+
+ // Set the maintenance end time to the past and the node should complete
+ // the workflow and return to IN_SERVICE
+ node.setMaintenanceEnd(-1);
+ monitor.run();
+ assertEquals(0, monitor.getTrackedNodeCount());
+ assertEquals(HddsProtos.NodeOperationalState.IN_SERVICE,
+ nodeManager.getNodeStatus(dn1).getOperationalState());
+ }
+
+ @Test
+ public void testMaintenanceEndsWhileReplicatingContainers()
+ throws ContainerNotFoundException, NodeNotFoundException {
- DatanodeDetails dn1 = TestUtils.randomDatanodeDetails();
++ DatanodeDetails dn1 = MockDatanodeDetails.randomDatanodeDetails();
+ nodeManager.register(dn1,
+ new NodeStatus(HddsProtos.NodeOperationalState.ENTERING_MAINTENANCE,
+ HddsProtos.NodeState.HEALTHY));
+
+ nodeManager.setContainers(dn1, generateContainers(3));
+ mockGetContainerReplicaCount(
+ HddsProtos.LifeCycleState.CLOSED,
+ ContainerReplicaProto.State.MAINTENANCE,
+ ContainerReplicaProto.State.MAINTENANCE,
+ ContainerReplicaProto.State.MAINTENANCE);
+
+ // Add the node to the monitor, it should transiting to
+ // REPLICATE_CONTAINERS as the containers are under-replicated for
+ // maintenance.
+ monitor.startMonitoring(dn1, 1);
+ monitor.run();
+ assertEquals(1, monitor.getTrackedNodeCount());
+ DatanodeAdminNodeDetails node = getFirstTrackedNode();
+ assertTrue(nodeManager.getNodeStatus(dn1).isEnteringMaintenance());
+ assertEquals(3, node.getUnderReplicatedContainers());
+
+ node.setMaintenanceEnd(-1);
+ monitor.run();
+ assertEquals(0, monitor.getTrackedNodeCount());
+ assertEquals(HddsProtos.NodeOperationalState.IN_SERVICE,
+ nodeManager.getNodeStatus(dn1).getOperationalState());
+ }
+
+ @Test
+ public void testDeadMaintenanceNodeDoesNotAbortWorkflow()
+ throws NodeNotFoundException {
- DatanodeDetails dn1 = TestUtils.randomDatanodeDetails();
++ DatanodeDetails dn1 = MockDatanodeDetails.randomDatanodeDetails();
+ nodeManager.register(dn1,
+ new NodeStatus(HddsProtos.NodeOperationalState.ENTERING_MAINTENANCE,
+ HddsProtos.NodeState.HEALTHY));
+
+ // Add the node to the monitor, it should transition to
+ // AWAIT_MAINTENANCE_END as there are no under-replicated containers.
+ monitor.startMonitoring(dn1, 1);
+ monitor.run();
+ assertEquals(1, monitor.getTrackedNodeCount());
+ DatanodeAdminNodeDetails node = getFirstTrackedNode();
+ assertTrue(nodeManager.getNodeStatus(dn1).isInMaintenance());
+ assertEquals(0, node.getUnderReplicatedContainers());
+
+ // Set the node dead and ensure the workflow does not end
+ NodeStatus status = nodeManager.getNodeStatus(dn1);
+ nodeManager.setNodeStatus(dn1, new NodeStatus(
+ status.getOperationalState(), HddsProtos.NodeState.DEAD));
+
+ // Running the monitor again causes the node to remain in maintenance
+ monitor.run();
+ assertEquals(1, monitor.getTrackedNodeCount());
+ assertTrue(nodeManager.getNodeStatus(dn1).isInMaintenance());
+ }
+
+ @Test
+ public void testCancelledNodesMovedToInService()
+ throws NodeNotFoundException {
- DatanodeDetails dn1 = TestUtils.randomDatanodeDetails();
++ DatanodeDetails dn1 = MockDatanodeDetails.randomDatanodeDetails();
+ nodeManager.register(dn1,
+ new NodeStatus(HddsProtos.NodeOperationalState.ENTERING_MAINTENANCE,
+ HddsProtos.NodeState.HEALTHY));
+
+ // Add the node to the monitor, it should transition to
+ // AWAIT_MAINTENANCE_END as there are no under-replicated containers.
+ monitor.startMonitoring(dn1, 1);
+ monitor.run();
+ assertEquals(1, monitor.getTrackedNodeCount());
+ DatanodeAdminNodeDetails node = getFirstTrackedNode();
+ assertTrue(nodeManager.getNodeStatus(dn1).isInMaintenance());
+ assertEquals(0, node.getUnderReplicatedContainers());
+
+ // Now cancel the node and run the monitor, the node should be IN_SERVICE
+ monitor.stopMonitoring(dn1);
+ monitor.run();
+ assertEquals(0, monitor.getTrackedNodeCount());
+ assertEquals(HddsProtos.NodeOperationalState.IN_SERVICE,
+ nodeManager.getNodeStatus(dn1).getOperationalState());
+ }
+
+ /**
+ * Generate a set of ContainerID, starting from an ID of zero up to the given
+ * count minus 1.
+ * @param count The number of ContainerID objects to generate.
+ * @return A Set of ContainerID objects.
+ */
+ private Set<ContainerID> generateContainers(int count) {
+ Set<ContainerID> containers = new HashSet<>();
+ for (int i=0; i<count; i++) {
+ containers.add(new ContainerID(i));
+ }
+ return containers;
+ }
+
+ /**
+ * Create a ContainerReplicaCount object, including a container with the
+ * requested ContainerID and state, along with a set of replicas of the given
+ * states.
+ * @param containerID The ID of the container to create an included
+ * @param containerState The state of the container
+ * @param states Create a replica for each of the given states.
+ * @return A ContainerReplicaCount containing the generated container and
+ * replica set
+ */
+ private ContainerReplicaCount generateReplicaCount(ContainerID containerID,
+ HddsProtos.LifeCycleState containerState,
+ ContainerReplicaProto.State... states) {
+ Set<ContainerReplica> replicas = new HashSet<>();
+ for (ContainerReplicaProto.State s : states) {
+ replicas.add(generateReplica(containerID, s));
+ }
+ ContainerInfo container = new ContainerInfo.Builder()
+ .setContainerID(containerID.getId())
+ .setState(containerState)
+ .build();
+
+ return new ContainerReplicaCount(container, replicas, 0, 0, 3, 2);
+ }
+
+ /**
+ * Generate a new ContainerReplica with the given containerID and State.
+ * @param containerID The ID the replica is associated with
+ * @param state The state of the generated replica.
+ * @return A containerReplica with the given ID and state
+ */
+ private ContainerReplica generateReplica(ContainerID containerID,
+ ContainerReplicaProto.State state) {
+ return ContainerReplica.newBuilder()
+ .setContainerState(state)
+ .setContainerID(containerID)
+ .setSequenceId(1)
- .setDatanodeDetails(TestUtils.randomDatanodeDetails())
++ .setDatanodeDetails(MockDatanodeDetails.randomDatanodeDetails())
+ .build();
+ }
+
+ /**
+ * Helper method to get the first node from the set of trackedNodes within
+ * the monitor.
+ * @return DatanodeAdminNodeDetails for the first tracked node found.
+ */
+ private DatanodeAdminNodeDetails getFirstTrackedNode() {
+ return
+ monitor.getTrackedNodes().toArray(new DatanodeAdminNodeDetails[0])[0];
+ }
+
+ /**
+ * The only interaction the DatanodeAdminMonitor has with the
+ * ReplicationManager, is to request a ContainerReplicaCount object for each
+ * container on nodes being deocmmissioned or moved to maintenance. This
+ * method mocks that interface to return a ContainerReplicaCount with a
+ * container in the given containerState and a set of replias in the given
+ * replicaStates.
+ * @param containerState
+ * @param replicaStates
+ * @throws ContainerNotFoundException
+ */
+ private void mockGetContainerReplicaCount(
+ HddsProtos.LifeCycleState containerState,
+ ContainerReplicaProto.State... replicaStates)
+ throws ContainerNotFoundException {
+ reset(repManager);
+ Mockito.when(repManager.getContainerReplicaCount(
+ Mockito.any(ContainerID.class)))
+ .thenAnswer(invocation ->
+ generateReplicaCount((ContainerID)invocation.getArguments()[0],
+ containerState, replicaStates));
+ }
+
+ /**
+ * This simple internal class is used to track and handle any DatanodeAdmin
+ * events fired by the DatanodeAdminMonitor during tests.
+ */
+ private class DatanodeAdminHandler implements
+ EventHandler<DatanodeDetails> {
+
+ private AtomicInteger invocation = new AtomicInteger(0);
+
+ @Override
+ public void onMessage(final DatanodeDetails dn,
+ final EventPublisher publisher) {
+ invocation.incrementAndGet();
+ }
+
+ public int getInvocation() {
+ return invocation.get();
+ }
+ }
+}
diff --cc hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestDatanodeAdminNodeDetails.java
index c5310b9,0000000..7a813a9
mode 100644,000000..100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestDatanodeAdminNodeDetails.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestDatanodeAdminNodeDetails.java
@@@ -1,81 -1,0 +1,81 @@@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdds.scm.node;
+
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.protocol.DatanodeDetails;
- import org.apache.hadoop.hdds.scm.TestUtils;
++import org.apache.hadoop.hdds.protocol.MockDatanodeDetails;
+
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+
+import static junit.framework.TestCase.*;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNotEquals;
+
+/**
+ * Tests to validate the DatanodeAdminNodeDetails class.
+ */
+public class TestDatanodeAdminNodeDetails {
+
+ private OzoneConfiguration conf;
+
+ @Before
+ public void setup() {
+ conf = new OzoneConfiguration();
+ }
+
+ @After
+ public void teardown() {
+ }
+
+ @Test
+ public void testEqualityBasedOnDatanodeDetails() {
- DatanodeDetails dn1 = TestUtils.randomDatanodeDetails();
- DatanodeDetails dn2 = TestUtils.randomDatanodeDetails();
++ DatanodeDetails dn1 = MockDatanodeDetails.randomDatanodeDetails();
++ DatanodeDetails dn2 = MockDatanodeDetails.randomDatanodeDetails();
+ DatanodeAdminNodeDetails details1 =
+ new DatanodeAdminNodeDetails(dn1, 0);
+ DatanodeAdminNodeDetails details2 =
+ new DatanodeAdminNodeDetails(dn2, 0);
+
+ assertNotEquals(details1, details2);
+ assertEquals(details1,
+ new DatanodeAdminNodeDetails(dn1, 0));
+ assertNotEquals(details1, dn1);
+ }
+
+
+
+ @Test
+ public void testMaintenanceEnd() {
- DatanodeDetails dn = TestUtils.randomDatanodeDetails();
++ DatanodeDetails dn = MockDatanodeDetails.randomDatanodeDetails();
+ // End in zero hours - should never end.
+ DatanodeAdminNodeDetails details = new DatanodeAdminNodeDetails(dn, 0);
+ assertFalse(details.shouldMaintenanceEnd());
+
+ // End 1 hour - maintenance should not end yet.
+ details.setMaintenanceEnd(1);
+ assertFalse(details.shouldMaintenanceEnd());
+
+ // End 1 hour ago - maintenance should end.
+ details.setMaintenanceEnd(-1);
+ assertTrue(details.shouldMaintenanceEnd());
+ }
+
+}
diff --cc hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestNodeDecommissionManager.java
index daf6731,0000000..df62438
mode 100644,000000..100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestNodeDecommissionManager.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestNodeDecommissionManager.java
@@@ -1,292 -1,0 +1,292 @@@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdds.scm.node;
+
+import org.apache.hadoop.hdds.HddsConfigKeys;
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.protocol.DatanodeDetails;
++import org.apache.hadoop.hdds.protocol.MockDatanodeDetails;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
+import org.apache.hadoop.hdds.scm.HddsTestUtils;
- import org.apache.hadoop.hdds.scm.TestUtils;
+import org.apache.hadoop.hdds.scm.node.states.NodeNotFoundException;
+import org.apache.hadoop.hdds.scm.server.StorageContainerManager;
+import org.apache.hadoop.security.authentication.client.AuthenticationException;
+import org.apache.hadoop.test.GenericTestUtils;
+import org.junit.Before;
+import org.junit.Test;
+import java.io.IOException;
+import java.util.List;
+import java.util.UUID;
+import java.util.Arrays;
+import java.util.ArrayList;
+import static junit.framework.TestCase.assertEquals;
+import static org.assertj.core.api.Fail.fail;
+
+/**
+ * Unit tests for the decommission manager.
+ */
+
+public class TestNodeDecommissionManager {
+
+ private NodeDecommissionManager decom;
+ private StorageContainerManager scm;
+ private NodeManager nodeManager;
+ private OzoneConfiguration conf;
+ private String storageDir;
+
+ @Before
+ public void setup() throws Exception {
+ conf = new OzoneConfiguration();
+ storageDir = GenericTestUtils.getTempPath(
+ TestDeadNodeHandler.class.getSimpleName() + UUID.randomUUID());
+ conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, storageDir);
+ nodeManager = createNodeManager(conf);
+ decom = new NodeDecommissionManager(
+ conf, nodeManager, null, null, null);
+ }
+
+ @Test
+ public void testHostStringsParseCorrectly()
+ throws InvalidHostStringException {
+ NodeDecommissionManager.HostDefinition def =
+ new NodeDecommissionManager.HostDefinition("foobar");
+ assertEquals("foobar", def.getHostname());
+ assertEquals(-1, def.getPort());
+
+ def = new NodeDecommissionManager.HostDefinition(" foobar ");
+ assertEquals("foobar", def.getHostname());
+ assertEquals(-1, def.getPort());
+
+ def = new NodeDecommissionManager.HostDefinition("foobar:1234");
+ assertEquals("foobar", def.getHostname());
+ assertEquals(1234, def.getPort());
+
+ def = new NodeDecommissionManager.HostDefinition(
+ "foobar.mycompany.com:1234");
+ assertEquals("foobar.mycompany.com", def.getHostname());
+ assertEquals(1234, def.getPort());
+
+ try {
+ def = new NodeDecommissionManager.HostDefinition("foobar:abcd");
+ fail("InvalidHostStringException should have been thrown");
+ } catch (InvalidHostStringException e) {
+ }
+ }
+
+ @Test
+ public void testAnyInvalidHostThrowsException()
+ throws InvalidHostStringException{
+ List<DatanodeDetails> dns = generateDatanodes();
+
+ // Try to decommission a host that does exist, but give incorrect port
+ try {
+ decom.decommissionNodes(Arrays.asList(dns.get(1).getIpAddress()+":10"));
+ fail("InvalidHostStringException expected");
+ } catch (InvalidHostStringException e) {
+ }
+
+ // Try to decommission a host that does not exist
+ try {
+ decom.decommissionNodes(Arrays.asList("123.123.123.123"));
+ fail("InvalidHostStringException expected");
+ } catch (InvalidHostStringException e) {
+ }
+
+ // Try to decommission a host that does exist and a host that does not
+ try {
+ decom.decommissionNodes(Arrays.asList(
+ dns.get(1).getIpAddress(), "123,123,123,123"));
+ fail("InvalidHostStringException expected");
+ } catch (InvalidHostStringException e) {
+ }
+
+ // Try to decommission a host with many DNs on the address with no port
+ try {
+ decom.decommissionNodes(Arrays.asList(
+ dns.get(0).getIpAddress()));
+ fail("InvalidHostStringException expected");
+ } catch (InvalidHostStringException e) {
+ }
+
+ // Try to decommission a host with many DNs on the address with a port
+ // that does not exist
+ try {
+ decom.decommissionNodes(Arrays.asList(
+ dns.get(0).getIpAddress()+":10"));
+ fail("InvalidHostStringException expected");
+ } catch (InvalidHostStringException e) {
+ }
+ }
+
+ @Test
+ public void testNodesCanBeDecommissionedAndRecommissioned()
+ throws InvalidHostStringException, NodeNotFoundException {
+ List<DatanodeDetails> dns = generateDatanodes();
+
+ // Decommission 2 valid nodes
+ decom.decommissionNodes(Arrays.asList(dns.get(1).getIpAddress(),
+ dns.get(2).getIpAddress()));
+ assertEquals(HddsProtos.NodeOperationalState.DECOMMISSIONING,
+ nodeManager.getNodeStatus(dns.get(1)).getOperationalState());
+ assertEquals(HddsProtos.NodeOperationalState.DECOMMISSIONING,
+ nodeManager.getNodeStatus(dns.get(2)).getOperationalState());
+
+ // Running the command again gives no error - nodes already decommissioning
+ // are silently ignored.
+ decom.decommissionNodes(Arrays.asList(dns.get(1).getIpAddress(),
+ dns.get(2).getIpAddress()));
+
+ // Attempt to decommission dn(10) which has multiple hosts on the same IP
+ // and we hardcoded ports to 3456, 4567, 5678
+ DatanodeDetails multiDn = dns.get(10);
+ String multiAddr =
+ multiDn.getIpAddress()+":"+multiDn.getPorts().get(0).getValue();
+ decom.decommissionNodes(Arrays.asList(multiAddr));
+ assertEquals(HddsProtos.NodeOperationalState.DECOMMISSIONING,
+ nodeManager.getNodeStatus(multiDn).getOperationalState());
+
+ // Recommission all 3 hosts
+ decom.recommissionNodes(Arrays.asList(
+ multiAddr, dns.get(1).getIpAddress(), dns.get(2).getIpAddress()));
+ decom.getMonitor().run();
+ assertEquals(HddsProtos.NodeOperationalState.IN_SERVICE,
+ nodeManager.getNodeStatus(dns.get(1)).getOperationalState());
+ assertEquals(HddsProtos.NodeOperationalState.IN_SERVICE,
+ nodeManager.getNodeStatus(dns.get(2)).getOperationalState());
+ assertEquals(HddsProtos.NodeOperationalState.IN_SERVICE,
+ nodeManager.getNodeStatus(dns.get(10)).getOperationalState());
+ }
+
+ @Test
+ public void testNodesCanBePutIntoMaintenanceAndRecommissioned()
+ throws InvalidHostStringException, NodeNotFoundException {
+ List<DatanodeDetails> dns = generateDatanodes();
+
+ // Put 2 valid nodes into maintenance
+ decom.startMaintenanceNodes(Arrays.asList(dns.get(1).getIpAddress(),
+ dns.get(2).getIpAddress()), 100);
+ assertEquals(HddsProtos.NodeOperationalState.ENTERING_MAINTENANCE,
+ nodeManager.getNodeStatus(dns.get(1)).getOperationalState());
+ assertEquals(HddsProtos.NodeOperationalState.ENTERING_MAINTENANCE,
+ nodeManager.getNodeStatus(dns.get(2)).getOperationalState());
+
+ // Running the command again gives no error - nodes already entering
+ // maintenance are silently ignored.
+ decom.startMaintenanceNodes(Arrays.asList(dns.get(1).getIpAddress(),
+ dns.get(2).getIpAddress()), 100);
+
+ // Attempt to put dn(10) into maintenance, which has multiple hosts on the
+ // same IP and we hardcoded ports to 3456, 4567, 5678
+ DatanodeDetails multiDn = dns.get(10);
+ String multiAddr =
+ multiDn.getIpAddress()+":"+multiDn.getPorts().get(0).getValue();
+ decom.startMaintenanceNodes(Arrays.asList(multiAddr), 100);
+ assertEquals(HddsProtos.NodeOperationalState.ENTERING_MAINTENANCE,
+ nodeManager.getNodeStatus(multiDn).getOperationalState());
+
+ // Recommission all 3 hosts
+ decom.recommissionNodes(Arrays.asList(
+ multiAddr, dns.get(1).getIpAddress(), dns.get(2).getIpAddress()));
+ decom.getMonitor().run();
+ assertEquals(HddsProtos.NodeOperationalState.IN_SERVICE,
+ nodeManager.getNodeStatus(dns.get(1)).getOperationalState());
+ assertEquals(HddsProtos.NodeOperationalState.IN_SERVICE,
+ nodeManager.getNodeStatus(dns.get(2)).getOperationalState());
+ assertEquals(HddsProtos.NodeOperationalState.IN_SERVICE,
+ nodeManager.getNodeStatus(dns.get(10)).getOperationalState());
+ }
+
+ @Test
+ public void testNodesCannotTransitionFromDecomToMaint() throws Exception {
+ List<DatanodeDetails> dns = generateDatanodes();
+
+ // Put 1 node into maintenance and another into decom
+ decom.startMaintenance(dns.get(1), 100);
+ decom.startDecommission(dns.get(2));
+ assertEquals(HddsProtos.NodeOperationalState.ENTERING_MAINTENANCE,
+ nodeManager.getNodeStatus(dns.get(1)).getOperationalState());
+ assertEquals(HddsProtos.NodeOperationalState.DECOMMISSIONING,
+ nodeManager.getNodeStatus(dns.get(2)).getOperationalState());
+
+ // Try to go from maint to decom:
+ try {
+ decom.startDecommission(dns.get(1));
+ fail("Expected InvalidNodeStateException");
+ } catch (InvalidNodeStateException e) {
+ }
+
+ // Try to go from decom to maint:
+ try {
+ decom.startMaintenance(dns.get(2), 100);
+ fail("Expected InvalidNodeStateException");
+ } catch (InvalidNodeStateException e) {
+ }
+
+ // Ensure the states are still as before
+ assertEquals(HddsProtos.NodeOperationalState.ENTERING_MAINTENANCE,
+ nodeManager.getNodeStatus(dns.get(1)).getOperationalState());
+ assertEquals(HddsProtos.NodeOperationalState.DECOMMISSIONING,
+ nodeManager.getNodeStatus(dns.get(2)).getOperationalState());
+ }
+
+
+
+ private SCMNodeManager createNodeManager(OzoneConfiguration config)
+ throws IOException, AuthenticationException {
+ scm = HddsTestUtils.getScm(config);
+ return (SCMNodeManager) scm.getScmNodeManager();
+ }
+
+ /**
+ * Generate a list of random DNs and return the list. A total of 11 DNs will
+ * be generated and registered with the node manager. Index 0 and 10 will
+ * have the same IP and host and the rest will have unique IPs and Hosts.
+ * The DN at index 10, has 3 hard coded ports of 3456, 4567, 5678. All other
+ * DNs will have ports set to 0.
+ * @return The list of DatanodeDetails Generated
+ */
+ private List<DatanodeDetails> generateDatanodes() {
+ List<DatanodeDetails> dns = new ArrayList<>();
+ for (int i=0; i<10; i++) {
- DatanodeDetails dn = TestUtils.randomDatanodeDetails();
++ DatanodeDetails dn = MockDatanodeDetails.randomDatanodeDetails();
+ dns.add(dn);
+ nodeManager.register(dn, null, null);
+ }
+ // We have 10 random DNs, we want to create another one that is on the same
+ // host as some of the others.
+ DatanodeDetails multiDn = dns.get(0);
+
+ DatanodeDetails.Builder builder = DatanodeDetails.newBuilder();
+ builder.setUuid(UUID.randomUUID().toString())
+ .setHostName(multiDn.getHostName())
+ .setIpAddress(multiDn.getIpAddress())
+ .addPort(DatanodeDetails.newPort(
+ DatanodeDetails.Port.Name.STANDALONE, 3456))
+ .addPort(DatanodeDetails.newPort(
+ DatanodeDetails.Port.Name.RATIS, 4567))
+ .addPort(DatanodeDetails.newPort(
+ DatanodeDetails.Port.Name.REST, 5678))
+ .setNetworkLocation(multiDn.getNetworkLocation());
+
+ DatanodeDetails dn = builder.build();
+ nodeManager.register(dn, null, null);
+ dns.add(dn);
+ return dns;
+ }
+
+}
diff --cc hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestRatisPipelineProvider.java
index 5a9ae85,f5e3f84..292a68a
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestRatisPipelineProvider.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestRatisPipelineProvider.java
@@@ -20,11 -20,10 +20,11 @@@ package org.apache.hadoop.hdds.scm.pipe
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.hdds.protocol.DatanodeDetails;
+ import org.apache.hadoop.hdds.protocol.MockDatanodeDetails;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
- import org.apache.hadoop.hdds.scm.TestUtils;
import org.apache.hadoop.hdds.scm.container.MockNodeManager;
import org.apache.hadoop.hdds.scm.node.NodeManager;
+import org.apache.hadoop.hdds.scm.node.NodeStatus;
import org.junit.Assume;
import org.junit.Before;
import org.junit.Test;
---------------------------------------------------------------------
To unsubscribe, e-mail: ozone-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: ozone-commits-help@hadoop.apache.org