You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@ozone.apache.org by na...@apache.org on 2020/10/25 17:23:46 UTC
[hadoop-ozone] branch HDDS-2823 updated (927c58a -> 782057a)
This is an automated email from the ASF dual-hosted git repository.
nanda pushed a change to branch HDDS-2823
in repository https://gitbox.apache.org/repos/asf/hadoop-ozone.git.
discard 927c58a HDDS-4365: SCMBlockLocationFailoverProxyProvider should use ScmBlockLocationProtocolPB.class in RPC.setProtocolEngine. (#1512)
omit 7ddaa07 HDDS-4192: enable SCM Raft Group based on config ozone.scm.names (#1428)
omit 138d33e HDDS-3188 Add failover proxy for SCM block location. (#1340)
omit 1c2a950 HDDS-4115. CLI command to show current SCM leader and follower status. (#1346)
omit 0f64aab HDDS-3895. Implement container related operations in ContainerManagerImpl. (#1145)
omit ccbf5b3 HDDS-4130: remove the 1st edition of RatisServer of SCM HA which is copied from OM HA (#1341)
omit 1b2ac22 HDDS-4093: Update RATIS version from 1.0.0 to 1.1.0-85281b2-SNAPSHOT. (#1307)
omit fb32aa7 HDDS-4125: Pipeline is not removed when a datanode goes stale (#1334)
omit e4a2a3d HDDS-4059: SCMStateMachine::applyTransaction() should not invoke TransactionContext.getClientRequest() (#1287)
omit a92a2f7 Update PipelineManagerV2 with getNumHealthyVolumes interface.
omit 4d186d4 HDDS-3994. Make retry policy can be set by configuration. (#1231)
omit 040577c HDDS-3446. Enable TestOzoneManagerRestart and address any failure. (#1279)
omit dd28dc9 HDDS-4058. Wrong use of AtomicBoolean in HddsDatanodeService (#1284)
omit 979faa7 HDDS-4029. Recon unable to add a new container which is in CLOSED state. (#1258)
omit 26ffe01 HDDS-4021. Organize Recon DBs into a 'DBDefinition'. (#1255)
omit da4fccb HDDS-4052. Remove master/slave terminology from Ozone (#1281)
omit 036c142 HDDS-4047. OzoneManager met NPE exception while getServiceList (#1277)
omit f511340 HDDS-3990. Test Kubernetes examples with acceptance tests (#1223)
omit 19eaad1 HDDS-4045. Add more ignore rules to the RAT ignore list (#1273)
omit 8b7e0d3 HDDS-3970. Enabling TestStorageContainerManager with all failures addressed (#1257)
omit 00afc8d HDDS-4033. Make the acceptance test reports hierarchical (#1263)
omit c17504b HDDS-3423. Enabling TestContainerReplicationEndToEnd and addressing failures (#1260)
omit 7537e34 HDDS-4027. Suppress ERROR message when SCM attempt to create additional pipelines. (#1265)
omit acd3480 HDDS-4024. Avoid while loop too soon when exception happen (#1253)
omit 52fc2ac HDDS-3809. Make number of open containers on a datanode a function of no of volumes reported by it. (#1081)
omit 08c6512 HDDS-4019. Show the storageDir while need init om or scm (#1248)
omit dc72bb9 HDDS-3511. Fix javadoc comment in OmMetadataManager (#1247)
omit 91a43ec HDDS-4041. Ozone /conf endpoint triggers kerberos replay error when SPNEGO is enabled. (#1267)
omit 6b36e21 HDDS-4031. Run shell tests in CI (#1261)
omit d80f873 HDDS-4038. Eliminate GitHub check warnings (#1268)
omit d6d7d81 HDDS-4011. Update S3 related documentation. (#1245)
omit dd7eaf1 HDDS-4030. Remember the selected columns and make the X-axis scrollable in recon datanodes UI (#1259)
omit 2b78046 HDDS-4032. Run author check without docker (#1262)
omit 1fa9e78 HDDS-4026. Dir rename failed when sets 'ozone.om.enable.filesystem.paths' to true (#1256)
omit 64d76ed HDDS-4017. Acceptance check may run against wrong commit (#1249)
omit 5dd1015 HDDS-4000. Split acceptance tests to reduce CI feedback time (#1236)
omit 2286ee5 HDDS-3905. Show status of OM in the OM web ui (#1152)
omit 749d874 HDDS-4022. Ozone s3 API return 400 Bad Request for head-bucket for non existing bucket. (#1251)
omit 7545f18 HDDS-3975. Use Duration for time in RatisClientConfig (#1217)
omit 60b328b HDDS-3877. Do not fail CI check for log upload failure (#1209)
omit d757253 HDDS-3973. Update main feature design status. (#1207)
omit e41edd6 HDDS-4025. Add test for creating encrypted key (#1254)
omit c059c83 HDDS-4007. Generate encryption info for the bucket outside bucket lock. (#1242)
omit c884133 HDDS-3997. Ozone certificate needs additional flags and SAN extension… (#1235)
omit b8b1f01 HDDS-3996. Missing TLS client configurations to allow ozone.grpc.tls.… (#1234)
omit 5486793 HDDS-3999. OM Shutdown when Commit part tries to commit the part, after abort upload. (#1244)
omit 7ff95c0 HDDS-4018. Datanode log spammed by NPE (#1250)
omit 829143e HDDS-3658. Stop to persist container related pipeline info of each ke… (#1012)
omit eff1f43 HDDS-4008. Recon should fallback to ozone.om.service.ids when the internal service id is not defined. (#1243)
omit 83bff20 HDDS-4006. Disallow MPU on encrypted buckets. (#1241)
omit 51a0c93 HDDS-3998. Shorten Ozone FS Hadoop compatibility module names (#1237)
omit a5b2093 HDDS-3827. Intermittent failure in TestKeyManagerUnit#listMultipartUploads (#1239)
omit 0d61e47 HDDS-4003. Delete the redundant word of the description (#1240)
omit 12d1d35 HDDS-3969. Add validName check for FileSystem requests (#1211)
omit d144473 HDDS-3993. Create volume required for S3G during OM startup. (#1227)
omit 8cb1e85 Remove optional jersey-json dependency (#1238)
omit 5c541bc HDDS-3718: Improve OmKeyLocationInfoGroup internal data structure (#1023)
omit afe9988 HDDS-2770. security/SecurityAcls.md (#1190)
omit 364468c HDDS-3933. Fix memory leak because of too many Datanode State Machine Thread (#1185)
omit 7f699fa HDDS-3991. Ignore protobuf lock files (#1224)
omit 4ff513f HDDS-3980. Correct the toString of RangeHeader (#1213)
omit a0dcdd3 HDDS-3989. Addendum: revert proto.lock file (#1226)
omit 093cea1 HDDS-3892. Datanode initialization is too slow when there are thousan… (#1147)
omit 7e8f2ee HDDS-3992. Remove project skeleton of in-place upgrade feature (#1225)
omit eb765fe HDDS-3989. Display revision and build date of DN in recon UI (#1226)
omit a265f7f HDDS-3986. Frequent failure in TestCommitWatcher#testReleaseBuffersOnException (#1220)
omit 8d8c5cc HDDS-3813. Upgrade Ratis third-party, too (#1229)
omit c53c0e0 Update ratis to 1.0.0 (#1222)
omit fb3cfe9 HDDS-3982. Disable moveToTrash in o3fs and ofs temporarily (#1215)
omit 26a38a2 HDDS-3987. Encrypted bucket creation failed with INVALID_REQUEST Encryption cannot be set for bucket links (#1221)
omit 23dafc5 HDDS-3984. Support filter and search the columns in recon UI (#1218)
omit 5d6f832 HDDS-3806. Support recognize aws v2 Authorization header. (#1098)
omit 51a3582 HDDS-3955. Unable to list intermediate paths on keys created using S3G. (#1196)
omit db3cef0 HDDS-3741. Reload old OM state if Install Snapshot from Leader fails (#1129)
omit 888abde HDDS-3965. SCM failed to start up for duplicated pipeline detected. (#1210)
omit ed8df6b HDDS-3855. Add upgrade smoketest (#1142)
omit d3dda10 HDDS-3964. Ratis config key mismatch (#1204)
omit 97fdf92 HDDS-3612. Allow mounting bucket under other volume (#1104)
omit e7cac9a HDDS-3926. OM Token Identifier table should use in-house serialization. (#1182)
omit 168ba19 HDDS-3824: OM read requests should make SCM#refreshPipeline outside BUCKET_LOCK (#1164)
omit 82c402e HDDS-3966. Disable flaky TestOMRatisSnapshots
omit e9eb482 HDDS-3807. Propagate raft log disks info to SCM from datanode. (#1107)
omit f13a221 HDDS-3923. Display the safemode status on scm page (#1165)
omit c4f1446 HDDS-3958. Intermittent failure in Recon acceptance test due to mixed stdout and stderr (#1200)
omit dddc85d HDDS-2767. security/SecuringTDE.md (#1184)
omit d19f322 HDDS-3968. LDB scan fails to read from transactionInfoTable. (#1205)
omit f322918 HDDS-3967. Remove leftover debug setting (#1202)
omit c219647 HDDS-3765. Fluentd writing to secure Ozone S3 API fails with 500 Error. (#1179)
omit ad71958 HDDS-3798. Display more accurate timestamp in recon Web (#1201)
omit 2e3e064 HDDS-3837 Add isLeader check in SCMHAManager. (#1191)
add da49ca6 HDDS-3798. Display more accurate timestamp in recon Web (#1201)
add d6c7f28 HDDS-3765. Fluentd writing to secure Ozone S3 API fails with 500 Error. (#1179)
add fadc7f1 HDDS-3967. Remove leftover debug setting (#1202)
add 6f1fba4 HDDS-3968. LDB scan fails to read from transactionInfoTable. (#1205)
add 22d03f6 HDDS-2767. security/SecuringTDE.md (#1184)
add d3e54fb HDDS-3958. Intermittent failure in Recon acceptance test due to mixed stdout and stderr (#1200)
add 7266bf8 HDDS-3923. Display the safemode status on scm page (#1165)
add de02785 HDDS-3807. Propagate raft log disks info to SCM from datanode. (#1107)
add 16dba63 HDDS-3966. Disable flaky TestOMRatisSnapshots
add 46e7b2f HDDS-3824: OM read requests should make SCM#refreshPipeline outside BUCKET_LOCK (#1164)
add 7e37f7b HDDS-3926. OM Token Identifier table should use in-house serialization. (#1182)
add 7aff2f0 HDDS-3612. Allow mounting bucket under other volume (#1104)
add f15b011 HDDS-3964. Ratis config key mismatch (#1204)
add 9b13ab6 HDDS-3855. Add upgrade smoketest (#1142)
add ca4c5a1 HDDS-3965. SCM failed to start up for duplicated pipeline detected. (#1210)
add 3571d7e HDDS-3741. Reload old OM state if Install Snapshot from Leader fails (#1129)
add 715aed2 HDDS-3955. Unable to list intermediate paths on keys created using S3G. (#1196)
add c64d226 HDDS-3806. Support recognize aws v2 Authorization header. (#1098)
add caf4711 HDDS-3984. Support filter and search the columns in recon UI (#1218)
add 8339b38 HDDS-3987. Encrypted bucket creation failed with INVALID_REQUEST Encryption cannot be set for bucket links (#1221)
add fbd125c HDDS-3982. Disable moveToTrash in o3fs and ofs temporarily (#1215)
add fb2649e Update ratis to 1.0.0 (#1222)
add 798e00c HDDS-3813. Upgrade Ratis third-party, too (#1229)
add 402a427 HDDS-3986. Frequent failure in TestCommitWatcher#testReleaseBuffersOnException (#1220)
add 76a9883 HDDS-3989. Display revision and build date of DN in recon UI (#1226)
add 937f36f HDDS-3992. Remove project skeleton of in-place upgrade feature (#1225)
add 783a18c HDDS-3892. Datanode initialization is too slow when there are thousan… (#1147)
add fd7e05c HDDS-3989. Addendum: revert proto.lock file (#1226)
add c3bbe18 HDDS-3980. Correct the toString of RangeHeader (#1213)
add 404ec6d HDDS-3991. Ignore protobuf lock files (#1224)
add ff7b5a3 HDDS-3933. Fix memory leak because of too many Datanode State Machine Thread (#1185)
add 05dccfd HDDS-2770. security/SecurityAcls.md (#1190)
add 40b3f13 HDDS-3718: Improve OmKeyLocationInfoGroup internal data structure (#1023)
add 8e300bc Remove optional jersey-json dependency (#1238)
add 7dac140 HDDS-3993. Create volume required for S3G during OM startup. (#1227)
add 854f5d3 HDDS-3969. Add validName check for FileSystem requests (#1211)
add f96b8fc HDDS-4003. Delete the redundant word of the description (#1240)
add 5264b24 HDDS-3827. Intermittent failure in TestKeyManagerUnit#listMultipartUploads (#1239)
add 1ae0378 HDDS-3998. Shorten Ozone FS Hadoop compatibility module names (#1237)
add 6ca82b8 HDDS-4006. Disallow MPU on encrypted buckets. (#1241)
add cf7a583 HDDS-4008. Recon should fallback to ozone.om.service.ids when the internal service id is not defined. (#1243)
add 71cc33d HDDS-3658. Stop to persist container related pipeline info of each ke… (#1012)
add a4f7e32 HDDS-4018. Datanode log spammed by NPE (#1250)
add 32ac7bf HDDS-3999. OM Shutdown when Commit part tries to commit the part, after abort upload. (#1244)
add facf36e HDDS-3996. Missing TLS client configurations to allow ozone.grpc.tls.… (#1234)
add 0bb3e24 HDDS-3997. Ozone certificate needs additional flags and SAN extension… (#1235)
add ac2769e HDDS-4007. Generate encryption info for the bucket outside bucket lock. (#1242)
add 093f556 HDDS-4025. Add test for creating encrypted key (#1254)
add e643ab2 HDDS-3973. Update main feature design status. (#1207)
add 78875bb HDDS-3877. Do not fail CI check for log upload failure (#1209)
add 18552c1 HDDS-3975. Use Duration for time in RatisClientConfig (#1217)
add a123b4e HDDS-4022. Ozone s3 API return 400 Bad Request for head-bucket for non existing bucket. (#1251)
add 2ba43d0 HDDS-3905. Show status of OM in the OM web ui (#1152)
add a7fe726 HDDS-4000. Split acceptance tests to reduce CI feedback time (#1236)
add fd47f91 HDDS-4017. Acceptance check may run against wrong commit (#1249)
add ddd3211 HDDS-4026. Dir rename failed when sets 'ozone.om.enable.filesystem.paths' to true (#1256)
add 99c273f HDDS-4032. Run author check without docker (#1262)
add 0869cce HDDS-4030. Remember the selected columns and make the X-axis scrollable in recon datanodes UI (#1259)
add 182c344 HDDS-4011. Update S3 related documentation. (#1245)
add a77d9ea HDDS-4038. Eliminate GitHub check warnings (#1268)
add 829b860 HDDS-4031. Run shell tests in CI (#1261)
add 21c08ee HDDS-4041. Ozone /conf endpoint triggers kerberos replay error when SPNEGO is enabled. (#1267)
add 1613726 HDDS-3511. Fix javadoc comment in OmMetadataManager (#1247)
add 0993d12 HDDS-4019. Show the storageDir while need init om or scm (#1248)
add 1346f49 HDDS-3809. Make number of open containers on a datanode a function of no of volumes reported by it. (#1081)
add a96553e HDDS-4024. Avoid while loop too soon when exception happen (#1253)
add 0892fab HDDS-4027. Suppress ERROR message when SCM attempt to create additional pipelines. (#1265)
add 93ac9ac HDDS-3423. Enabling TestContainerReplicationEndToEnd and addressing failures (#1260)
add 5837e86 HDDS-4033. Make the acceptance test reports hierarchical (#1263)
add 9f46fb8 HDDS-3970. Enabling TestStorageContainerManager with all failures addressed (#1257)
add 025f458 HDDS-4045. Add more ignore rules to the RAT ignore list (#1273)
add 30ec0e2 HDDS-3990. Test Kubernetes examples with acceptance tests (#1223)
add a95b0b8 HDDS-4047. OzoneManager met NPE exception while getServiceList (#1277)
add 0651be8 HDDS-4052. Remove master/slave terminology from Ozone (#1281)
add e219aae HDDS-4021. Organize Recon DBs into a 'DBDefinition'. (#1255)
add c07ccd7 HDDS-4029. Recon unable to add a new container which is in CLOSED state. (#1258)
add 12e9a26 HDDS-4058. Wrong use of AtomicBoolean in HddsDatanodeService (#1284)
add aad9c27 HDDS-3446. Enable TestOzoneManagerRestart and address any failure. (#1279)
add ff621c6 HDDS-3994. Make retry policy can be set by configuration. (#1231)
add c7ae9fe HDDS-4035. Update logs of HadoopDirGenerator. (#1264)
add cc5901f HDDS-4063. Fix InstallSnapshot in OM HA (#1294)
add d7ea496 HDDS-4044. Deprecate ozone.s3g.volume.name. #1270
add 99b693e HDDS-4073. Remove leftover robot.robot (#1297)
add db31571 HDDS-4066. Add core-site.xml to intellij configuration (#1292)
add 5ce6f0e HDDS-4042. Update documentation for the GA release (#1269)
add ca8eb40 HDDS-4055. Cleanup GitHub workflow (#1282)
add d418f00 HDDS-4051. Remove whitelist/blacklist terminology from Ozone (#1306)
add 76c448f HDDS-4046. Extensible subcommands for CLI applications (#1276)
add 43471f7 HDDS-4076. Translate CSI.md into Chinese (#1299)
add 3fc8cf2 HDDS-4034. Add Unit Test for HadoopNestedDirGenerator. (#1266)
add cfc023a HDDS-4078. Use HDDS InterfaceAudience/Stability annotations (#1302)
add 2da809e HDDS-4048. Show more information while SCM version info mismatch (#1278)
add 9a702e5 HDDS-3979. Make bufferSize configurable for stream copy (#1212)
add a79dfae HDDS-3833. Use Pipeline choose policy to choose pipeline from exist pipeline list (#1096)
add cee43e9 HDDS-3878. Make OMHA serviceID optional if one (but only one) is defined in the config (#1149)
add 9c22180 HDDS-4067. Implement toString for OMTransactionInfo (#1300)
add 0f23b22 HDDS-4061. Pending delete blocks are not always included in #BLOCKCOUNT metadata (#1288)
add b186b90 HDDS-4040. [OFS] BasicRootedOzoneFileSystem to support batchDelete (#1286)
add 7ab53b5 HDDS-4057. Failed acceptance test missing from bundle (#1283)
add d758f30 HDDS-4095. Byteman script to debug HCFS performance (#1311)
add 4ec1087 HDDS-3232. Include the byteman scripts in the distribution tar file (#1309)
add 83697f9 HDDS-4037. Incorrect container numberOfKeys and usedBytes in SCM after key deletion (#1295)
add eb70d9e HDDS-4009. Recon Overview page: The volume, bucket and key counts are not accurate (#1305)
add 2c102bd HDDS-4108. ozone debug ldb scan without arguments results in core dump (#1317)
add b8d1e3d HDDS-4099. No Log4j 2 configuration file found error appears in CLI (#1318)
add e5e89e0 HDDS-4114. Bump log4j2 version (#1325)
add 59fc0bb HDDS-4127. Components with web interface should depend on hdds-docs. (#1335)
add 1abbfed HDDS-4094. Support byte-level write in Freon HadoopFsGenerator (#1310)
add 1c7003e HDDS-4139. Update version number in upgrade tests (#1347)
add c656feb HDDS-4144. Update version info in hadoop client dependency readme (#1348)
add 122eac5 HDDS-4074. [OFS] Implement AbstractFileSystem for RootedOzoneFileSystem (#1330)
add 854fdc4 HDDS-4112. Improve SCM webui page performance (#1323)
add c0084a1 HDDS-3654. Let backgroundCreator create pipeline for the support replication factors alternately (#984)
add a2080cf HDDS-4111. Keep the CSI.zh.md consistent with CSI.md (#1320)
add 8102ac7 HDDS-4062. Non rack aware pipelines should not be created if multiple racks are alive. (#1291)
add 9292b39 HDDS-4068. Client should not retry same OM on network connection failure (#1324)
add 7f674fd HDDS-3972. Add option to limit number of items displaying through ldb tool. (#1206)
add bc7786a HDDS-4056. Convert OzoneAdmin to pluggable model (#1285)
add 5fab834 HDDS-4152. Archive container logs for kubernetes check (#1355)
add 5523636 HDDS-4140. Auto-close /pending pull requests after 21 days of inactivity (#1344)
add dcb1c6e HDDS-2411. add a datanode chunk validator fo datanode chunk generator (#1312)
add 2f3edd9 HDDS-4153. Increase default timeout in kubernetes tests (#1357)
add da61c4a HDDS-4149. Implement OzoneFileStatus#toString (#1356)
add d064230 HDDS-4109. Tests in TestOzoneFileSystem should use the existing MiniOzoneCluster (#1316)
add f6e4417 HDDS-4145. Bump version to 1.1.0-SNAPSHOT on master (#1349)
add 02289ce HDDS-4146. Show the ScmId and ClusterId in the scm web ui. (#1350)
add f64bc6e HDDS-4137. Turn on the verbose mode of safe mode check on testlib (#1343)
add 44acf78 HDDS-4147. Add OFS to FileSystem META-INF (#1352)
add 8e98977 HDDS-4151. Skip the inputstream while offset larger than zero in s3g (#1354)
add d34ab29 HDDS-3903. OzoneRpcClient support batch rename keys. (#1150)
add 78ca8bf HDDS-4077. Incomplete OzoneFileSystem statistics (#1329)
add 0ec1a8a HDDS-3867. Extend the chunkinfo tool to display information from all nodes in the pipeline. (#1154)
add 34ee831 HDDS-4121. Implement OmMetadataMangerImpl#getExpiredOpenKeys. (#1351)
add 13fe31b HDDS-4167. Acceptance test logs missing if fails during cluster startup (#1366)
add 9cef3f6 HDDS-4176. Fix failed UT: test2WayCommitForTimeoutException (#1370)
add 199512b HDDS-4131. Container report should update container key count and bytes used if they differ in SCM (#1339)
add 77d56e6 HDDS-4165. GitHub Actions cache does not work outside of workspace (#1364)
add c77e7ba HDDS-3804. Recon start fails with SQL exception with MySQL DB. (#1377)
add b58054e HDDS-4169. Fix some minor errors in StorageContainerManager.md (#1367)
add d2fb937 HDDS-4200. Fix missing right bracket in HA doc (#1380)
add dc5a997 HDDS-1889. Add support for verifying multiline log entry (#1308)
add 79f9fab HDDS-3762. Intermittent failure in TestDeleteWithSlowFollower (#1376)
add fd63aac HDDS-4150. Disabling flaky unit test until HDDS-4150 is fixed.
add b2fca43 HDDS-4189. Add alias `roles` for `ozone admin om` subcommand `getserviceroles` (#1375)
add c8d5334 HDDS-4199. Fix failed UT: TestOMAllocateBlockRequest#testValidateAndUpdateCache (#1379)
add ead6371 HDDS-3840. Use OFS in MapReduce acceptance test (#1365)
add ceeca92 HDDS-4201. Improve the performance of OmKeyLocationInfoGroup (#1381)
add 642d660 HDDS-4186: Adjust RetryPolicy of SCMConnectionManager for SCM/Recon (#1373)
add f254183 HDDS-3725. Ozone sh volume client support quota option. (#1233)
add acfef2d HDDS-4205. Disable coverage upload to codecov for pull requests (#1394)
add 549a1a0 HDDS-4197. Failed to load existing service definition files: ...SubcommandWithParent (#1386)
add 7bf205c Removing an archaic reference to Skaffold in the README and other little improvements (#1360)
add 157864a HDDS-4208. Fix table rendering and logo display in docs (#1391)
add b12f6b6 HDDS-4161. Set fs.defaultFS in docker compose cluster config to OFS (#1362)
add dc49daa HDDS-4198. Compile Ozone with multiple Java versions (#1387)
add 4b325a8 HDDS-4193. Range used by S3 MultipartUpload copy-from-source should be incusive (#1384)
add ce02172 HDDS-4202. Upgrade ratis to 1.1.0-ea949f1-SNAPSHOT (#1382)
add 53353c0 HDDS-4204. upgrade docker environment does not work with KEEP_RUNNING=true (#1388)
add 0a490cb HDDS-3441. Enable TestKeyManagerImpl test cases. (#1326)
add d2c0470 HDDS-4213. Log when a datanode has become dead in the DeadNodeHandler (#1402)
add 9a4cb9e HDDS-3151. TestCloseContainerHandlingByClient Enable-testMultiBlockW… (#1333)
add 49e3a1a HDDS-4064. Show container verbose info with verbose option (#1290)
add 22e4288 HDDS-4170 - Fix typo in method description. (#1406)
add 971a36e HDDS-4150. recon.api.TestEndpoints test is flaky (#1396)
add bfa2801 HDDS-4211. [OFS] Better owner and group display for listing Ozone volumes and buckets (#1397)
add a7c72fb HDDS-4196. Add an endpoint in Recon to query Prometheus (#1390)
add 4b96d83 HDDS-4228: add field 'num' to ALLOCATE_BLOCK of scm audit log. (#1413)
add 04ac1ef HDDS-4129. change MAX_QUOTA_IN_BYTES to Long.MAX_VALUE. (#1337)
add 2134c2e HDDS-4218.Remove test TestRatisManager (#1409)
add ab7481d HDDS-4217.Remove test TestOzoneContainerRatis (#1408)
add 72e3215 HDDS-4119. Improve performance of the BufferPool management of Ozone client (#1336)
add 48e8e50 HDDS-3927. Rename Ozone OM,DN,SCM runtime options to conform to naming conventions (#1401)
add 68869d1 HDDS-4155. Directory and filename can end up with same name in a path. (#1361)
add def697f HDDS-4039. Reduce the number of fields in hdds.proto to improve performance (#1289)
add 8e71e81 HDDS-4166. Documentation index page redirects to the wrong address (#1372)
add 045aa71 HDDS-4075. Retry request on different OM on AccessControlException (#1303)
add e84f5ce HDDS-4210. ResolveBucket during checkAcls fails. (#1398)
add 7beb2d0 HDDS-4053. Volume space: add quotaUsageInBytes and update it when write and delete key. (#1296)
add 570d34c HDDS-4244. Container deleted wrong replica cause mis-replicated. (#1423)
add 69c3e0e HDDS-4250. Fix wrong logger name (#1429)
add 079ee7f HDDS-4104. Provide a way to get the default value and key of java-based-configuration easily (#1369)
add 241de5a HDDS-4241. Support HADOOP_TOKEN_FILE_LOCATION for Ozone token CLI. (#1422)
add 0da6cfd HDDS-4247. Fixed log4j usage in some places (#1426)
add 410a246 HDDS-4255. Remove unused Ant and Jdiff dependency versions (#1433)
add 68d1ab0 HDDS-3981. Add more debug level log to XceiverClientGrpc for debug purpose (#1214)
add ce0c072 HDDS-3102. ozone getconf command should use the GenericCli parent class (#1410)
add 1e9ff6c HDDS-3947: Sort DNs for client when the key is a file for #getFileStatus #listStatus APIs (#1385)
add f3a60dc HDDS-4233. Interrupted exeception printed out from DatanodeStateMachine (#1416)
add b281d62 HDDS-4206. Attempt pipeline creation more frequently in acceptance tests (#1389)
add 0eceb4c HDDS-2766. security/SecuringDataNodes.md (#1175)
add a78a4b7 HDDS-4254. Bucket space: add usedBytes and update it when create and delete key. (#1431)
add 8ca694a HDDS-4236. Move "Om*Codec.java" to new project hadoop-ozone/interface-storage (#1424)
add 8899ff7 HDDS-4324. Add important comment to ListVolumes logic (#1417)
add 261d34d HDDS-3297. Enable TestOzoneClientKeyGenerator. (#1442)
add 64026dd HDDS-2660. Create insight point for datanode container protocol (#1272)
add 5f1900a HDDS-4270. Add more reusable byteman scripts to debug ofs/o3fs performance (#1443)
add c955729 HDDS-4194. Create a script to check AWS S3 compatibility (#1383)
add 7d0d330 HDDS-4282. Improve the emptyDir syntax (#1450)
add f1cdbe7 HDDS-4263. ReplicatiomManager shouldn't consider origin node Id for CLOSED containers. (#1438)
add 004dd3f HDDS-4102. Normalize Keypath for lookupKey. (#1328)
add 525ecbb HDDS-3727. Volume space: check quotaUsageInBytes when write key. (#1434)
add 6267a39 HDDS-4231. Background Service blocks on task results. (#1414)
add 34f3b91 HDDS-4232. Use single thread for KeyDeletingService. (#1415)
add 68642c2 HDDS-4023. Delete closed container after all blocks have been deleted. (#1338)
add 3ad1034 HDDS-4215. Update Freon doc in source tree. (#1403)
add 2420ee8 HDDS-4288. the icon of hadoop-ozone is bigger than ever (#1452)
add fdcc696 HDDS-4287: Exclude protobuff classes from ozone-filesystem-hadoop3 jars (#1455). Contributed by Uma Maheswara Rao G.
add d1ac423 HDDS-3751. Ozone sh client support bucket quota option. (#1412)
add be25991 HDDS-4292. Ozone Client not working with Hadoop Version < 3.2 (#1463)
add d6a1836 HDDS-4251. Update Ratis version to latest snapshot (#1462)
add 275653e HDDS-3869. Use different column families for datanode block and metadata (#1298)
add 819b455 HDDS-4302 : Shade the org.apache.common.lang3 package as this is coming from other hadoop packages as well. (#1469). Contributed by Uma Maheswara Rao G.
add 7216e3c HDDS-3966. Enable TestOMRatisSnapshots. (#1441)
add 55c9df8 HDDS-4290. Enable insight point for SCM heartbeat protocol (#1453)
add 60d2bcc HDDS-4274. Change the log level of the SCM Delete block to improve performance. (#1446)
add f8a62d6 HDDS-3810. Add the logic to distribute open containers among the pipelines of a datanode. (#1274)
add 5719615 HDDS-4304. Close Container event can fail if pipeline is removed first. (#1471)
add 8cd86a6 HDDS-4299. Display Ratis version with ozone version (#1464)
add cfff097 HDDS-4271. Avoid logging chunk content in Ozone Insight (#1466)
add 4ad0318 HDDS-4264. Uniform naming conventions of Ozone Shell Options. (#1447)
add d6d27e4 HDDS-4242. Copy PrefixInfo proto to new project hadoop-ozone/interface-storage (#1444)
add 19cb481 HDDS-4156. add hierarchical layout to Chinese doc (#1368)
add b6efb95 HDDS-4280. Document notable configurations for Recon. (#1448)
add 0d7d1e2 HDDS-4298. Use an interface in Ozone client instead of XceiverClientManager (#1460)
add f9b1ca4 HDDS-4310: Ozone getconf broke the compatibility (#1475)
add efaa4fc HDDS-4309. Fix inconsistency in recon config keys starting with recon and not ozone (#1478)
add e0a3baf HDDS-4325. Incompatible return codes from Ozone getconf -confKey (#1485). Contributed by Doroszlai, Attila.
add 55d1e91 HDDS-4316. Upgrade to angular 1.8.0 due to CVE-2020-7676 (#1481)
add d08a4c1 HDDS-3728. Bucket space: check quotaUsageInBytes when write key and allocate block. (#1458)
add 7704cb5 HDDS-3814. Drop a column family through debug cli tool (#1083)
add a1d53b0 HDDS-4311. Type-safe config design doc points to OM HA (#1477)
add 5c5d8cb HDDS-4312. findbugs check succeeds despite compile error (#1476)
add 35cc6b0 HDDS-4285. Read is slow due to frequent calls to UGI.getCurrentUser() and getTokens() (#1454)
add c956ce6 HDDS-4262. Use ClientID and CallID from Rpc Client to detect retry requests (#1436)
add dc889b4 Remove extra serialization from getBlockID (#1470)
add 7ae037e HDDS-4336. ContainerInfo does not persist BCSID (sequenceId) leading to failed replica reports (#1488)
add 7db0ea8 HDDS-4122. Implement OM Delete Expired Open Key Request and Response (#1435)
add 2650723 HDDS-4343. ReplicationManager.handleOverReplicatedContainer() does not handle unhealthyReplicas properly. (#1495)
add 814428e HDDS-3995. Fix s3g met NPE exception while write file by multiPartUpload (#1499)
add 342bf6d HDDS-4327. Potential resource leakage using BatchOperation. (#1493)
add aff7c52 HDDS-4269. Ozone DataNode thinks a volume is failed if an unexpected file is in the HDDS root directory. (#1490)
add 8fab5f2 HDDS-2922. Balance ratis leader distribution in datanodes (#1371)
add 25e2046 HDDS-4297. Allow multiple transactions per container to be sent for deletion by SCM.
add 9baaf64 HDDS-4158. Provide a class type for Java based configuration (#1407)
add 049793d HDDS-4301. SCM CA certificate does not encode KeyUsage extension properly (#1468)
add 10df489 HDDS-4328. Provide fallback cache restore key (#1505)
new 6de98c6 Merge branch 'master' into HDDS-2823
new 58394eb HDDS-3837. Add isLeader check in SCMHAManager.
new 3ed29d8 HDDS-4059. SCMStateMachine::applyTransaction() should not invoke TransactionContext.getClientRequest().
new d482abf HDDS-4125. Pipeline is not removed when a datanode goes stale.
new a70964e HDDS-4130. remove the 1st edition of RatisServer of SCM HA which is copied from OM HA.
new 9e0dd84 HDDS-3895. Implement container related operations in ContainerManagerImpl.
new 5f3981c HDDS-4115. CLI command to show current SCM leader and follower status.
new 9f7ab46 HDDS-3188. Add failover proxy for SCM block location.
new 5111126 HDDS-4192. enable SCM Raft Group based on config ozone.scm.names.
new 43b87fe HDDS-4365. SCMBlockLocationFailoverProxyProvider should use ScmBlockLocationProtocolPB.class in RPC.setProtocolEngine.
new 782057a Resolving master merge conflict.
This update added new revisions after undoing existing revisions.
That is to say, some revisions that were in the old version of the
branch are not in the new version. This situation occurs
when a user --force pushes a change and generates a repository
containing something like this:
* -- * -- B -- O -- O -- O (927c58a)
\
N -- N -- N refs/heads/HDDS-2823 (782057a)
You should already have received notification emails for all of the O
revisions, and so the following emails describe only the N revisions
from the common base, B.
Any revisions marked "omit" are not gone; other references still
refer to them. Any revisions marked "discard" are gone forever.
The 11 revisions listed above as "new" are entirely new to this
repository and will be described in separate emails. The revisions
listed as "add" were already present in the repository and have only
been added to this reference.
Summary of changes:
.github/close-pending.sh | 41 ++
.github/closing-message.txt | 7 +
.github/comment-commands/close.sh | 10 +-
.github/comment-commands/pending.sh | 1 +
.../workflows/close-pending.yaml | 18 +-
.github/workflows/post-commit.yml | 317 +++++----
LICENSE.txt | 4 +-
.../byteman/appendlog.btm | 14 +-
dev-support/byteman/hcfs-read.btm | 67 ++
dev-support/byteman/hcfs-write.btm | 111 ++++
.../byteman/ratis-flush.btm | 28 +-
.../byteman/ratis-no-flush.btm | 11 +-
.../byteman/watchforcommit.btm | 29 +-
dev-support/byteman/watchforcommit_all.btm | 47 ++
hadoop-hdds/client/pom.xml | 15 +-
.../hadoop/hdds/scm/XceiverClientFactory.java | 23 +-
.../apache/hadoop/hdds/scm/XceiverClientGrpc.java | 48 +-
.../hadoop/hdds/scm/XceiverClientManager.java | 40 +-
.../apache/hadoop/hdds/scm/XceiverClientRatis.java | 11 +-
.../hadoop/hdds/scm/client/HddsClientUtils.java | 8 +-
.../hadoop/hdds/scm/storage/BlockInputStream.java | 62 +-
.../hadoop/hdds/scm/storage/BlockOutputStream.java | 158 +++--
.../apache/hadoop/hdds/scm/storage/BufferPool.java | 49 +-
.../hadoop/hdds/scm/storage/ChunkInputStream.java | 10 +-
.../hadoop/hdds/scm/storage/CommitWatcher.java | 37 +-
.../hdds/scm/storage/DummyChunkInputStream.java | 2 +-
.../storage/TestBlockOutputStreamCorrectness.java | 224 +++++++
.../hadoop/hdds/scm/storage/TestBufferPool.java | 46 +-
hadoop-hdds/common/pom.xml | 14 +-
hadoop-hdds/common/src/main/conf/hadoop-env.sh | 13 +-
.../org/apache/hadoop/hdds/cli/GenericCli.java | 21 +
.../hadoop/hdds/cli/SubcommandWithParent.java} | 13 +-
.../org/apache/hadoop/hdds/cli/package-info.java | 4 +-
.../org/apache/hadoop/hdds/client/OzoneQuota.java | 240 ++++---
.../hadoop/hdds/protocol/DatanodeDetails.java | 83 ++-
.../org/apache/hadoop/hdds/ratis/RatisHelper.java | 19 +
.../RequestTypeDependentRetryPolicyCreator.java | 8 +-
.../apache/hadoop/hdds/recon/ReconConfigKeys.java | 4 +
.../hadoop/hdds/scm/ByteStringConversion.java | 18 +-
.../PipelineChoosePolicy.java} | 19 +-
.../PipelineRequestInformation.java} | 49 +-
.../java/org/apache/hadoop/hdds/scm/ScmConfig.java | 23 +
.../org/apache/hadoop/hdds/scm/ScmConfigKeys.java | 13 +
.../apache/hadoop/hdds/scm/XceiverClientSpi.java | 15 +-
.../hadoop/hdds/scm/container/ContainerInfo.java | 2 +
.../hadoop/hdds/scm/exceptions/SCMException.java | 2 +
.../hadoop/hdds/scm/net/NetworkTopologyImpl.java | 2 +-
.../apache/hadoop/hdds/scm/pipeline/Pipeline.java | 42 +-
.../hdds/scm/storage/ContainerProtocolCalls.java | 229 ++++---
.../x509/certificate/utils/CertificateCodec.java | 2 +-
.../hadoop/hdds/utils/BackgroundService.java | 73 +--
.../apache/hadoop/hdds/utils/BackgroundTask.java | 4 +-
.../hadoop/hdds/utils/BackgroundTaskQueue.java | 5 +-
.../{VersionInfo.java => RatisVersionInfo.java} | 66 +-
.../org/apache/hadoop/hdds/utils/Scheduler.java | 2 +-
.../org/apache/hadoop/hdds/utils/VersionInfo.java | 9 +-
.../org/apache/hadoop/ozone/OzoneConfigKeys.java | 1 +
.../java/org/apache/hadoop/ozone/OzoneConsts.java | 57 +-
.../apache/hadoop/ozone/common/ChunkBuffer.java | 14 +-
.../common/ChunkBufferImplWithByteBuffer.java | 10 +-
.../org/apache/hadoop/ozone/common/Storage.java | 2 +-
.../container/common/helpers/ChunkInfoList.java | 56 ++
.../common/src/main/resources/ozone-default.xml | 55 +-
.../hadoop/hdds/conf/SimpleConfiguration.java | 13 +
.../hadoop/hdds/conf/TestOzoneConfiguration.java | 7 +-
.../hadoop/hdds/protocol/MockDatanodeDetails.java | 2 +-
.../hadoop/hdds/scm/pipeline/MockPipeline.java | 29 +-
.../hadoop/ozone/audit/TestOzoneAuditLogger.java | 62 +-
.../hadoop/ozone/common/TestChunkBuffer.java | 16 +-
hadoop-hdds/config/pom.xml | 4 +-
.../org/apache/hadoop/hdds/conf/ConfigType.java | 3 +-
.../hdds/conf/ConfigurationReflectionUtil.java | 57 ++
...{ConfigType.java => InMemoryConfiguration.java} | 50 +-
.../hdds/conf/TestConfigurationReflectionUtil.java | 111 ++++
hadoop-hdds/container-service/pom.xml | 11 +-
.../container/common/helpers/ContainerUtils.java | 66 +-
.../container/common/impl/ContainerDataYaml.java | 3 +
.../container/common/impl/HddsDispatcher.java | 61 +-
.../container/common/interfaces/BlockIterator.java | 5 +-
.../container/common/interfaces/Container.java | 7 -
.../common/statemachine/DatanodeStateMachine.java | 7 +-
.../common/statemachine/SCMConnectionManager.java | 9 +-
.../CloseContainerCommandHandler.java | 2 +-
.../CreatePipelineCommandHandler.java | 12 +-
.../commandhandler/DeleteBlocksCommandHandler.java | 92 +--
.../states/endpoint/RegisterEndpointTask.java | 6 +-
.../common/transport/server/XceiverServerSpi.java | 10 +-
.../server/ratis/ContainerStateMachine.java | 2 +-
.../transport/server/ratis/XceiverServerRatis.java | 33 +-
.../container/common/utils/ContainerCache.java | 34 +-
.../container/common/utils/HddsVolumeUtil.java | 7 +-
.../container/common/utils/ReferenceCountedDB.java | 10 +-
.../container/keyvalue/KeyValueBlockIterator.java | 156 -----
.../container/keyvalue/KeyValueContainer.java | 29 +-
.../container/keyvalue/KeyValueContainerCheck.java | 12 +-
.../container/keyvalue/KeyValueContainerData.java | 48 +-
.../ozone/container/keyvalue/KeyValueHandler.java | 18 +-
.../container/keyvalue/helpers/BlockUtils.java | 2 +-
.../keyvalue/helpers/KeyValueContainerUtil.java | 184 ++++--
.../container/keyvalue/impl/BlockManagerImpl.java | 124 ++--
.../keyvalue/interfaces/BlockManager.java | 12 +
.../background/BlockDeletingService.java | 75 ++-
.../metadata/AbstractDatanodeDBDefinition.java | 74 +++
.../container/metadata/AbstractDatanodeStore.java | 297 +++++++++
.../ozone/container/metadata/BlockDataCodec.java | 47 ++
.../container/metadata/ChunkInfoListCodec.java | 45 ++
.../metadata/DatanodeSchemaOneDBDefinition.java | 91 +++
.../metadata/DatanodeSchemaTwoDBDefinition.java | 81 +++
.../ozone/container/metadata/DatanodeStore.java | 94 +++
.../metadata/DatanodeStoreSchemaOneImpl.java | 49 ++
.../metadata/DatanodeStoreSchemaTwoImpl.java | 44 ++
.../ozone/container/metadata/DatanodeTable.java | 130 ++++
.../metadata/SchemaOneChunkInfoListCodec.java | 68 ++
.../metadata/SchemaOneDeletedBlocksTable.java | 180 +++++
.../container/metadata/SchemaOneKeyCodec.java | 106 +++
.../ozone/container/metadata}/package-info.java | 8 +-
.../protocol/StorageContainerDatanodeProtocol.java | 14 +-
.../protocol/commands/CreatePipelineCommand.java | 46 +-
...inerDatanodeProtocolClientSideTranslatorPB.java | 12 +-
...inerDatanodeProtocolServerSideTranslatorPB.java | 4 +-
.../main/resources/webapps/hddsDatanode/index.html | 4 +-
.../ozone/container/ContainerTestHelper.java | 5 +
.../hadoop/ozone/container/common/ScmTestMock.java | 9 +-
.../container/common/TestBlockDeletingService.java | 113 +++-
.../ozone/container/common/TestContainerCache.java | 25 +-
.../common/TestKeyValueContainerData.java | 3 +
.../TestSchemaOneBackwardsCompatibility.java | 626 ++++++++++++++++++
.../common/impl/TestContainerDataYaml.java | 3 +
.../TestCreatePipelineCommandHandler.java | 7 +-
.../keyvalue/TestKeyValueBlockIterator.java | 340 ++++++----
.../container/keyvalue/TestKeyValueContainer.java | 75 +--
.../keyvalue/TestKeyValueContainerCheck.java | 18 +-
.../container/ozoneimpl/TestContainerReader.java | 59 +-
.../container/ozoneimpl/TestOzoneContainer.java | 14 +-
.../test/resources/123-dn-container.db/000024.sst | Bin 0 -> 1022 bytes
.../test/resources/123-dn-container.db/000026.sst | Bin 0 -> 827 bytes
.../test/resources/123-dn-container.db/000032.sst | Bin 0 -> 896 bytes
.../test/resources/123-dn-container.db/000034.log | 0
.../src/test/resources/123-dn-container.db/CURRENT | 1 +
.../test/resources/123-dn-container.db/IDENTITY | 1 +
.../resources/123-dn-container.db/MANIFEST-000033 | Bin 0 -> 297 bytes
.../resources/123-dn-container.db/OPTIONS-000033 | 165 +++++
.../resources/123-dn-container.db/OPTIONS-000036 | 165 +++++
.../src/test/resources/123.container | 10 +
hadoop-hdds/docs/content/_index.md | 5 +-
hadoop-hdds/docs/content/_index.zh.md | 2 +-
hadoop-hdds/docs/content/beyond/Containers.md | 234 -------
hadoop-hdds/docs/content/beyond/Containers.zh.md | 203 ------
.../docs/content/beyond/DockerCheatSheet.md | 88 ---
.../docs/content/beyond/DockerCheatSheet.zh.md | 85 ---
hadoop-hdds/docs/content/beyond/_index.md | 30 -
hadoop-hdds/docs/content/beyond/_index.zh.md | 27 -
hadoop-hdds/docs/content/concept/Containers.md | 47 ++
hadoop-hdds/docs/content/concept/Containers.png | Bin 0 -> 24775 bytes
hadoop-hdds/docs/content/concept/Datanodes.md | 5 +-
hadoop-hdds/docs/content/concept/Datanodes.zh.md | 3 +
hadoop-hdds/docs/content/concept/Hdds.md | 52 --
hadoop-hdds/docs/content/concept/Overview.md | 7 +-
hadoop-hdds/docs/content/concept/Overview.zh.md | 7 +-
.../docs/content/concept/OzoneManager-ReadPath.png | Bin 0 -> 81030 bytes
.../content/concept/OzoneManager-WritePath.png | Bin 0 -> 96696 bytes
hadoop-hdds/docs/content/concept/OzoneManager.md | 63 +-
hadoop-hdds/docs/content/concept/OzoneManager.png | Bin 0 -> 13327 bytes
.../docs/content/concept/OzoneManager.zh.md | 9 +
.../content/concept/StorageContainerManager.md | 99 +++
.../content/concept/StorageContainerManager.png | Bin 0 -> 13336 bytes
.../{Hdds.zh.md => StorageContainerManager.zh.md} | 9 +
hadoop-hdds/docs/content/concept/_index.md | 4 +-
hadoop-hdds/docs/content/concept/_index.zh.md | 2 +-
hadoop-hdds/docs/content/design/ec.md | 39 ++
.../docs/content/design/namespace-support.md | 6 +-
hadoop-hdds/docs/content/design/ofs.md | 135 ----
.../docs/content/design/storage-class.md | 17 +-
.../design/{namespace-support.md => topology.md} | 18 +-
hadoop-hdds/docs/content/design/typesafeconfig.md | 10 +-
...e-volume-management.md => volume-management.md} | 0
hadoop-hdds/docs/content/feature/GDPR.md | 80 +++
.../GDPR in Ozone.zh.md => feature/GDPR.zh.md} | 5 +
.../docs/content/feature/HA-OM-doublebuffer.png | Bin 0 -> 77661 bytes
hadoop-hdds/docs/content/feature/HA-OM.png | Bin 0 -> 60888 bytes
hadoop-hdds/docs/content/feature/HA.md | 115 ++++
hadoop-hdds/docs/content/feature/Observability.md | 224 +++++++
hadoop-hdds/docs/content/feature/Recon.md | 47 ++
hadoop-hdds/docs/content/feature/Topology.md | 108 +++
.../docs/content/{gdpr => feature}/_index.md | 12 +-
.../docs/content/{gdpr => feature}/_index.zh.md | 0
hadoop-hdds/docs/content/gdpr/GDPR in Ozone.md | 42 --
hadoop-hdds/docs/content/interface/CSI.md | 15 +-
hadoop-hdds/docs/content/interface/CSI.png | Bin 0 -> 27210 bytes
hadoop-hdds/docs/content/interface/CSI.zh.md | 92 +++
hadoop-hdds/docs/content/interface/Cli.md | 208 ++++++
hadoop-hdds/docs/content/interface/JavaApi.md | 5 +-
hadoop-hdds/docs/content/interface/JavaApi.zh.md | 3 +
.../docs/content/interface/{OzoneFS.md => O3fs.md} | 79 +--
.../interface/{OzoneFS.zh.md => O3fs.zh.md} | 14 +-
.../content/{design/ofs.md => interface/Ofs.md} | 121 +++-
hadoop-hdds/docs/content/interface/S3.md | 23 +-
hadoop-hdds/docs/content/interface/S3.zh.md | 3 +
hadoop-hdds/docs/content/interface/_index.md | 4 +-
hadoop-hdds/docs/content/recipe/Prometheus.md | 5 +-
hadoop-hdds/docs/content/recipe/Prometheus.zh.md | 7 +-
hadoop-hdds/docs/content/security/SecureOzone.md | 3 +
.../docs/content/security/SecureOzone.zh.md | 6 +-
.../docs/content/security/SecuringDatanodes.md | 39 +-
.../docs/content/security/SecuringDatanodes.zh.md | 53 ++
.../docs/content/security/SecuringOzoneHTTP.md | 7 +-
hadoop-hdds/docs/content/security/SecuringS3.md | 5 +-
hadoop-hdds/docs/content/security/SecuringS3.zh.md | 3 +
hadoop-hdds/docs/content/security/SecuringTDE.md | 5 +-
.../docs/content/security/SecuringTDE.zh.md | 3 +
hadoop-hdds/docs/content/security/SecurityAcls.md | 3 +
.../docs/content/security/SecurityAcls.zh.md | 3 +
...{SecuityWithRanger.md => SecurityWithRanger.md} | 5 +-
...tyWithRanger.zh.md => SecurityWithRanger.zh.md} | 3 +
hadoop-hdds/docs/content/shell/BucketCommands.md | 100 ---
.../docs/content/shell/BucketCommands.zh.md | 98 ---
hadoop-hdds/docs/content/shell/Format.md | 69 --
hadoop-hdds/docs/content/shell/Format.zh.md | 65 --
hadoop-hdds/docs/content/shell/KeyCommands.md | 177 -----
hadoop-hdds/docs/content/shell/KeyCommands.zh.md | 176 -----
hadoop-hdds/docs/content/shell/VolumeCommands.md | 114 ----
.../docs/content/shell/VolumeCommands.zh.md | 108 ---
hadoop-hdds/docs/content/shell/_index.md | 28 -
hadoop-hdds/docs/content/shell/_index.zh.md | 27 -
hadoop-hdds/docs/content/start/FromSource.md | 38 +-
hadoop-hdds/docs/content/start/FromSource.zh.md | 7 +-
hadoop-hdds/docs/content/tools/TestTools.md | 14 +-
hadoop-hdds/docs/content/tools/TestTools.zh.md | 14 +-
hadoop-hdds/docs/content/tools/_index.md | 6 +-
hadoop-hdds/docs/pom.xml | 4 +-
.../themes/ozonedoc/layouts/_default/single.html | 2 +-
.../themes/ozonedoc/layouts/design/section.html | 2 +-
.../ozonedoc/layouts/partials/languages.html | 3 +-
.../themes/ozonedoc/layouts/partials/navbar.html | 6 +-
.../themes/ozonedoc/layouts/partials/sidebar.html | 14 +-
.../docs/themes/ozonedoc/static/css/ozonedoc.css | 23 +-
hadoop-hdds/framework/pom.xml | 4 +-
.../x509/certificate/authority/BaseApprover.java | 2 +-
.../certificates/utils/CertificateSignRequest.java | 2 +-
.../certificates/utils/SelfSignedCertificate.java | 4 +-
.../server/OzoneProtocolMessageDispatcher.java | 41 +-
.../apache/hadoop/hdds/utils/HddsServerUtil.java | 13 +
.../hadoop/hdds/utils/MetadataKeyFilters.java | 42 +-
.../apache/hadoop/hdds/utils/db/DBDefinition.java | 41 +-
.../org/apache/hadoop/hdds/utils/db/DBStore.java | 8 +-
.../hadoop/hdds/utils/db/DBStoreBuilder.java | 41 +-
.../org/apache/hadoop/hdds/utils/db/RDBStore.java | 17 +-
.../org/apache/hadoop/hdds/utils/db/RDBTable.java | 105 +++
.../org/apache/hadoop/hdds/utils/db/Table.java | 69 ++
.../apache/hadoop/hdds/utils/db/TypedTable.java | 59 ++
.../resources/webapps/static/angular-1.7.9.min.js | 350 ----------
.../resources/webapps/static/angular-1.8.0.min.js | 350 ++++++++++
...ute-1.7.9.min.js => angular-route-1.8.0.min.js} | 6 +-
.../apache/hadoop/hdds/server/TestJsonUtils.java | 5 +-
hadoop-hdds/hadoop-dependency-client/README.md | 18 +-
hadoop-hdds/hadoop-dependency-client/pom.xml | 4 +-
hadoop-hdds/hadoop-dependency-server/pom.xml | 4 +-
hadoop-hdds/hadoop-dependency-test/pom.xml | 4 +-
hadoop-hdds/interface-admin/pom.xml | 4 +-
hadoop-hdds/interface-client/pom.xml | 4 +-
.../src/main/proto/DatanodeClientProtocol.proto | 4 +
.../interface-client/src/main/proto/hdds.proto | 18 +-
.../interface-client/src/main/resources/proto.lock | 40 +-
hadoop-hdds/interface-server/pom.xml | 4 +-
.../proto/ScmServerDatanodeHeartbeatProtocol.proto | 3 +-
.../src/main/proto/ScmServerProtocol.proto | 4 +-
.../interface-server/src/main/resources/proto.lock | 104 +--
hadoop-hdds/pom.xml | 10 +-
hadoop-hdds/server-scm/pom.xml | 5 +-
.../hadoop/hdds/scm/block/BlockManagerImpl.java | 16 +-
.../block/DatanodeDeletedBlockTransactions.java | 32 +-
.../hadoop/hdds/scm/block/DeletedBlockLogImpl.java | 24 +-
.../hdds/scm/block/SCMBlockDeletingService.java | 7 +-
.../container/AbstractContainerReportHandler.java | 101 ++-
.../scm/container/CloseContainerEventHandler.java | 2 +-
.../hdds/scm/container/ContainerReplica.java | 43 +-
.../hdds/scm/container/ContainerReportHandler.java | 14 +-
.../IncrementalContainerReportHandler.java | 2 +-
.../hdds/scm/container/ReplicationManager.java | 133 +++-
.../hdds/scm/container/SCMContainerManager.java | 66 +-
.../apache/hadoop/hdds/scm/ha/SCMHAManager.java | 2 +-
.../hadoop/hdds/scm/ha/SCMHAManagerImpl.java | 2 +-
.../apache/hadoop/hdds/scm/node/DatanodeInfo.java | 33 +
.../hadoop/hdds/scm/node/DeadNodeHandler.java | 2 +-
.../hadoop/hdds/scm/node/NewNodeHandler.java | 2 +-
.../apache/hadoop/hdds/scm/node/NodeManager.java | 6 +-
.../hadoop/hdds/scm/node/NodeStateManager.java | 30 +-
.../scm/node/NonHealthyToHealthyNodeHandler.java | 2 +-
.../hadoop/hdds/scm/node/SCMNodeManager.java | 53 +-
.../scm/pipeline/BackgroundPipelineCreator.java | 35 +-
.../hadoop/hdds/scm/pipeline/PipelineFactory.java | 3 +-
.../hadoop/hdds/scm/pipeline/PipelineManager.java | 6 +-
.../hdds/scm/pipeline/PipelineManagerMXBean.java | 2 +-
.../hdds/scm/pipeline/PipelineManagerV2Impl.java | 24 +-
.../hdds/scm/pipeline/PipelinePlacementPolicy.java | 42 +-
.../hdds/scm/pipeline/PipelineReportHandler.java | 2 +-
.../hdds/scm/pipeline/PipelineStateManager.java | 5 -
.../scm/pipeline/PipelineStateManagerV2Impl.java | 5 -
.../hdds/scm/pipeline/RatisPipelineProvider.java | 39 +-
.../hdds/scm/pipeline/RatisPipelineUtils.java | 2 +-
.../hdds/scm/pipeline/SCMPipelineManager.java | 16 +-
.../hadoop/hdds/scm/pipeline/StateManager.java | 1 -
.../algorithms/HealthyPipelineChoosePolicy.java} | 28 +-
.../algorithms/PipelineChoosePolicyFactory.java | 106 +++
.../algorithms/RandomPipelineChoosePolicy.java} | 20 +-
.../pipeline/choose/algorithms/package-info.java} | 14 +-
.../algorithms/DefaultLeaderChoosePolicy.java} | 24 +-
.../choose/algorithms/LeaderChoosePolicy.java | 55 ++
.../algorithms/LeaderChoosePolicyFactory.java | 75 +++
.../algorithms/MinLeaderCountChoosePolicy.java | 91 +++
.../leader/choose/algorithms/package-info.java} | 13 +-
.../SCMSecurityProtocolServerSideTranslatorPB.java | 17 +-
...lockLocationProtocolServerSideTranslatorPB.java | 2 +-
...inerLocationProtocolServerSideTranslatorPB.java | 17 +-
.../hdds/scm/safemode/ContainerSafeModeRule.java | 4 +-
.../hdds/scm/server/SCMBlockProtocolServer.java | 7 +-
.../hdds/scm/server/SCMDatanodeProtocolServer.java | 4 +-
.../apache/hadoop/hdds/scm/server/SCMMXBean.java | 4 +
.../hdds/scm/server/StorageContainerManager.java | 16 +
.../src/main/resources/webapps/scm/index.html | 4 +-
.../main/resources/webapps/scm/scm-overview.html | 18 +-
.../src/main/resources/webapps/scm/scm.js | 4 -
.../java/org/apache/hadoop/hdds/scm/TestUtils.java | 1 +
.../hadoop/hdds/scm/block/TestBlockManager.java | 69 ++
.../hadoop/hdds/scm/block/TestDeletedBlockLog.java | 17 +-
.../hadoop/hdds/scm/container/MockNodeManager.java | 41 +-
.../scm/container/TestContainerReportHandler.java | 197 +++++-
.../hdds/scm/container/TestReplicationManager.java | 92 +++
.../hadoop/hdds/scm/ha/MockSCMHAManager.java | 4 +-
.../hadoop/hdds/scm/ha/TestSCMRatisResponse.java | 4 +-
.../hadoop/hdds/scm/node/TestDeadNodeHandler.java | 5 +-
.../hadoop/hdds/scm/node/TestSCMNodeManager.java | 4 +-
.../hdds/scm/pipeline/MockPipelineManager.java | 15 +-
.../scm/pipeline/MockRatisPipelineProvider.java | 9 +-
.../scm/pipeline/TestPipelineActionHandler.java | 2 +-
.../hdds/scm/pipeline/TestPipelineManagerImpl.java | 2 +-
.../scm/pipeline/TestPipelinePlacementPolicy.java | 85 ++-
.../hdds/scm/pipeline/TestSCMPipelineManager.java | 74 +++
.../TestPipelineChoosePolicyFactory.java | 94 +++
.../choose/algorithms/TestLeaderChoosePolicy.java | 74 +++
.../ozone/container/common/TestEndPoint.java | 2 +-
.../testutils/ReplicationNodeManagerMock.java | 12 +-
hadoop-hdds/test-utils/pom.xml | 4 +-
hadoop-hdds/tools/pom.xml | 12 +-
.../org/apache/hadoop/hdds/cli/OzoneAdmin.java | 67 ++
.../org/apache/hadoop/hdds/cli/package-info.java | 6 +-
.../hdds/scm/cli/ReplicationManagerCommands.java | 23 +-
.../scm/cli/ReplicationManagerStartSubcommand.java | 21 +-
.../cli/ReplicationManagerStatusSubcommand.java | 32 +-
.../scm/cli/ReplicationManagerStopSubcommand.java | 25 +-
.../hdds/scm/cli/SafeModeCheckSubcommand.java | 40 +-
.../hadoop/hdds/scm/cli/SafeModeCommands.java | 27 +-
.../hdds/scm/cli/SafeModeExitSubcommand.java | 22 +-
.../hdds/scm/cli/SafeModeWaitSubcommand.java | 13 +-
.../org/apache/hadoop/hdds/scm/cli/ScmOption.java | 72 ++
.../WithScmClient.java => ScmSubcommand.java} | 24 +-
.../hadoop/hdds/scm/cli/TopologySubcommand.java | 65 +-
.../hdds/scm/cli/container/CloseSubcommand.java | 20 +-
.../hdds/scm/cli/container/ContainerCommands.java | 21 +-
.../hdds/scm/cli/container/CreateSubcommand.java | 26 +-
.../hdds/scm/cli/container/DeleteSubcommand.java | 20 +-
.../hdds/scm/cli/container/InfoSubcommand.java | 48 +-
.../hdds/scm/cli/container/ListSubcommand.java | 32 +-
.../hdds/scm/cli/datanode/DatanodeCommands.java | 21 +-
.../hdds/scm/cli/datanode/ListInfoSubcommand.java | 48 +-
.../cli/pipeline/ActivatePipelineSubcommand.java | 19 +-
.../scm/cli/pipeline/ClosePipelineSubcommand.java | 19 +-
.../scm/cli/pipeline/CreatePipelineSubcommand.java | 48 +-
.../cli/pipeline/DeactivatePipelineSubcommand.java | 19 +-
.../scm/cli/pipeline/ListPipelinesSubcommand.java | 40 +-
.../hdds/scm/cli/pipeline/PipelineCommands.java | 22 +-
hadoop-ozone/client/pom.xml | 4 +-
.../org/apache/hadoop/ozone/client/BucketArgs.java | 43 +-
.../apache/hadoop/ozone/client/ObjectStore.java | 3 +
.../apache/hadoop/ozone/client/OzoneBucket.java | 103 +++
.../hadoop/ozone/client/OzoneClientFactory.java | 8 +-
.../apache/hadoop/ozone/client/OzoneVolume.java | 103 ++-
.../org/apache/hadoop/ozone/client/VolumeArgs.java | 45 +-
.../ozone/client/io/BlockOutputStreamEntry.java | 30 +-
.../client/io/BlockOutputStreamEntryPool.java | 34 +-
.../hadoop/ozone/client/io/KeyInputStream.java | 98 +--
.../hadoop/ozone/client/io/KeyOutputStream.java | 60 +-
.../hadoop/ozone/client/io/OzoneInputStream.java | 5 +
.../ozone/client/protocol/ClientProtocol.java | 29 +-
.../apache/hadoop/ozone/client/rpc/RpcClient.java | 110 +++-
hadoop-ozone/common/pom.xml | 4 +-
.../main/java/org/apache/hadoop/ozone/OmUtils.java | 28 +
.../org/apache/hadoop/ozone/audit/OMAction.java | 1 +
.../apache/hadoop/ozone/freon/OzoneGetConf.java | 278 --------
.../hadoop/ozone/om/exceptions/OMException.java | 7 +-
.../ozone/om/exceptions/OMNotLeaderException.java | 2 +-
.../ozone/om/ha/OMFailoverProxyProvider.java | 253 ++++++--
.../hadoop/ozone/om/helpers/OmBucketArgs.java | 50 +-
.../hadoop/ozone/om/helpers/OmBucketInfo.java | 78 ++-
.../hadoop/ozone/om/helpers/OmKeyLocationInfo.java | 6 +-
.../ozone/om/helpers/OmKeyLocationInfoGroup.java | 8 +-
.../hadoop/ozone/om/helpers/OmRenameKeys.java | 59 ++
.../hadoop/ozone/om/helpers/OmVolumeArgs.java | 64 +-
.../hadoop/ozone/om/helpers/OzoneFileStatus.java | 18 +
.../ozone/om/protocol/OzoneManagerProtocol.java | 15 +-
.../ozone/om/protocolPB/Hadoop3OmTransport.java | 175 +----
...OzoneManagerProtocolClientSideTranslatorPB.java | 44 +-
.../ozone/security/OzoneTokenIdentifier.java | 5 +-
.../apache/hadoop/ozone/util/OzoneVersionInfo.java | 15 +-
.../hadoop/ozone/om/helpers/TestOmVolumeArgs.java | 4 +-
hadoop-ozone/csi/pom.xml | 4 +-
hadoop-ozone/datanode/pom.xml | 4 +-
hadoop-ozone/dev-support/checks/build.sh | 2 +-
hadoop-ozone/dev-support/checks/findbugs.sh | 7 +-
hadoop-ozone/dev-support/checks/kubernetes.sh | 2 +-
hadoop-ozone/dev-support/intellij/core-site.xml | 27 +
hadoop-ozone/dist/README.md | 52 +-
.../dist/dev-support/bin/dist-layout-stitching | 3 +
hadoop-ozone/dist/pom.xml | 4 +-
hadoop-ozone/dist/src/main/compose/failing1/.env | 1 +
.../src/main/compose/failing1/docker-compose.yaml | 1 +
.../dist/src/main/compose/failing1/docker-config | 1 +
.../compose/{ozone-om-ha-s3 => failing1}/test.sh | 10 +-
hadoop-ozone/dist/src/main/compose/failing2/.env | 1 +
.../src/main/compose/failing2/docker-compose.yaml | 1 +
.../dist/src/main/compose/failing2/docker-config | 1 +
.../compose/{ozone-om-ha-s3 => failing2}/test.sh | 10 +-
.../dist/src/main/compose/ozone-csi/docker-config | 3 +
.../dist/src/main/compose/{ozone => ozone-ha}/.env | 0
.../src/main/compose/ozone-ha/docker-compose.yaml | 93 +++
.../compose/{ozone-csi => ozone-ha}/docker-config | 19 +-
.../compose/{ozone-om-ha-s3 => ozone-ha}/test.sh | 7 +-
.../dist/src/main/compose/ozone-mr/common-config | 3 +-
.../main/compose/ozone-mr/hadoop27/docker-config | 1 +
.../src/main/compose/ozone-mr/hadoop27/test.sh | 7 +-
.../main/compose/ozone-mr/hadoop31/docker-config | 1 +
.../src/main/compose/ozone-mr/hadoop31/test.sh | 7 +-
.../main/compose/ozone-mr/hadoop32/docker-config | 1 +
.../src/main/compose/ozone-mr/hadoop32/test.sh | 7 +-
.../dist/src/main/compose/ozone-mr/test.sh | 22 +-
.../src/main/compose/ozone-om-ha-s3/docker-config | 5 +-
.../dist/src/main/compose/ozone-om-ha-s3/test.sh | 2 +
.../src/main/compose/ozone-om-ha/docker-config | 4 +-
.../src/main/compose/ozone-topology/docker-config | 7 +-
.../dist/src/main/compose/ozone/docker-config | 7 +-
.../src/main/compose/ozoneblockade/docker-config | 2 +
.../src/main/compose/ozones3-haproxy/docker-config | 3 +
.../src/main/compose/ozonesecure-mr/docker-config | 4 +-
.../dist/src/main/compose/ozonesecure-mr/test.sh | 5 +-
.../main/compose/ozonesecure-om-ha/docker-config | 11 +-
.../src/main/compose/ozonesecure-om-ha/test.sh | 2 +
.../src/main/compose/ozonesecure/docker-config | 9 +-
hadoop-ozone/dist/src/main/compose/test-all.sh | 21 +-
hadoop-ozone/dist/src/main/compose/testlib.sh | 48 +-
.../dist/src/main/compose/upgrade/README.md | 2 +-
.../dist/src/main/compose/upgrade/docker-config | 3 +-
hadoop-ozone/dist/src/main/compose/upgrade/test.sh | 5 +-
.../src/main/compose/upgrade/versions/README.md | 2 +-
.../main/compose/upgrade/versions/ozone-0.5.0.sh | 1 +
.../versions/{ozone-0.6.0.sh => ozone-1.0.0.sh} | 1 +
.../main/k8s/definitions/ozone/freon/freon.yaml | 2 +-
.../getting-started/freon/freon-deployment.yaml | 2 +-
.../src/main/k8s/examples/getting-started/test.sh | 2 +
.../examples/minikube/freon/freon-deployment.yaml | 2 +-
.../dist/src/main/k8s/examples/minikube/test.sh | 2 +
.../ozone-dev/csi/csi-provisioner-deployment.yaml | 2 +-
.../examples/ozone-dev/freon/freon-deployment.yaml | 2 +-
.../dist/src/main/k8s/examples/ozone-dev/test.sh | 2 +
.../ozone/csi/csi-provisioner-deployment.yaml | 2 +-
.../k8s/examples/ozone/freon/freon-deployment.yaml | 2 +-
.../dist/src/main/k8s/examples/ozone/test.sh | 2 +
.../dist/src/main/k8s/examples/test-all.sh | 11 +-
hadoop-ozone/dist/src/main/k8s/examples/testlib.sh | 11 +-
hadoop-ozone/dist/src/main/license/bin/LICENSE.txt | 4 +-
.../loaddata.robot => admincli/admin.robot} | 23 +-
.../src/main/smoketest/admincli/container.robot | 73 +++
.../src/main/smoketest/admincli/datanode.robot | 19 +-
.../src/main/smoketest/admincli/pipeline.robot | 49 +-
.../smoketest/admincli/replicationmanager.robot | 53 ++
.../src/main/smoketest/admincli/safemode.robot | 45 ++
.../main/smoketest/auditparser/auditparser.robot | 2 +-
.../dist/src/main/smoketest/basic/basic.robot | 2 +-
.../dist/src/main/smoketest/basic/getconf.robot | 46 ++
.../{ozone-shell.robot => ozone-shell-lib.robot} | 78 ++-
.../ozone-shell-single.robot} | 11 +-
.../src/main/smoketest/basic/ozone-shell.robot | 121 +---
.../dist/src/main/smoketest/createbucketenv.robot | 2 +-
.../dist/src/main/smoketest/createmrenv.robot | 15 +-
.../src/main/smoketest/debug/ozone-debug.robot | 6 +-
.../src/main/smoketest/failing/test1.robot} | 7 +-
.../src/main/smoketest/failing/test2.robot} | 7 +-
.../dist/src/main/smoketest/freon/freon.robot | 2 +-
.../dist/src/main/smoketest/gdpr/gdpr.robot | 2 +-
.../dist/src/main/smoketest/mapreduce.robot | 11 +-
.../main/smoketest/om-ratis/testOMAdminCmd.robot | 2 +-
.../dist/src/main/smoketest/omha/testOMHA.robot | 2 +-
.../src/main/smoketest/ozonefs/hadoopo3fs.robot | 12 +-
.../dist/src/main/smoketest/ozonefs/setup.robot | 8 +-
.../dist/src/main/smoketest/recon/recon-api.robot | 2 +-
hadoop-ozone/dist/src/main/smoketest/robot.robot | 81 ---
.../src/main/smoketest/s3/MultipartUpload.robot | 130 ++--
.../dist/src/main/smoketest/s3/commonawslib.robot | 5 +
.../dist/src/main/smoketest/s3/objectcopy.robot | 23 +-
.../dist/src/main/smoketest/s3/objectdelete.robot | 28 +-
.../src/main/smoketest/s3/objectmultidelete.robot | 24 +-
.../dist/src/main/smoketest/s3/objectputget.robot | 40 +-
.../src/main/smoketest/s3/s3_compatbility_check.sh | 47 ++
.../smoketest/security/ozone-secure-token.robot | 16 +-
.../dist/src/main/smoketest/spnego/web.robot | 2 +-
.../src/main/smoketest/topology/loaddata.robot | 2 +-
hadoop-ozone/dist/src/shell/ozone/ozone | 17 +-
hadoop-ozone/dist/src/shell/ozone/stop-ozone.sh | 8 +-
.../dist/src/shell/upgrade/{0.6.0.sh => 1.0.0.sh} | 2 +-
.../upgrade/{0.6.0 => 1.0.0}/01-migrate-scm-db.sh | 0
.../fault-injection-test/mini-chaos-tests/pom.xml | 4 +-
.../hadoop/ozone/TestMiniChaosOzoneCluster.java | 40 +-
.../services/org.apache.hadoop.fs.FileSystem | 1 +
.../fault-injection-test/network-tests/pom.xml | 2 +-
.../src/test/blockade/ozone/client.py | 10 +-
hadoop-ozone/fault-injection-test/pom.xml | 4 +-
hadoop-ozone/insight/pom.xml | 4 +-
.../hadoop/ozone/insight/BaseInsightPoint.java | 34 +-
.../ozone/insight/BaseInsightSubCommand.java | 6 +
.../org/apache/hadoop/ozone/insight/Insight.java | 4 +
.../apache/hadoop/ozone/insight/InsightPoint.java | 4 +-
.../hadoop/ozone/insight/MetricGroupDisplay.java | 4 +-
.../hadoop/ozone/insight/MetricsSubCommand.java | 33 +-
.../datanode/DatanodeDispatcherInsight.java | 107 +++
.../insight/datanode/PipelineComponentUtil.java | 78 +++
.../ozone/insight/datanode/RatisInsight.java | 45 +-
.../hadoop/ozone/insight/om/KeyManagerInsight.java | 2 +-
.../hadoop/ozone/insight/om/OmProtocolInsight.java | 2 +-
.../ozone/insight/scm/NodeManagerInsight.java | 2 +-
.../ozone/insight/scm/ReplicaManagerInsight.java | 2 +-
.../scm/ScmProtocolBlockLocationInsight.java | 2 +-
.../scm/ScmProtocolContainerLocationInsight.java | 2 +-
.../insight/scm/ScmProtocolDatanodeInsight.java | 2 +-
.../insight/scm/ScmProtocolSecurityInsight.java | 2 +-
hadoop-ozone/integration-test/pom.xml | 4 +-
.../fs/ozone/TestOzoneFSWithObjectStoreCreate.java | 160 +++++
.../hadoop/fs/ozone/TestOzoneFileInterfaces.java | 15 +-
.../hadoop/fs/ozone/TestOzoneFileSystem.java | 28 +-
.../hadoop/fs/ozone/TestRootedOzoneFileSystem.java | 212 ++++--
.../ozone/contract/rooted/RootedOzoneContract.java | 7 +-
.../hdds/scm/pipeline/TestLeaderChoosePolicy.java | 216 ++++++
.../TestRatisPipelineCreateAndDestroy.java | 2 +-
.../org/apache/hadoop/ozone/OzoneTestUtils.java | 20 +-
.../java/org/apache/hadoop/ozone/TestDataUtil.java | 13 +-
.../apache/hadoop/ozone/TestMiniOzoneCluster.java | 4 +-
.../hadoop/ozone/TestOzoneConfigurationFields.java | 11 +-
.../ozone/TestStorageContainerManagerHelper.java | 31 +-
.../rpc/TestBlockOutputStreamWithFailures.java | 4 +-
...estBlockOutputStreamWithFailuresFlushDelay.java | 4 +-
.../rpc/TestCloseContainerHandlingByClient.java | 55 --
.../hadoop/ozone/client/rpc/TestCommitWatcher.java | 39 +-
.../rpc/TestContainerStateMachineFailures.java | 20 +-
.../client/rpc/TestDiscardPreallocatedBlocks.java | 186 ++++++
.../ozone/client/rpc/TestKeyInputStream.java | 119 ++--
.../rpc/TestOzoneClientRetriesOnException.java | 2 +-
...estOzoneClientRetriesOnExceptionFlushDelay.java | 2 +-
.../client/rpc/TestOzoneRpcClientAbstract.java | 721 ++++++++++++++++++++-
.../hadoop/ozone/client/rpc/TestReadRetries.java | 40 +-
.../client/rpc/TestValidateBCSIDOnRestart.java | 8 +-
.../ozone/client/rpc/TestWatchForCommit.java | 7 +-
.../commandhandler/TestBlockDeletion.java | 171 ++++-
.../ozoneimpl/TestOzoneContainerRatis.java | 138 ----
.../container/ozoneimpl/TestRatisManager.java | 124 ----
.../ozone/freon/TestFreonWithDatanodeRestart.java | 1 -
.../ozone/freon/TestHadoopNestedDirGenerator.java | 203 ++++++
.../ozone/freon/TestOzoneClientKeyGenerator.java | 2 -
.../hadoop/ozone/fsck/TestContainerMapper.java | 3 +
.../apache/hadoop/ozone/om/TestKeyManagerImpl.java | 18 +-
.../hadoop/ozone/om/TestOMRatisSnapshots.java | 2 -
.../org/apache/hadoop/ozone/om/TestOmLDBCli.java | 120 ++++
.../org/apache/hadoop/ozone/om/TestOmSQLCli.java | 235 -------
.../ozone/om/TestOzoneManagerHAMetadataOnly.java | 76 +++
.../ozone/om/TestOzoneManagerRocksDBLogging.java | 2 +-
.../ozone/recon/TestReconWithOzoneManager.java | 32 +-
.../hadoop/ozone/scm/TestCloseContainer.java | 148 +++++
.../hadoop/ozone/scm/TestContainerSmallFile.java | 20 +-
.../scm/TestGetCommittedBlockLengthAndPutKey.java | 4 +-
.../hadoop/ozone/scm/TestXceiverClientGrpc.java | 6 +-
.../hadoop/ozone/shell/TestOzoneDatanodeShell.java | 2 +-
.../hadoop/ozone/shell/TestOzoneShellHA.java | 48 +-
.../apache/hadoop/ozone/shell/TestScmAdminHA.java | 3 +-
hadoop-ozone/interface-client/pom.xml | 4 +-
.../src/main/proto/OmClientProtocol.proto | 54 ++
.../dev-support/findbugsExcludeFile.xml | 21 +
.../pom.xml | 54 +-
.../apache/hadoop/ozone/om/OMMetadataManager.java | 10 +-
.../ozone/om/codec/OMTransactionInfoCodec.java | 0
.../hadoop/ozone/om/codec/OmBucketInfoCodec.java | 0
.../hadoop/ozone/om/codec/OmKeyInfoCodec.java | 0
.../ozone/om/codec/OmMultipartKeyInfoCodec.java | 0
.../hadoop/ozone/om/codec/OmPrefixInfoCodec.java | 5 +-
.../hadoop/ozone/om/codec/OmVolumeArgsCodec.java | 0
.../ozone/om/codec/RepeatedOmKeyInfoCodec.java | 0
.../hadoop/ozone/om/codec/S3SecretValueCodec.java | 0
.../ozone/om/codec/TokenIdentifierCodec.java | 0
.../hadoop/ozone/om/codec/UserVolumeInfoCodec.java | 0
.../apache/hadoop/ozone/om/codec/package-info.java | 2 +-
.../hadoop/ozone/om/helpers/OmPrefixInfo.java | 13 +-
.../hadoop/ozone/om/helpers/OzoneAclStorage.java | 63 ++
.../ozone/om/helpers/OzoneAclStorageUtil.java | 62 ++
.../hadoop/ozone/om/helpers}/package-info.java | 4 +-
.../org/apache/hadoop/ozone/om}/package-info.java | 4 +-
.../hadoop/ozone/om/ratis/OMTransactionInfo.java | 7 +-
.../hadoop/ozone/om/ratis}/package-info.java | 4 +-
.../src/main/proto/OmStorageProtocol.proto | 60 ++
.../ozone/om/codec/TestOMTransactionInfoCodec.java | 0
.../hadoop/ozone/om/codec/TestOmKeyInfoCodec.java | 0
.../om/codec/TestOmMultipartKeyInfoCodec.java | 0
.../ozone/om/codec/TestOmPrefixInfoCodec.java | 0
.../ozone/om/codec/TestRepeatedOmKeyInfoCodec.java | 0
.../ozone/om/codec/TestS3SecretValueCodec.java | 0
.../apache/hadoop/ozone/om/codec/package-info.java | 0
.../hadoop/ozone/om/helpers/TestOmPrefixInfo.java | 0
.../hadoop/ozone/om/helpers}/package-info.java | 4 +-
hadoop-ozone/ozone-manager/pom.xml | 10 +-
.../apache/hadoop/ozone/om/BucketManagerImpl.java | 4 +-
.../apache/hadoop/ozone/om/KeyDeletingService.java | 9 +-
.../org/apache/hadoop/ozone/om/KeyManager.java | 11 +-
.../org/apache/hadoop/ozone/om/KeyManagerImpl.java | 86 ++-
.../java/org/apache/hadoop/ozone/om/OMMetrics.java | 37 ++
.../hadoop/ozone/om/OmMetadataManagerImpl.java | 38 +-
.../hadoop/ozone/om/OpenKeyCleanupService.java | 45 +-
.../org/apache/hadoop/ozone/om/OzoneManager.java | 127 ++--
.../org/apache/hadoop/ozone/om/VolumeManager.java | 9 -
.../apache/hadoop/ozone/om/VolumeManagerImpl.java | 39 --
.../apache/hadoop/ozone/om/codec/package-info.java | 3 +
.../apache/hadoop/ozone/om/fs/OzoneManagerFS.java | 53 +-
.../ozone/om/ratis/OzoneManagerRatisServer.java | 18 +-
.../ozone/om/ratis/OzoneManagerStateMachine.java | 20 +-
.../om/ratis/utils/OzoneManagerRatisUtils.java | 3 +
.../hadoop/ozone/om/request/OMClientRequest.java | 18 +-
.../om/request/bucket/OMBucketCreateRequest.java | 38 ++
.../request/bucket/OMBucketSetPropertyRequest.java | 64 ++
.../bucket/acl/OMBucketRemoveAclRequest.java | 2 +-
.../request/bucket/acl/OMBucketSetAclRequest.java | 2 +-
.../ozone/om/request/file/OMFileCreateRequest.java | 47 +-
.../om/request/key/OMAllocateBlockRequest.java | 46 +-
.../ozone/om/request/key/OMKeyCommitRequest.java | 48 +-
.../ozone/om/request/key/OMKeyCreateRequest.java | 49 +-
.../ozone/om/request/key/OMKeyDeleteRequest.java | 26 +-
.../hadoop/ozone/om/request/key/OMKeyRequest.java | 124 +++-
.../ozone/om/request/key/OMKeysDeleteRequest.java | 20 +-
.../ozone/om/request/key/OMKeysRenameRequest.java | 271 ++++++++
.../om/request/key/OMTrashRecoverRequest.java | 2 +-
.../om/request/key/acl/OMKeyRemoveAclRequest.java | 2 +-
.../om/request/key/acl/OMKeySetAclRequest.java | 2 +-
.../key/acl/prefix/OMPrefixRemoveAclRequest.java | 2 +-
.../key/acl/prefix/OMPrefixSetAclRequest.java | 2 +-
.../multipart/S3MultipartUploadAbortRequest.java | 41 +-
.../S3MultipartUploadCommitPartRequest.java | 22 +-
.../S3MultipartUploadCompleteRequest.java | 10 +
.../om/request/volume/OMVolumeCreateRequest.java | 6 +
.../om/request/volume/OMVolumeSetQuotaRequest.java | 52 +-
.../hadoop/ozone/om/response/CleanupTableInfo.java | 3 +-
.../om/response/file/OMFileCreateResponse.java | 10 +-
...ponse.java => AbstractOMKeyDeleteResponse.java} | 52 +-
.../om/response/key/OMAllocateBlockResponse.java | 18 +-
.../ozone/om/response/key/OMKeyCommitResponse.java | 18 +-
.../ozone/om/response/key/OMKeyCreateResponse.java | 20 +-
.../ozone/om/response/key/OMKeyDeleteResponse.java | 73 +--
.../om/response/key/OMKeysDeleteResponse.java | 56 +-
...mmitResponse.java => OMKeysRenameResponse.java} | 51 +-
.../om/response/key/OMOpenKeysDeleteRequest.java | 192 ++++++
...Response.java => OMOpenKeysDeleteResponse.java} | 48 +-
.../multipart/S3MultipartUploadAbortResponse.java | 20 +-
.../S3MultipartUploadCommitPartResponse.java | 18 +-
...OzoneManagerProtocolServerSideTranslatorPB.java | 4 +-
.../OzoneDelegationTokenSecretManager.java | 2 +-
.../main/resources/webapps/ozoneManager/index.html | 4 +-
.../hadoop/ozone/om/TestOmMetadataManager.java | 77 +++
.../hadoop/ozone/om/failover/TestOMFailovers.java | 151 +++++
.../ozone/om/request/TestOMRequestUtils.java | 130 +++-
.../bucket/TestOMBucketSetPropertyRequest.java | 41 +-
.../request/file/TestOMDirectoryCreateRequest.java | 4 +-
.../om/request/key/TestOMAllocateBlockRequest.java | 22 +-
.../key/TestOMKeyPurgeRequestAndResponse.java | 15 +-
.../ozone/om/request/key/TestOMKeyRequest.java | 7 +-
.../om/request/key/TestOMKeysRenameRequest.java | 160 +++++
.../request/key/TestOMOpenKeysDeleteRequest.java | 419 ++++++++++++
.../s3/multipart/TestS3MultipartRequest.java | 4 +-
.../volume/TestOMVolumeSetOwnerRequest.java | 2 +-
.../volume/TestOMVolumeSetQuotaRequest.java | 69 +-
.../bucket/TestOMBucketCreateResponse.java | 8 +
.../bucket/TestOMBucketDeleteResponse.java | 8 +
.../bucket/TestOMBucketSetPropertyResponse.java | 9 +
.../file/TestOMDirectoryCreateResponse.java | 8 +
.../response/key/TestOMAllocateBlockResponse.java | 27 +-
.../om/response/key/TestOMKeyCommitResponse.java | 19 +-
.../om/response/key/TestOMKeyCreateResponse.java | 23 +-
.../om/response/key/TestOMKeyDeleteResponse.java | 27 +-
.../ozone/om/response/key/TestOMKeyResponse.java | 3 +
.../om/response/key/TestOMKeysDeleteResponse.java | 38 +-
.../om/response/key/TestOMKeysRenameResponse.java | 131 ++++
.../response/key/TestOMOpenKeysDeleteResponse.java | 185 ++++++
.../s3/multipart/TestS3MultipartResponse.java | 16 +-
.../TestS3MultipartUploadAbortResponse.java | 21 +-
.../security/TestOMDelegationTokenResponse.java | 8 +
.../volume/TestOMVolumeCreateResponse.java | 8 +
.../volume/TestOMVolumeDeleteResponse.java | 8 +
.../volume/TestOMVolumeSetOwnerResponse.java | 9 +
.../volume/TestOMVolumeSetQuotaResponse.java | 9 +
hadoop-ozone/ozonefs-common/pom.xml | 4 +-
.../fs/ozone/BasicOzoneClientAdapterImpl.java | 23 +-
.../hadoop/fs/ozone/BasicOzoneFileSystem.java | 98 ++-
.../ozone/BasicRootedOzoneClientAdapterImpl.java | 138 ++--
.../fs/ozone/BasicRootedOzoneFileSystem.java | 151 ++++-
.../hadoop/fs/ozone/OzoneClientAdapterImpl.java | 4 +-
.../fs/ozone/RootedOzoneClientAdapterImpl.java | 4 +-
.../services/org.apache.hadoop.fs.FileSystem | 1 +
hadoop-ozone/ozonefs-hadoop2/pom.xml | 4 +-
.../org/apache/hadoop/fs/ozone/RootedOzFs.java | 36 +-
.../services/org.apache.hadoop.fs.FileSystem | 1 +
hadoop-ozone/ozonefs-hadoop3/pom.xml | 4 +-
.../apache/hadoop/fs/ozone/OzoneFileSystem.java | 4 +-
.../org/apache/hadoop/fs/ozone/RootedOzFs.java | 33 +-
.../hadoop/fs/ozone/RootedOzoneFileSystem.java | 4 +-
.../services/org.apache.hadoop.fs.FileSystem | 1 +
hadoop-ozone/ozonefs-shaded/pom.xml | 9 +-
hadoop-ozone/ozonefs/pom.xml | 4 +-
.../apache/hadoop/fs/ozone/OzoneFileSystem.java | 4 +-
.../org/apache/hadoop/fs/ozone/RootedOzFs.java | 33 +-
.../hadoop/fs/ozone/RootedOzoneFileSystem.java | 4 +-
.../services/org.apache.hadoop.fs.FileSystem | 1 +
hadoop-ozone/pom.xml | 17 +-
hadoop-ozone/recon-codegen/pom.xml | 2 +-
.../recon/schema/ContainerSchemaDefinition.java | 4 +-
.../recon/schema/ReconTaskSchemaDefinition.java | 2 +-
.../ozone/recon/schema/StatsSchemaDefinition.java | 14 +-
.../recon/schema/UtilizationSchemaDefinition.java | 23 +-
hadoop-ozone/recon/pom.xml | 2 +-
.../ozone/recon/MetricsServiceProviderFactory.java | 86 +++
.../apache/hadoop/ozone/recon/ReconConstants.java | 8 +-
.../hadoop/ozone/recon/ReconControllerModule.java | 3 +
.../hadoop/ozone/recon/ReconSchemaManager.java | 2 +-
.../hadoop/ozone/recon/ReconServerConfigKeys.java | 50 +-
.../org/apache/hadoop/ozone/recon/ReconUtils.java | 47 +-
.../ozone/recon/api/ClusterStateEndpoint.java | 47 +-
.../hadoop/ozone/recon/api/ContainerEndpoint.java | 2 +-
.../ozone/recon/api/MetricsProxyEndpoint.java | 118 ++++
.../hadoop/ozone/recon/api/PipelineEndpoint.java | 59 +-
.../ozone/recon/codec/DatanodeDetailsCodec.java | 5 +-
.../apache/hadoop/ozone/recon/metrics/Metric.java | 34 +-
.../ReconIncrementalContainerReportHandler.java | 4 +-
.../ozone/recon/spi/MetricsServiceProvider.java | 60 ++
.../spi/impl/ContainerDBServiceProviderImpl.java | 33 +-
.../spi/impl/OzoneManagerServiceProviderImpl.java | 54 +-
.../spi/impl/PrometheusServiceProviderImpl.java | 213 ++++++
.../ozone/recon/tasks/FileSizeCountTask.java | 4 +-
.../hadoop/ozone/recon/tasks/OMDBUpdateEvent.java | 2 +-
.../ozone/recon/tasks/OMDBUpdatesHandler.java | 92 ++-
.../ozone/recon/tasks/ReconTaskControllerImpl.java | 10 +-
.../hadoop/ozone/recon/tasks/TableCountTask.java | 185 ++++++
.../webapps/recon/ozone-recon-web/pnpm-lock.yaml | 66 +-
.../src/views/overview/overview.tsx | 2 +-
.../hadoop/ozone/recon/ReconTestInjector.java | 3 +
.../apache/hadoop/ozone/recon/TestReconUtils.java | 6 +-
.../hadoop/ozone/recon/api/TestEndpoints.java | 151 ++++-
.../impl/TestOzoneManagerServiceProviderImpl.java | 9 +-
.../ozone/recon/tasks/TestOMDBUpdatesHandler.java | 54 +-
.../recon/tasks/TestReconTaskControllerImpl.java | 6 +-
.../ozone/recon/tasks/TestTableCountTask.java | 178 +++++
.../test/resources/prometheus-test-response.txt | 21 +
hadoop-ozone/s3gateway/pom.xml | 9 +-
.../hadoop/ozone/s3/S3GatewayConfigKeys.java | 6 +
.../hadoop/ozone/s3/endpoint/ObjectEndpoint.java | 69 +-
.../hadoop/ozone/s3/io/S3WrapperInputStream.java | 36 +-
.../hadoop/ozone/client/ObjectStoreStub.java | 5 +-
.../hadoop/ozone/client/OzoneVolumeStub.java | 6 +-
.../s3/endpoint/TestMultipartUploadWithCopy.java | 15 +-
hadoop-ozone/tools/pom.xml | 14 +-
.../org/apache/hadoop/ozone/admin/OzoneAdmin.java | 122 ----
.../ozone/admin/om/GetServiceRolesSubcommand.java | 4 +-
.../org/apache/hadoop/ozone/admin/om/OMAdmin.java | 12 +-
.../admin/scm/GetScmRatisRolesSubcommand.java | 9 +-
.../apache/hadoop/ozone/admin/scm/ScmAdmin.java | 6 +-
.../org/apache/hadoop/ozone/conf/OzoneGetConf.java | 86 +++
.../ozone/conf/OzoneManagersCommandHandler.java | 53 ++
.../ozone/conf/PrintConfKeyCommandHandler.java | 52 ++
.../StorageContainerManagersCommandHandler.java | 52 ++
.../org/apache/hadoop/ozone/conf/package-info.java | 11 +-
.../apache/hadoop/ozone/debug/ChunkKeyHandler.java | 165 +++--
.../hadoop/ozone/debug/ContainerChunkInfo.java | 21 +-
.../org/apache/hadoop/ozone/debug/DBScanner.java | 116 +++-
.../org/apache/hadoop/ozone/debug/DropTable.java | 81 +++
.../org/apache/hadoop/ozone/debug/ListTables.java | 19 +-
.../org/apache/hadoop/ozone/debug/OzoneDebug.java | 11 +-
.../org/apache/hadoop/ozone/debug/RDBParser.java | 38 +-
.../debug/{ListTables.java => RocksDBUtils.java} | 36 +-
.../hadoop/ozone/freon/BaseFreonGenerator.java | 22 +
.../hadoop/ozone/freon/ContentGenerator.java | 31 +-
.../hadoop/ozone/freon/DatanodeChunkValidator.java | 244 +++++++
.../java/org/apache/hadoop/ozone/freon/Freon.java | 5 +
.../hadoop/ozone/freon/HadoopDirTreeGenerator.java | 55 +-
.../hadoop/ozone/freon/HadoopFsGenerator.java | 12 +-
.../ozone/freon/HadoopNestedDirGenerator.java | 27 +-
.../hadoop/ozone/freon/RandomKeyGenerator.java | 53 +-
.../hadoop/ozone/segmentparser/RatisLogParser.java | 12 +-
.../hadoop/ozone/shell/ClearSpaceQuotaOptions.java | 26 +-
.../apache/hadoop/ozone/shell/OzoneAddress.java | 103 ++-
.../org/apache/hadoop/ozone/shell/OzoneShell.java | 14 +-
.../hadoop/ozone/shell/SetSpaceQuotaOptions.java | 27 +-
.../java/org/apache/hadoop/ozone/shell/Shell.java | 8 +-
.../hadoop/ozone/shell/bucket/BucketCommands.java | 16 +-
.../ClearQuotaHandler.java} | 32 +-
.../ozone/shell/bucket/CreateBucketHandler.java | 13 +
.../hadoop/ozone/shell/bucket/SetQuotaHandler.java | 62 ++
.../hadoop/ozone/shell/keys/KeyCommands.java | 12 +-
.../hadoop/ozone/shell/token/GetTokenHandler.java | 8 +-
.../ozone/shell/token/PrintTokenHandler.java | 3 +-
.../ozone/shell/token/RenewTokenHandler.java | 4 +-
.../hadoop/ozone/shell/token/TokenCommands.java | 16 +-
.../hadoop/ozone/shell/token/TokenOption.java | 38 +-
.../ClearQuotaHandler.java} | 30 +-
.../ozone/shell/volume/CreateVolumeHandler.java | 18 +-
...dateVolumeHandler.java => SetQuotaHandler.java} | 47 +-
.../ozone/shell/volume/UpdateVolumeHandler.java | 11 -
.../hadoop/ozone/shell/volume/VolumeCommands.java | 16 +-
.../services/org.apache.hadoop.fs.FileSystem | 1 +
.../hadoop/ozone/conf/TestGetConfOptions.java | 90 +++
.../hadoop/ozone/freon/TestContentGenerator.java | 82 +++
.../TestGenerateOzoneRequiredConfigurations.java | 5 +-
.../hadoop/ozone/shell/TestOzoneAddress.java | 6 +-
.../shell/TestOzoneAddressClientCreation.java | 172 +++++
pom.xml | 36 +-
823 files changed, 21890 insertions(+), 8817 deletions(-)
create mode 100755 .github/close-pending.sh
create mode 100644 .github/closing-message.txt
copy hadoop-ozone/fault-injection-test/mini-chaos-tests/src/test/resources/META-INF/services/org.apache.hadoop.fs.FileSystem => .github/workflows/close-pending.yaml (65%)
copy hadoop-ozone/fault-injection-test/mini-chaos-tests/src/test/resources/META-INF/services/org.apache.hadoop.fs.FileSystem => dev-support/byteman/appendlog.btm (66%)
create mode 100644 dev-support/byteman/hcfs-read.btm
create mode 100644 dev-support/byteman/hcfs-write.btm
copy hadoop-ozone/dist/src/main/smoketest/admincli/datanode.robot => dev-support/byteman/ratis-flush.btm (57%)
copy hadoop-ozone/fault-injection-test/mini-chaos-tests/src/test/resources/META-INF/services/org.apache.hadoop.fs.FileSystem => dev-support/byteman/ratis-no-flush.btm (78%)
copy hadoop-ozone/dist/src/main/smoketest/admincli/datanode.robot => dev-support/byteman/watchforcommit.btm (56%)
create mode 100644 dev-support/byteman/watchforcommit_all.btm
rename hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/freon/package-info.java => hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientFactory.java (60%)
create mode 100644 hadoop-hdds/client/src/test/java/org/apache/hadoop/hdds/scm/storage/TestBlockOutputStreamCorrectness.java
copy hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/RDBParser.java => hadoop-hdds/client/src/test/java/org/apache/hadoop/hdds/scm/storage/TestBufferPool.java (56%)
copy hadoop-hdds/{tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/WithScmClient.java => common/src/main/java/org/apache/hadoop/hdds/cli/SubcommandWithParent.java} (77%)
copy hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/{utils/BackgroundTask.java => scm/PipelineChoosePolicy.java} (62%)
copy hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/{recon/ReconConfigKeys.java => scm/PipelineRequestInformation.java} (54%)
copy hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/{VersionInfo.java => RatisVersionInfo.java} (50%)
create mode 100644 hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ChunkInfoList.java
copy hadoop-hdds/config/src/main/java/org/apache/hadoop/hdds/conf/{ConfigType.java => InMemoryConfiguration.java} (51%)
create mode 100644 hadoop-hdds/config/src/test/java/org/apache/hadoop/hdds/conf/TestConfigurationReflectionUtil.java
delete mode 100644 hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueBlockIterator.java
create mode 100644 hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/AbstractDatanodeDBDefinition.java
create mode 100644 hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/AbstractDatanodeStore.java
create mode 100644 hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/BlockDataCodec.java
create mode 100644 hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/ChunkInfoListCodec.java
create mode 100644 hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/DatanodeSchemaOneDBDefinition.java
create mode 100644 hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/DatanodeSchemaTwoDBDefinition.java
create mode 100644 hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/DatanodeStore.java
create mode 100644 hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/DatanodeStoreSchemaOneImpl.java
create mode 100644 hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/DatanodeStoreSchemaTwoImpl.java
create mode 100644 hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/DatanodeTable.java
create mode 100644 hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/SchemaOneChunkInfoListCodec.java
create mode 100644 hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/SchemaOneDeletedBlocksTable.java
create mode 100644 hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/metadata/SchemaOneKeyCodec.java
copy hadoop-hdds/{common/src/main/java/org/apache/hadoop/hdds/cli => container-service/src/main/java/org/apache/hadoop/ozone/container/metadata}/package-info.java (81%)
create mode 100644 hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestSchemaOneBackwardsCompatibility.java
create mode 100644 hadoop-hdds/container-service/src/test/resources/123-dn-container.db/000024.sst
create mode 100644 hadoop-hdds/container-service/src/test/resources/123-dn-container.db/000026.sst
create mode 100644 hadoop-hdds/container-service/src/test/resources/123-dn-container.db/000032.sst
create mode 100644 hadoop-hdds/container-service/src/test/resources/123-dn-container.db/000034.log
create mode 100644 hadoop-hdds/container-service/src/test/resources/123-dn-container.db/CURRENT
create mode 100644 hadoop-hdds/container-service/src/test/resources/123-dn-container.db/IDENTITY
create mode 100644 hadoop-hdds/container-service/src/test/resources/123-dn-container.db/MANIFEST-000033
create mode 100644 hadoop-hdds/container-service/src/test/resources/123-dn-container.db/OPTIONS-000033
create mode 100644 hadoop-hdds/container-service/src/test/resources/123-dn-container.db/OPTIONS-000036
create mode 100644 hadoop-hdds/container-service/src/test/resources/123.container
delete mode 100644 hadoop-hdds/docs/content/beyond/Containers.md
delete mode 100644 hadoop-hdds/docs/content/beyond/Containers.zh.md
delete mode 100644 hadoop-hdds/docs/content/beyond/DockerCheatSheet.md
delete mode 100644 hadoop-hdds/docs/content/beyond/DockerCheatSheet.zh.md
delete mode 100644 hadoop-hdds/docs/content/beyond/_index.md
delete mode 100644 hadoop-hdds/docs/content/beyond/_index.zh.md
create mode 100644 hadoop-hdds/docs/content/concept/Containers.md
create mode 100644 hadoop-hdds/docs/content/concept/Containers.png
delete mode 100644 hadoop-hdds/docs/content/concept/Hdds.md
create mode 100644 hadoop-hdds/docs/content/concept/OzoneManager-ReadPath.png
create mode 100644 hadoop-hdds/docs/content/concept/OzoneManager-WritePath.png
create mode 100644 hadoop-hdds/docs/content/concept/OzoneManager.png
create mode 100644 hadoop-hdds/docs/content/concept/StorageContainerManager.md
create mode 100644 hadoop-hdds/docs/content/concept/StorageContainerManager.png
rename hadoop-hdds/docs/content/concept/{Hdds.zh.md => StorageContainerManager.zh.md} (91%)
create mode 100644 hadoop-hdds/docs/content/design/ec.md
copy hadoop-ozone/dist/src/main/compose/upgrade/versions/README.md => hadoop-hdds/docs/content/design/storage-class.md (59%)
copy hadoop-hdds/docs/content/design/{namespace-support.md => topology.md} (58%)
rename hadoop-hdds/docs/content/design/{ozone-volume-management.md => volume-management.md} (100%)
create mode 100644 hadoop-hdds/docs/content/feature/GDPR.md
rename hadoop-hdds/docs/content/{gdpr/GDPR in Ozone.zh.md => feature/GDPR.zh.md} (90%)
create mode 100644 hadoop-hdds/docs/content/feature/HA-OM-doublebuffer.png
create mode 100644 hadoop-hdds/docs/content/feature/HA-OM.png
create mode 100644 hadoop-hdds/docs/content/feature/HA.md
create mode 100644 hadoop-hdds/docs/content/feature/Observability.md
create mode 100644 hadoop-hdds/docs/content/feature/Recon.md
create mode 100644 hadoop-hdds/docs/content/feature/Topology.md
rename hadoop-hdds/docs/content/{gdpr => feature}/_index.md (80%)
rename hadoop-hdds/docs/content/{gdpr => feature}/_index.zh.md (100%)
delete mode 100644 hadoop-hdds/docs/content/gdpr/GDPR in Ozone.md
create mode 100644 hadoop-hdds/docs/content/interface/CSI.png
create mode 100644 hadoop-hdds/docs/content/interface/CSI.zh.md
create mode 100644 hadoop-hdds/docs/content/interface/Cli.md
rename hadoop-hdds/docs/content/interface/{OzoneFS.md => O3fs.md} (65%)
rename hadoop-hdds/docs/content/interface/{OzoneFS.zh.md => O3fs.zh.md} (91%)
copy hadoop-hdds/docs/content/{design/ofs.md => interface/Ofs.md} (53%)
create mode 100644 hadoop-hdds/docs/content/security/SecuringDatanodes.zh.md
rename hadoop-hdds/docs/content/security/{SecuityWithRanger.md => SecurityWithRanger.md} (97%)
rename hadoop-hdds/docs/content/security/{SecuityWithRanger.zh.md => SecurityWithRanger.zh.md} (97%)
delete mode 100644 hadoop-hdds/docs/content/shell/BucketCommands.md
delete mode 100644 hadoop-hdds/docs/content/shell/BucketCommands.zh.md
delete mode 100644 hadoop-hdds/docs/content/shell/Format.md
delete mode 100644 hadoop-hdds/docs/content/shell/Format.zh.md
delete mode 100644 hadoop-hdds/docs/content/shell/KeyCommands.md
delete mode 100644 hadoop-hdds/docs/content/shell/KeyCommands.zh.md
delete mode 100644 hadoop-hdds/docs/content/shell/VolumeCommands.md
delete mode 100644 hadoop-hdds/docs/content/shell/VolumeCommands.zh.md
delete mode 100644 hadoop-hdds/docs/content/shell/_index.md
delete mode 100644 hadoop-hdds/docs/content/shell/_index.zh.md
delete mode 100644 hadoop-hdds/framework/src/main/resources/webapps/static/angular-1.7.9.min.js
create mode 100644 hadoop-hdds/framework/src/main/resources/webapps/static/angular-1.8.0.min.js
rename hadoop-hdds/framework/src/main/resources/webapps/static/{angular-route-1.7.9.min.js => angular-route-1.8.0.min.js} (97%)
copy hadoop-hdds/{common/src/main/java/org/apache/hadoop/hdds/utils/BackgroundTask.java => server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/choose/algorithms/HealthyPipelineChoosePolicy.java} (50%)
create mode 100644 hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/choose/algorithms/PipelineChoosePolicyFactory.java
copy hadoop-hdds/{common/src/main/java/org/apache/hadoop/hdds/utils/BackgroundTask.java => server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/choose/algorithms/RandomPipelineChoosePolicy.java} (54%)
copy hadoop-hdds/{common/src/main/java/org/apache/hadoop/hdds/utils/BackgroundTask.java => server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/choose/algorithms/package-info.java} (78%)
copy hadoop-hdds/{common/src/main/java/org/apache/hadoop/hdds/utils/BackgroundTask.java => server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/leader/choose/algorithms/DefaultLeaderChoosePolicy.java} (51%)
create mode 100644 hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/leader/choose/algorithms/LeaderChoosePolicy.java
create mode 100644 hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/leader/choose/algorithms/LeaderChoosePolicyFactory.java
create mode 100644 hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/leader/choose/algorithms/MinLeaderCountChoosePolicy.java
copy hadoop-hdds/{common/src/main/java/org/apache/hadoop/hdds/utils/BackgroundTask.java => server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/leader/choose/algorithms/package-info.java} (78%)
create mode 100644 hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/choose/algorithms/TestPipelineChoosePolicyFactory.java
create mode 100644 hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/leader/choose/algorithms/TestLeaderChoosePolicy.java
create mode 100644 hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/cli/OzoneAdmin.java
copy hadoop-hdds/{common => tools}/src/main/java/org/apache/hadoop/hdds/cli/package-info.java (88%)
create mode 100644 hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ScmOption.java
copy hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/{container/WithScmClient.java => ScmSubcommand.java} (60%)
delete mode 100644 hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/freon/OzoneGetConf.java
create mode 100644 hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmRenameKeys.java
create mode 100644 hadoop-ozone/dev-support/intellij/core-site.xml
create mode 120000 hadoop-ozone/dist/src/main/compose/failing1/.env
create mode 120000 hadoop-ozone/dist/src/main/compose/failing1/docker-compose.yaml
create mode 120000 hadoop-ozone/dist/src/main/compose/failing1/docker-config
copy hadoop-ozone/dist/src/main/compose/{ozone-om-ha-s3 => failing1}/test.sh (88%)
mode change 100644 => 100755
create mode 120000 hadoop-ozone/dist/src/main/compose/failing2/.env
create mode 120000 hadoop-ozone/dist/src/main/compose/failing2/docker-compose.yaml
create mode 120000 hadoop-ozone/dist/src/main/compose/failing2/docker-config
copy hadoop-ozone/dist/src/main/compose/{ozone-om-ha-s3 => failing2}/test.sh (88%)
mode change 100644 => 100755
copy hadoop-ozone/dist/src/main/compose/{ozone => ozone-ha}/.env (100%)
create mode 100644 hadoop-ozone/dist/src/main/compose/ozone-ha/docker-compose.yaml
copy hadoop-ozone/dist/src/main/compose/{ozone-csi => ozone-ha}/docker-config (71%)
copy hadoop-ozone/dist/src/main/compose/{ozone-om-ha-s3 => ozone-ha}/test.sh (92%)
mode change 100644 => 100755
rename hadoop-ozone/dist/src/main/compose/upgrade/versions/{ozone-0.6.0.sh => ozone-1.0.0.sh} (91%)
copy hadoop-ozone/dist/src/main/smoketest/{topology/loaddata.robot => admincli/admin.robot} (58%)
create mode 100644 hadoop-ozone/dist/src/main/smoketest/admincli/container.robot
create mode 100644 hadoop-ozone/dist/src/main/smoketest/admincli/replicationmanager.robot
create mode 100644 hadoop-ozone/dist/src/main/smoketest/admincli/safemode.robot
create mode 100644 hadoop-ozone/dist/src/main/smoketest/basic/getconf.robot
copy hadoop-ozone/dist/src/main/smoketest/basic/{ozone-shell.robot => ozone-shell-lib.robot} (70%)
copy hadoop-ozone/dist/src/main/smoketest/{om-ratis/testOMAdminCmd.robot => basic/ozone-shell-single.robot} (72%)
copy hadoop-ozone/{ozonefs-hadoop2/src/main/resources/META-INF/services/org.apache.hadoop.fs.FileSystem => dist/src/main/smoketest/failing/test1.robot} (88%)
copy hadoop-ozone/{ozonefs-hadoop2/src/main/resources/META-INF/services/org.apache.hadoop.fs.FileSystem => dist/src/main/smoketest/failing/test2.robot} (88%)
delete mode 100644 hadoop-ozone/dist/src/main/smoketest/robot.robot
create mode 100755 hadoop-ozone/dist/src/main/smoketest/s3/s3_compatbility_check.sh
rename hadoop-ozone/dist/src/shell/upgrade/{0.6.0.sh => 1.0.0.sh} (94%)
rename hadoop-ozone/dist/src/shell/upgrade/{0.6.0 => 1.0.0}/01-migrate-scm-db.sh (100%)
create mode 100644 hadoop-ozone/insight/src/main/java/org/apache/hadoop/ozone/insight/datanode/DatanodeDispatcherInsight.java
create mode 100644 hadoop-ozone/insight/src/main/java/org/apache/hadoop/ozone/insight/datanode/PipelineComponentUtil.java
create mode 100644 hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestLeaderChoosePolicy.java
create mode 100644 hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestDiscardPreallocatedBlocks.java
delete mode 100644 hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainerRatis.java
delete mode 100644 hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestRatisManager.java
create mode 100644 hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestHadoopNestedDirGenerator.java
create mode 100644 hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmLDBCli.java
delete mode 100644 hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmSQLCli.java
create mode 100644 hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestCloseContainer.java
create mode 100644 hadoop-ozone/interface-storage/dev-support/findbugsExcludeFile.xml
copy hadoop-ozone/{interface-client => interface-storage}/pom.xml (63%)
rename hadoop-ozone/{ozone-manager => interface-storage}/src/main/java/org/apache/hadoop/ozone/om/OMMetadataManager.java (97%)
rename hadoop-ozone/{ozone-manager => interface-storage}/src/main/java/org/apache/hadoop/ozone/om/codec/OMTransactionInfoCodec.java (100%)
rename hadoop-ozone/{ozone-manager => interface-storage}/src/main/java/org/apache/hadoop/ozone/om/codec/OmBucketInfoCodec.java (100%)
rename hadoop-ozone/{ozone-manager => interface-storage}/src/main/java/org/apache/hadoop/ozone/om/codec/OmKeyInfoCodec.java (100%)
rename hadoop-ozone/{ozone-manager => interface-storage}/src/main/java/org/apache/hadoop/ozone/om/codec/OmMultipartKeyInfoCodec.java (100%)
rename hadoop-ozone/{ozone-manager => interface-storage}/src/main/java/org/apache/hadoop/ozone/om/codec/OmPrefixInfoCodec.java (91%)
rename hadoop-ozone/{ozone-manager => interface-storage}/src/main/java/org/apache/hadoop/ozone/om/codec/OmVolumeArgsCodec.java (100%)
rename hadoop-ozone/{ozone-manager => interface-storage}/src/main/java/org/apache/hadoop/ozone/om/codec/RepeatedOmKeyInfoCodec.java (100%)
rename hadoop-ozone/{ozone-manager => interface-storage}/src/main/java/org/apache/hadoop/ozone/om/codec/S3SecretValueCodec.java (100%)
rename hadoop-ozone/{ozone-manager => interface-storage}/src/main/java/org/apache/hadoop/ozone/om/codec/TokenIdentifierCodec.java (100%)
rename hadoop-ozone/{ozone-manager => interface-storage}/src/main/java/org/apache/hadoop/ozone/om/codec/UserVolumeInfoCodec.java (100%)
copy hadoop-ozone/{ozone-manager => interface-storage}/src/main/java/org/apache/hadoop/ozone/om/codec/package-info.java (95%)
rename hadoop-ozone/{common => interface-storage}/src/main/java/org/apache/hadoop/ozone/om/helpers/OmPrefixInfo.java (92%)
create mode 100644 hadoop-ozone/interface-storage/src/main/java/org/apache/hadoop/ozone/om/helpers/OzoneAclStorage.java
create mode 100644 hadoop-ozone/interface-storage/src/main/java/org/apache/hadoop/ozone/om/helpers/OzoneAclStorageUtil.java
copy hadoop-ozone/{ozone-manager/src/test/java/org/apache/hadoop/ozone/om/codec => interface-storage/src/main/java/org/apache/hadoop/ozone/om/helpers}/package-info.java (91%)
copy hadoop-ozone/{ozone-manager/src/test/java/org/apache/hadoop/ozone/om/codec => interface-storage/src/main/java/org/apache/hadoop/ozone/om}/package-info.java (92%)
rename hadoop-ozone/{ozone-manager => interface-storage}/src/main/java/org/apache/hadoop/ozone/om/ratis/OMTransactionInfo.java (98%)
copy hadoop-ozone/{ozone-manager/src/test/java/org/apache/hadoop/ozone/om/codec => interface-storage/src/main/java/org/apache/hadoop/ozone/om/ratis}/package-info.java (92%)
create mode 100644 hadoop-ozone/interface-storage/src/main/proto/OmStorageProtocol.proto
rename hadoop-ozone/{ozone-manager => interface-storage}/src/test/java/org/apache/hadoop/ozone/om/codec/TestOMTransactionInfoCodec.java (100%)
rename hadoop-ozone/{ozone-manager => interface-storage}/src/test/java/org/apache/hadoop/ozone/om/codec/TestOmKeyInfoCodec.java (100%)
rename hadoop-ozone/{ozone-manager => interface-storage}/src/test/java/org/apache/hadoop/ozone/om/codec/TestOmMultipartKeyInfoCodec.java (100%)
rename hadoop-ozone/{ozone-manager => interface-storage}/src/test/java/org/apache/hadoop/ozone/om/codec/TestOmPrefixInfoCodec.java (100%)
rename hadoop-ozone/{ozone-manager => interface-storage}/src/test/java/org/apache/hadoop/ozone/om/codec/TestRepeatedOmKeyInfoCodec.java (100%)
rename hadoop-ozone/{ozone-manager => interface-storage}/src/test/java/org/apache/hadoop/ozone/om/codec/TestS3SecretValueCodec.java (100%)
copy hadoop-ozone/{ozone-manager => interface-storage}/src/test/java/org/apache/hadoop/ozone/om/codec/package-info.java (100%)
rename hadoop-ozone/{common => interface-storage}/src/test/java/org/apache/hadoop/ozone/om/helpers/TestOmPrefixInfo.java (100%)
rename hadoop-ozone/{ozone-manager/src/test/java/org/apache/hadoop/ozone/om/codec => interface-storage/src/test/java/org/apache/hadoop/ozone/om/helpers}/package-info.java (92%)
create mode 100644 hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeysRenameRequest.java
copy hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/{OMKeyDeleteResponse.java => AbstractOMKeyDeleteResponse.java} (69%)
copy hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/{OMKeyCommitResponse.java => OMKeysRenameResponse.java} (55%)
create mode 100644 hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/OMOpenKeysDeleteRequest.java
copy hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/key/{OMAllocateBlockResponse.java => OMOpenKeysDeleteResponse.java} (58%)
create mode 100644 hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/failover/TestOMFailovers.java
create mode 100644 hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeysRenameRequest.java
create mode 100644 hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMOpenKeysDeleteRequest.java
create mode 100644 hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMKeysRenameResponse.java
create mode 100644 hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/key/TestOMOpenKeysDeleteResponse.java
copy hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineManagerMXBean.java => hadoop-ozone/ozonefs-hadoop2/src/main/java/org/apache/hadoop/fs/ozone/RootedOzFs.java (52%)
copy hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineManagerMXBean.java => hadoop-ozone/ozonefs-hadoop3/src/main/java/org/apache/hadoop/fs/ozone/RootedOzFs.java (54%)
copy hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineManagerMXBean.java => hadoop-ozone/ozonefs/src/main/java/org/apache/hadoop/fs/ozone/RootedOzFs.java (54%)
create mode 100644 hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/MetricsServiceProviderFactory.java
create mode 100644 hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/MetricsProxyEndpoint.java
copy hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineManagerMXBean.java => hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/metrics/Metric.java (57%)
create mode 100644 hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/MetricsServiceProvider.java
create mode 100644 hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/spi/impl/PrometheusServiceProviderImpl.java
create mode 100644 hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/tasks/TableCountTask.java
create mode 100644 hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestTableCountTask.java
create mode 100644 hadoop-ozone/recon/src/test/resources/prometheus-test-response.txt
delete mode 100644 hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/OzoneAdmin.java
create mode 100644 hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/conf/OzoneGetConf.java
create mode 100644 hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/conf/OzoneManagersCommandHandler.java
create mode 100644 hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/conf/PrintConfKeyCommandHandler.java
create mode 100644 hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/conf/StorageContainerManagersCommandHandler.java
copy hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/utils/BackgroundTask.java => hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/conf/package-info.java (79%)
create mode 100644 hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/DropTable.java
copy hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/debug/{ListTables.java => RocksDBUtils.java} (58%)
create mode 100644 hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/DatanodeChunkValidator.java
copy hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/WithScmClient.java => hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/ClearSpaceQuotaOptions.java (60%)
rename hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/WithScmClient.java => hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/SetSpaceQuotaOptions.java (55%)
copy hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/{token/RenewTokenHandler.java => bucket/ClearQuotaHandler.java} (57%)
create mode 100644 hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/bucket/SetQuotaHandler.java
copy hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/{token/RenewTokenHandler.java => volume/ClearQuotaHandler.java} (59%)
copy hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/volume/{UpdateVolumeHandler.java => SetQuotaHandler.java} (58%)
create mode 100644 hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/conf/TestGetConfOptions.java
create mode 100644 hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/freon/TestContentGenerator.java
create mode 100644 hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneAddressClientCreation.java
---------------------------------------------------------------------
To unsubscribe, e-mail: ozone-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: ozone-commits-help@hadoop.apache.org
[hadoop-ozone] 01/11: Merge branch 'master' into HDDS-2823
Posted by na...@apache.org.
This is an automated email from the ASF dual-hosted git repository.
nanda pushed a commit to branch HDDS-2823
in repository https://gitbox.apache.org/repos/asf/hadoop-ozone.git
commit 6de98c6d124ec127310e1aa9fac0cbc576991cfc
Merge: 40127b3 10df489
Author: Nandakumar <na...@apache.org>
AuthorDate: Sat Oct 24 20:07:04 2020 +0530
Merge branch 'master' into HDDS-2823
.github/close-pending.sh | 41 +
.github/closing-message.txt | 7 +
.github/comment-commands/close.sh | 10 +-
.github/comment-commands/pending.sh | 1 +
.github/workflows/close-pending.yaml | 32 +
.github/workflows/comments.yaml | 2 +-
.github/workflows/post-commit.yml | 411 ++--
LICENSE.txt | 4 +-
dev-support/byteman/appendlog.btm | 28 +
dev-support/byteman/hcfs-read.btm | 67 +
dev-support/byteman/hcfs-write.btm | 111 ++
dev-support/byteman/ratis-flush.btm | 34 +
dev-support/byteman/ratis-no-flush.btm | 25 +
dev-support/byteman/watchforcommit.btm | 35 +
dev-support/byteman/watchforcommit_all.btm | 47 +
hadoop-hdds/client/pom.xml | 15 +-
.../hadoop/hdds/scm/XceiverClientFactory.java | 38 +
.../apache/hadoop/hdds/scm/XceiverClientGrpc.java | 48 +-
.../hadoop/hdds/scm/XceiverClientManager.java | 40 +-
.../apache/hadoop/hdds/scm/XceiverClientRatis.java | 11 +-
.../hadoop/hdds/scm/client/HddsClientUtils.java | 8 +-
.../hadoop/hdds/scm/storage/BlockInputStream.java | 62 +-
.../hadoop/hdds/scm/storage/BlockOutputStream.java | 158 +-
.../apache/hadoop/hdds/scm/storage/BufferPool.java | 49 +-
.../hadoop/hdds/scm/storage/ChunkInputStream.java | 10 +-
.../hadoop/hdds/scm/storage/CommitWatcher.java | 37 +-
.../hdds/scm/storage/DummyChunkInputStream.java | 2 +-
.../storage/TestBlockOutputStreamCorrectness.java | 224 +++
.../hadoop/hdds/scm/storage/TestBufferPool.java | 46 +
hadoop-hdds/common/pom.xml | 14 +-
hadoop-hdds/common/src/main/conf/hadoop-env.sh | 13 +-
.../org/apache/hadoop/hdds/cli/GenericCli.java | 21 +
.../hadoop/hdds/cli/SubcommandWithParent.java | 30 +
.../org/apache/hadoop/hdds/cli/package-info.java | 4 +-
.../org/apache/hadoop/hdds/client/OzoneQuota.java | 240 ++-
.../hadoop/hdds/protocol/DatanodeDetails.java | 159 +-
.../org/apache/hadoop/hdds/ratis/RatisHelper.java | 160 +-
.../hadoop/hdds/ratis/conf/RatisClientConfig.java | 110 +-
.../RequestTypeDependentRetryPolicyCreator.java | 120 ++
.../retrypolicy/RetryLimitedPolicyCreator.java | 47 +
.../hdds/ratis/retrypolicy/RetryPolicyCreator.java | 29 +
.../hdds/ratis/retrypolicy/package-info.java | 23 +
.../apache/hadoop/hdds/recon/ReconConfigKeys.java | 4 +
.../hadoop/hdds/scm/ByteStringConversion.java | 18 +-
.../hadoop/hdds/scm/PipelineChoosePolicy.java | 37 +
.../hdds/scm/PipelineRequestInformation.java | 59 +
.../java/org/apache/hadoop/hdds/scm/ScmConfig.java | 23 +
.../org/apache/hadoop/hdds/scm/ScmConfigKeys.java | 13 +
.../apache/hadoop/hdds/scm/XceiverClientSpi.java | 15 +-
.../hadoop/hdds/scm/container/ContainerInfo.java | 2 +
.../hadoop/hdds/scm/exceptions/SCMException.java | 4 +-
.../hadoop/hdds/scm/net/NetworkTopologyImpl.java | 2 +-
.../apache/hadoop/hdds/scm/pipeline/Pipeline.java | 42 +-
.../hdds/scm/storage/ContainerProtocolCalls.java | 229 ++-
.../x509/certificate/utils/CertificateCodec.java | 2 +-
.../hadoop/hdds/utils/BackgroundService.java | 73 +-
.../apache/hadoop/hdds/utils/BackgroundTask.java | 4 +-
.../hadoop/hdds/utils/BackgroundTaskQueue.java | 5 +-
.../apache/hadoop/hdds/utils/RatisVersionInfo.java | 61 +
.../org/apache/hadoop/hdds/utils/Scheduler.java | 2 +-
.../org/apache/hadoop/hdds/utils/VersionInfo.java | 9 +-
.../org/apache/hadoop/ozone/OzoneConfigKeys.java | 4 +
.../java/org/apache/hadoop/ozone/OzoneConsts.java | 56 +-
.../apache/hadoop/ozone/common/ChunkBuffer.java | 14 +-
.../common/ChunkBufferImplWithByteBuffer.java | 10 +-
.../org/apache/hadoop/ozone/common/Storage.java | 2 +-
.../container/common/helpers/ChunkInfoList.java | 56 +
.../common/src/main/resources/ozone-default.xml | 82 +-
.../hadoop/hdds/conf/SimpleConfiguration.java | 13 +
.../hadoop/hdds/conf/TestOzoneConfiguration.java | 7 +-
.../hadoop/hdds/protocol/MockDatanodeDetails.java | 2 +-
.../hdds/ratis/conf/TestRaftClientConfig.java | 62 +
.../hdds/ratis/conf/TestRatisClientConfig.java | 68 +
.../hadoop/hdds/scm/pipeline/MockPipeline.java | 29 +-
.../hadoop/ozone/audit/TestOzoneAuditLogger.java | 62 +-
.../hadoop/ozone/common/TestChunkBuffer.java | 16 +-
hadoop-hdds/config/pom.xml | 4 +-
.../org/apache/hadoop/hdds/conf/ConfigType.java | 3 +-
.../hdds/conf/ConfigurationReflectionUtil.java | 57 +
.../hadoop/hdds/conf/InMemoryConfiguration.java | 58 +
.../hdds/conf/TestConfigurationReflectionUtil.java | 111 ++
hadoop-hdds/container-service/pom.xml | 11 +-
.../apache/hadoop/ozone/HddsDatanodeService.java | 18 +-
.../container/common/helpers/ContainerUtils.java | 66 +-
.../container/common/impl/ContainerDataYaml.java | 3 +
.../container/common/impl/HddsDispatcher.java | 61 +-
.../common/impl/StorageLocationReport.java | 11 +-
.../container/common/interfaces/BlockIterator.java | 5 +-
.../container/common/interfaces/Container.java | 7 -
.../common/statemachine/DatanodeStateMachine.java | 51 +-
.../common/statemachine/SCMConnectionManager.java | 9 +-
.../common/statemachine/StateContext.java | 31 +
.../CloseContainerCommandHandler.java | 2 +-
.../CreatePipelineCommandHandler.java | 15 +-
.../commandhandler/DeleteBlocksCommandHandler.java | 92 +-
.../states/datanode/RunningDatanodeState.java | 14 +-
.../states/endpoint/RegisterEndpointTask.java | 6 +-
.../common/transport/server/XceiverServerSpi.java | 22 +-
.../server/ratis/ContainerStateMachine.java | 14 +-
.../transport/server/ratis/XceiverServerRatis.java | 93 +-
.../container/common/utils/ContainerCache.java | 95 +-
.../container/common/utils/HddsVolumeUtil.java | 7 +-
.../container/common/utils/ReferenceCountedDB.java | 10 +-
.../container/common/volume/MutableVolumeSet.java | 10 +-
.../container/keyvalue/KeyValueBlockIterator.java | 156 --
.../container/keyvalue/KeyValueContainer.java | 29 +-
.../container/keyvalue/KeyValueContainerCheck.java | 12 +-
.../container/keyvalue/KeyValueContainerData.java | 48 +-
.../ozone/container/keyvalue/KeyValueHandler.java | 18 +-
.../container/keyvalue/helpers/BlockUtils.java | 2 +-
.../keyvalue/helpers/KeyValueContainerUtil.java | 184 +-
.../container/keyvalue/impl/BlockManagerImpl.java | 124 +-
.../keyvalue/interfaces/BlockManager.java | 12 +
.../background/BlockDeletingService.java | 75 +-
.../metadata/AbstractDatanodeDBDefinition.java | 74 +
.../container/metadata/AbstractDatanodeStore.java | 297 +++
.../ozone/container/metadata/BlockDataCodec.java | 47 +
.../container/metadata/ChunkInfoListCodec.java | 45 +
.../metadata/DatanodeSchemaOneDBDefinition.java | 91 +
.../metadata/DatanodeSchemaTwoDBDefinition.java | 81 +
.../ozone/container/metadata/DatanodeStore.java | 94 +
.../metadata/DatanodeStoreSchemaOneImpl.java | 49 +
.../metadata/DatanodeStoreSchemaTwoImpl.java | 44 +
.../ozone/container/metadata/DatanodeTable.java | 130 ++
.../metadata/SchemaOneChunkInfoListCodec.java | 68 +
.../metadata/SchemaOneDeletedBlocksTable.java | 180 ++
.../container/metadata/SchemaOneKeyCodec.java | 106 ++
.../ozone/container/metadata/package-info.java | 22 +
.../ozone/container/ozoneimpl/ContainerReader.java | 2 +
.../ozone/container/ozoneimpl/OzoneContainer.java | 30 +-
.../replication/GrpcReplicationClient.java | 29 +-
.../replication/SimpleContainerDownloader.java | 30 +-
.../protocol/StorageContainerDatanodeProtocol.java | 14 +-
.../protocol/commands/CreatePipelineCommand.java | 46 +-
...inerDatanodeProtocolClientSideTranslatorPB.java | 12 +-
...inerDatanodeProtocolServerSideTranslatorPB.java | 4 +-
.../main/resources/webapps/hddsDatanode/index.html | 4 +-
.../ozone/container/ContainerTestHelper.java | 5 +
.../hadoop/ozone/container/common/ScmTestMock.java | 9 +-
.../container/common/TestBlockDeletingService.java | 113 +-
.../ozone/container/common/TestContainerCache.java | 73 +-
.../common/TestKeyValueContainerData.java | 3 +
.../TestSchemaOneBackwardsCompatibility.java | 626 +++++++
.../common/impl/TestContainerDataYaml.java | 3 +
.../common/statemachine/TestStateContext.java | 85 +
.../TestCreatePipelineCommandHandler.java | 7 +-
.../states/datanode/TestRunningDatanodeState.java | 90 +
.../keyvalue/TestKeyValueBlockIterator.java | 340 ++--
.../container/keyvalue/TestKeyValueContainer.java | 75 +-
.../keyvalue/TestKeyValueContainerCheck.java | 18 +-
.../container/ozoneimpl/TestContainerReader.java | 128 +-
.../container/ozoneimpl/TestOzoneContainer.java | 65 +-
.../test/resources/123-dn-container.db/000024.sst | Bin 0 -> 1022 bytes
.../test/resources/123-dn-container.db/000026.sst | Bin 0 -> 827 bytes
.../test/resources/123-dn-container.db/000032.sst | Bin 0 -> 896 bytes
.../test/resources/123-dn-container.db/000034.log | 0
.../src/test/resources/123-dn-container.db/CURRENT | 1 +
.../test/resources/123-dn-container.db/IDENTITY | 1 +
.../resources/123-dn-container.db/MANIFEST-000033 | Bin 0 -> 297 bytes
.../resources/123-dn-container.db/OPTIONS-000033 | 165 ++
.../resources/123-dn-container.db/OPTIONS-000036 | 165 ++
.../src/test/resources/123.container | 10 +
hadoop-hdds/docs/content/_index.md | 5 +-
hadoop-hdds/docs/content/_index.zh.md | 2 +-
hadoop-hdds/docs/content/beyond/Containers.md | 234 ---
hadoop-hdds/docs/content/beyond/Containers.zh.md | 203 --
.../docs/content/beyond/DockerCheatSheet.md | 88 -
.../docs/content/beyond/DockerCheatSheet.zh.md | 85 -
hadoop-hdds/docs/content/beyond/_index.md | 30 -
hadoop-hdds/docs/content/beyond/_index.zh.md | 27 -
hadoop-hdds/docs/content/concept/Containers.md | 47 +
hadoop-hdds/docs/content/concept/Containers.png | Bin 0 -> 24775 bytes
hadoop-hdds/docs/content/concept/Datanodes.md | 5 +-
hadoop-hdds/docs/content/concept/Datanodes.zh.md | 3 +
hadoop-hdds/docs/content/concept/Hdds.md | 52 -
hadoop-hdds/docs/content/concept/Hdds.zh.md | 40 -
hadoop-hdds/docs/content/concept/Overview.md | 7 +-
hadoop-hdds/docs/content/concept/Overview.zh.md | 7 +-
.../docs/content/concept/OzoneManager-ReadPath.png | Bin 0 -> 81030 bytes
.../content/concept/OzoneManager-WritePath.png | Bin 0 -> 96696 bytes
hadoop-hdds/docs/content/concept/OzoneManager.md | 63 +-
hadoop-hdds/docs/content/concept/OzoneManager.png | Bin 0 -> 13327 bytes
.../docs/content/concept/OzoneManager.zh.md | 9 +
.../content/concept/StorageContainerManager.md | 99 +
.../content/concept/StorageContainerManager.png | Bin 0 -> 13336 bytes
.../content/concept/StorageContainerManager.zh.md | 49 +
hadoop-hdds/docs/content/concept/_index.md | 4 +-
hadoop-hdds/docs/content/concept/_index.zh.md | 2 +-
hadoop-hdds/docs/content/design/ec.md | 39 +
hadoop-hdds/docs/content/design/multiraft.md | 2 +-
.../docs/content/design/namespace-support.md | 6 +-
hadoop-hdds/docs/content/design/ofs.md | 131 --
.../content/design/ozone-enhancement-proposals.md | 2 +-
hadoop-hdds/docs/content/design/recon2.md | 2 +-
hadoop-hdds/docs/content/design/scmha.md | 4 +-
hadoop-hdds/docs/content/design/storage-class.md | 28 +
hadoop-hdds/docs/content/design/topology.md | 29 +
hadoop-hdds/docs/content/design/trash.md | 7 +-
hadoop-hdds/docs/content/design/typesafeconfig.md | 10 +-
...e-volume-management.md => volume-management.md} | 0
hadoop-hdds/docs/content/feature/GDPR.md | 80 +
hadoop-hdds/docs/content/feature/GDPR.zh.md | 41 +
.../docs/content/feature/HA-OM-doublebuffer.png | Bin 0 -> 77661 bytes
hadoop-hdds/docs/content/feature/HA-OM.png | Bin 0 -> 60888 bytes
hadoop-hdds/docs/content/feature/HA.md | 115 ++
hadoop-hdds/docs/content/feature/Observability.md | 224 +++
hadoop-hdds/docs/content/feature/Recon.md | 47 +
hadoop-hdds/docs/content/feature/Topology.md | 108 ++
hadoop-hdds/docs/content/feature/_index.md | 34 +
.../docs/content/{gdpr => feature}/_index.zh.md | 0
hadoop-hdds/docs/content/gdpr/GDPR in Ozone.md | 42 -
hadoop-hdds/docs/content/gdpr/GDPR in Ozone.zh.md | 36 -
hadoop-hdds/docs/content/gdpr/_index.md | 38 -
hadoop-hdds/docs/content/interface/CSI.md | 22 +-
hadoop-hdds/docs/content/interface/CSI.png | Bin 0 -> 27210 bytes
hadoop-hdds/docs/content/interface/CSI.zh.md | 92 +
hadoop-hdds/docs/content/interface/Cli.md | 208 +++
hadoop-hdds/docs/content/interface/JavaApi.md | 5 +-
hadoop-hdds/docs/content/interface/JavaApi.zh.md | 3 +
hadoop-hdds/docs/content/interface/O3fs.md | 127 ++
hadoop-hdds/docs/content/interface/O3fs.zh.md | 167 ++
hadoop-hdds/docs/content/interface/Ofs.md | 227 +++
hadoop-hdds/docs/content/interface/OzoneFS.md | 167 --
hadoop-hdds/docs/content/interface/OzoneFS.zh.md | 159 --
hadoop-hdds/docs/content/interface/S3.md | 29 +-
hadoop-hdds/docs/content/interface/S3.zh.md | 3 +
hadoop-hdds/docs/content/interface/_index.md | 4 +-
hadoop-hdds/docs/content/recipe/Prometheus.md | 5 +-
hadoop-hdds/docs/content/recipe/Prometheus.zh.md | 7 +-
hadoop-hdds/docs/content/recipe/SparkOzoneFSK8S.md | 1 -
.../docs/content/security/SecuityWithRanger.md | 43 -
.../docs/content/security/SecuityWithRanger.zh.md | 35 -
hadoop-hdds/docs/content/security/SecureOzone.md | 3 +
.../docs/content/security/SecureOzone.zh.md | 6 +-
.../docs/content/security/SecuringDatanodes.md | 39 +-
.../docs/content/security/SecuringDatanodes.zh.md | 53 +
.../docs/content/security/SecuringOzoneHTTP.md | 7 +-
hadoop-hdds/docs/content/security/SecuringS3.md | 5 +-
hadoop-hdds/docs/content/security/SecuringS3.zh.md | 3 +
hadoop-hdds/docs/content/security/SecuringTDE.md | 9 +-
.../docs/content/security/SecuringTDE.zh.md | 55 +
hadoop-hdds/docs/content/security/SecurityAcls.md | 11 +-
.../docs/content/security/SecurityAcls.zh.md | 69 +
.../docs/content/security/SecurityWithRanger.md | 46 +
.../docs/content/security/SecurityWithRanger.zh.md | 38 +
hadoop-hdds/docs/content/shell/BucketCommands.md | 100 -
.../docs/content/shell/BucketCommands.zh.md | 98 -
hadoop-hdds/docs/content/shell/Format.md | 69 -
hadoop-hdds/docs/content/shell/Format.zh.md | 65 -
hadoop-hdds/docs/content/shell/KeyCommands.md | 177 --
hadoop-hdds/docs/content/shell/KeyCommands.zh.md | 176 --
hadoop-hdds/docs/content/shell/VolumeCommands.md | 114 --
.../docs/content/shell/VolumeCommands.zh.md | 108 --
hadoop-hdds/docs/content/shell/_index.md | 28 -
hadoop-hdds/docs/content/shell/_index.zh.md | 27 -
hadoop-hdds/docs/content/start/FromSource.md | 38 +-
hadoop-hdds/docs/content/start/FromSource.zh.md | 7 +-
hadoop-hdds/docs/content/start/OnPrem.md | 2 +-
hadoop-hdds/docs/content/start/OnPrem.zh.md | 2 +-
.../docs/content/start/StartFromDockerHub.md | 6 +-
hadoop-hdds/docs/content/tools/TestTools.md | 14 +-
hadoop-hdds/docs/content/tools/TestTools.zh.md | 14 +-
hadoop-hdds/docs/content/tools/_index.md | 6 +-
hadoop-hdds/docs/pom.xml | 4 +-
.../themes/ozonedoc/layouts/_default/single.html | 2 +-
.../themes/ozonedoc/layouts/design/section.html | 2 +-
.../ozonedoc/layouts/partials/languages.html | 3 +-
.../themes/ozonedoc/layouts/partials/navbar.html | 6 +-
.../themes/ozonedoc/layouts/partials/sidebar.html | 14 +-
.../docs/themes/ozonedoc/static/css/ozonedoc.css | 23 +-
hadoop-hdds/framework/pom.xml | 4 +-
.../hadoop/hdds/conf/DatanodeRatisGrpcConfig.java | 7 +-
.../hdds/conf/DatanodeRatisServerConfig.java | 32 +-
.../x509/certificate/authority/BaseApprover.java | 11 +-
.../certificate/authority/DefaultApprover.java | 11 +
.../certificate/authority/DefaultCAServer.java | 42 +-
.../certificate/client/DNCertificateClient.java | 5 +-
.../certificates/utils/CertificateSignRequest.java | 2 +-
.../certificates/utils/SelfSignedCertificate.java | 109 +-
.../server/OzoneProtocolMessageDispatcher.java | 41 +-
.../hadoop/hdds/server/http/HttpServer2.java | 21 +
.../apache/hadoop/hdds/utils/HddsServerUtil.java | 29 +-
.../hadoop/hdds/utils/MetadataKeyFilters.java | 42 +-
.../apache/hadoop/hdds/utils/db/DBDefinition.java | 41 +-
.../org/apache/hadoop/hdds/utils/db/DBStore.java | 8 +-
.../hadoop/hdds/utils/db/DBStoreBuilder.java | 41 +-
.../org/apache/hadoop/hdds/utils/db/RDBStore.java | 17 +-
.../hadoop/hdds/utils/db/RDBStoreIterator.java | 40 +-
.../org/apache/hadoop/hdds/utils/db/RDBTable.java | 105 ++
.../org/apache/hadoop/hdds/utils/db/Table.java | 69 +
.../apache/hadoop/hdds/utils/db/TypedTable.java | 59 +
.../resources/webapps/static/angular-1.7.9.min.js | 350 ----
.../resources/webapps/static/angular-1.8.0.min.js | 350 ++++
.../webapps/static/angular-route-1.7.9.min.js | 17 -
.../webapps/static/angular-route-1.8.0.min.js | 17 +
.../x509/certificates/TestRootCertificate.java | 42 +-
.../apache/hadoop/hdds/server/TestJsonUtils.java | 5 +-
.../hadoop/hdds/utils/db/TestRDBStoreIterator.java | 10 +-
.../hadoop/hdds/utils/db/TestRDBTableStore.java | 61 +
hadoop-hdds/hadoop-dependency-client/README.md | 18 +-
hadoop-hdds/hadoop-dependency-client/pom.xml | 4 +-
hadoop-hdds/hadoop-dependency-server/pom.xml | 8 +-
hadoop-hdds/hadoop-dependency-test/pom.xml | 4 +-
hadoop-hdds/interface-admin/pom.xml | 4 +-
.../src/main/{proto => resources}/proto.lock | 0
hadoop-hdds/interface-client/pom.xml | 4 +-
.../src/main/proto/DatanodeClientProtocol.proto | 4 +
.../interface-client/src/main/proto/hdds.proto | 16 +-
.../interface-client/src/main/proto/proto.lock | 1938 -------------------
.../interface-client/src/main/resources/proto.lock | 1958 ++++++++++++++++++++
hadoop-hdds/interface-server/pom.xml | 4 +-
.../proto/ScmServerDatanodeHeartbeatProtocol.proto | 9 +-
.../src/main/proto/ScmServerProtocol.proto | 2 +
.../interface-server/src/main/proto/proto.lock | 1786 ------------------
.../interface-server/src/main/resources/proto.lock | 1786 ++++++++++++++++++
hadoop-hdds/pom.xml | 20 +-
hadoop-hdds/server-scm/pom.xml | 5 +-
.../hadoop/hdds/scm/block/BlockManagerImpl.java | 14 +-
.../block/DatanodeDeletedBlockTransactions.java | 32 +-
.../hadoop/hdds/scm/block/DeletedBlockLogImpl.java | 24 +-
.../hdds/scm/block/SCMBlockDeletingService.java | 7 +-
.../container/AbstractContainerReportHandler.java | 101 +-
.../hdds/scm/container/ContainerReplica.java | 43 +-
.../hdds/scm/container/ContainerReportHandler.java | 14 +-
.../hdds/scm/container/ContainerStateManager.java | 8 +-
.../IncrementalContainerReportHandler.java | 2 +-
.../hdds/scm/container/ReplicationManager.java | 133 +-
.../hdds/scm/container/SCMContainerManager.java | 70 +-
.../apache/hadoop/hdds/scm/node/DatanodeInfo.java | 55 +
.../hadoop/hdds/scm/node/DeadNodeHandler.java | 2 +-
.../apache/hadoop/hdds/scm/node/NodeManager.java | 6 +
.../hadoop/hdds/scm/node/NodeStateManager.java | 30 +-
.../hadoop/hdds/scm/node/SCMNodeManager.java | 68 +
.../scm/pipeline/BackgroundPipelineCreator.java | 35 +-
.../hadoop/hdds/scm/pipeline/PipelineManager.java | 4 +
.../hdds/scm/pipeline/PipelinePlacementPolicy.java | 42 +-
.../hdds/scm/pipeline/PipelineReportHandler.java | 2 +-
.../hdds/scm/pipeline/RatisPipelineProvider.java | 39 +-
.../hdds/scm/pipeline/RatisPipelineUtils.java | 2 +-
.../hdds/scm/pipeline/SCMPipelineManager.java | 40 +-
.../algorithms/HealthyPipelineChoosePolicy.java | 46 +
.../algorithms/PipelineChoosePolicyFactory.java | 106 ++
.../algorithms/RandomPipelineChoosePolicy.java | 38 +
.../pipeline/choose/algorithms/package-info.java | 18 +
.../algorithms/DefaultLeaderChoosePolicy.java | 42 +
.../choose/algorithms/LeaderChoosePolicy.java | 55 +
.../algorithms/LeaderChoosePolicyFactory.java | 75 +
.../algorithms/MinLeaderCountChoosePolicy.java | 91 +
.../leader/choose/algorithms/package-info.java | 19 +
.../SCMSecurityProtocolServerSideTranslatorPB.java | 17 +-
...lockLocationProtocolServerSideTranslatorPB.java | 2 +-
...inerLocationProtocolServerSideTranslatorPB.java | 17 +-
.../hdds/scm/safemode/ContainerSafeModeRule.java | 4 +-
.../hdds/scm/server/SCMBlockProtocolServer.java | 7 +-
.../hdds/scm/server/SCMDatanodeProtocolServer.java | 4 +-
.../apache/hadoop/hdds/scm/server/SCMMXBean.java | 6 +
.../hdds/scm/server/StorageContainerManager.java | 31 +-
.../src/main/resources/webapps/scm/index.html | 4 +-
.../main/resources/webapps/scm/scm-overview.html | 27 +-
.../src/main/resources/webapps/scm/scm.js | 4 -
.../java/org/apache/hadoop/hdds/scm/TestUtils.java | 47 +-
.../hadoop/hdds/scm/block/TestBlockManager.java | 135 ++
.../hadoop/hdds/scm/block/TestDeletedBlockLog.java | 17 +-
.../hadoop/hdds/scm/container/MockNodeManager.java | 50 +
.../scm/container/TestContainerReportHandler.java | 197 +-
.../hdds/scm/container/TestReplicationManager.java | 92 +
.../hadoop/hdds/scm/node/TestDeadNodeHandler.java | 5 +-
.../hadoop/hdds/scm/node/TestSCMNodeManager.java | 51 +-
.../scm/pipeline/TestPipelinePlacementPolicy.java | 85 +-
.../hdds/scm/pipeline/TestSCMPipelineManager.java | 161 ++
...TestSCMStoreImplWithOldPipelineIDKeyFormat.java | 180 ++
.../TestPipelineChoosePolicyFactory.java | 94 +
.../choose/algorithms/TestLeaderChoosePolicy.java | 74 +
.../ozone/container/common/TestEndPoint.java | 2 +-
.../testutils/ReplicationNodeManagerMock.java | 15 +
hadoop-hdds/test-utils/pom.xml | 4 +-
hadoop-hdds/tools/pom.xml | 12 +-
.../org/apache/hadoop/hdds/cli/OzoneAdmin.java | 67 +
.../org/apache/hadoop/hdds/cli/package-info.java | 22 +
.../hdds/scm/cli/ReplicationManagerCommands.java | 23 +-
.../scm/cli/ReplicationManagerStartSubcommand.java | 21 +-
.../cli/ReplicationManagerStatusSubcommand.java | 32 +-
.../scm/cli/ReplicationManagerStopSubcommand.java | 25 +-
.../hdds/scm/cli/SafeModeCheckSubcommand.java | 40 +-
.../hadoop/hdds/scm/cli/SafeModeCommands.java | 27 +-
.../hdds/scm/cli/SafeModeExitSubcommand.java | 22 +-
.../hdds/scm/cli/SafeModeWaitSubcommand.java | 13 +-
.../org/apache/hadoop/hdds/scm/cli/ScmOption.java | 72 +
.../apache/hadoop/hdds/scm/cli/ScmSubcommand.java | 43 +
.../hadoop/hdds/scm/cli/TopologySubcommand.java | 65 +-
.../hdds/scm/cli/container/CloseSubcommand.java | 20 +-
.../hdds/scm/cli/container/ContainerCommands.java | 21 +-
.../hdds/scm/cli/container/CreateSubcommand.java | 26 +-
.../hdds/scm/cli/container/DeleteSubcommand.java | 20 +-
.../hdds/scm/cli/container/InfoSubcommand.java | 48 +-
.../hdds/scm/cli/container/ListSubcommand.java | 32 +-
.../hdds/scm/cli/container/WithScmClient.java | 29 -
.../hdds/scm/cli/datanode/DatanodeCommands.java | 21 +-
.../hdds/scm/cli/datanode/ListInfoSubcommand.java | 48 +-
.../cli/pipeline/ActivatePipelineSubcommand.java | 19 +-
.../scm/cli/pipeline/ClosePipelineSubcommand.java | 19 +-
.../scm/cli/pipeline/CreatePipelineSubcommand.java | 48 +-
.../cli/pipeline/DeactivatePipelineSubcommand.java | 19 +-
.../scm/cli/pipeline/ListPipelinesSubcommand.java | 40 +-
.../hdds/scm/cli/pipeline/PipelineCommands.java | 22 +-
hadoop-ozone/client/pom.xml | 4 +-
.../org/apache/hadoop/ozone/client/BucketArgs.java | 72 +-
.../apache/hadoop/ozone/client/ObjectStore.java | 3 +
.../apache/hadoop/ozone/client/OzoneBucket.java | 126 +-
.../hadoop/ozone/client/OzoneClientFactory.java | 8 +-
.../apache/hadoop/ozone/client/OzoneVolume.java | 103 +-
.../org/apache/hadoop/ozone/client/VolumeArgs.java | 45 +-
.../ozone/client/io/BlockOutputStreamEntry.java | 30 +-
.../client/io/BlockOutputStreamEntryPool.java | 40 +-
.../hadoop/ozone/client/io/KeyInputStream.java | 98 +-
.../hadoop/ozone/client/io/KeyOutputStream.java | 60 +-
.../hadoop/ozone/client/io/OzoneInputStream.java | 5 +
.../ozone/client/protocol/ClientProtocol.java | 29 +-
.../apache/hadoop/ozone/client/rpc/RpcClient.java | 117 +-
hadoop-ozone/common/pom.xml | 4 +-
.../main/java/org/apache/hadoop/ozone/OmUtils.java | 75 +
.../org/apache/hadoop/ozone/audit/OMAction.java | 6 +-
.../apache/hadoop/ozone/freon/OzoneGetConf.java | 278 ---
.../apache/hadoop/ozone/freon/package-info.java | 21 -
.../org/apache/hadoop/ozone/om/OMConfigKeys.java | 7 +
.../hadoop/ozone/om/exceptions/OMException.java | 11 +-
.../ozone/om/exceptions/OMNotLeaderException.java | 2 +-
.../ozone/om/ha/OMFailoverProxyProvider.java | 253 ++-
.../ozone/om/helpers/BucketEncryptionKeyInfo.java | 4 +
.../hadoop/ozone/om/helpers/OmBucketArgs.java | 50 +-
.../hadoop/ozone/om/helpers/OmBucketInfo.java | 214 ++-
.../apache/hadoop/ozone/om/helpers/OmKeyArgs.java | 18 +
.../apache/hadoop/ozone/om/helpers/OmKeyInfo.java | 37 +-
.../hadoop/ozone/om/helpers/OmKeyLocationInfo.java | 24 +-
.../ozone/om/helpers/OmKeyLocationInfoGroup.java | 111 +-
.../hadoop/ozone/om/helpers/OmPrefixInfo.java | 215 ---
.../hadoop/ozone/om/helpers/OmRenameKeys.java | 59 +
.../hadoop/ozone/om/helpers/OmVolumeArgs.java | 64 +-
.../hadoop/ozone/om/helpers/OzoneFSUtils.java | 30 +
.../hadoop/ozone/om/helpers/OzoneFileStatus.java | 18 +
.../hadoop/ozone/om/helpers/RepeatedOmKeyInfo.java | 9 +-
.../ozone/om/protocol/OzoneManagerProtocol.java | 15 +-
.../ozone/om/protocolPB/Hadoop3OmTransport.java | 175 +-
...OzoneManagerProtocolClientSideTranslatorPB.java | 44 +-
.../ozone/security/OzoneTokenIdentifier.java | 61 +-
.../org/apache/hadoop/ozone/util/ExitManager.java | 33 +
.../apache/hadoop/ozone/util/OzoneVersionInfo.java | 15 +-
.../java/org/apache/hadoop/ozone/TestOmUtils.java | 49 +
.../hadoop/ozone/om/helpers/TestOmBucketInfo.java | 22 +-
.../om/helpers/TestOmKeyLocationInfoGroup.java | 59 +
.../hadoop/ozone/om/helpers/TestOmVolumeArgs.java | 4 +-
.../hadoop/ozone/om/helpers/TestOzoneFsUtils.java | 39 +
hadoop-ozone/csi/pom.xml | 4 +-
.../csi/src/main/{proto => resources}/proto.lock | 0
hadoop-ozone/datanode/pom.xml | 4 +-
hadoop-ozone/dev-support/checks/acceptance.sh | 2 +
hadoop-ozone/dev-support/checks/bats.sh | 35 +
hadoop-ozone/dev-support/checks/build.sh | 2 +-
hadoop-ozone/dev-support/checks/findbugs.sh | 7 +-
hadoop-ozone/dev-support/checks/kubernetes.sh | 36 +
hadoop-ozone/dev-support/intellij/core-site.xml | 27 +
hadoop-ozone/dist/README.md | 52 +-
.../dist/dev-support/bin/dist-layout-stitching | 5 +
hadoop-ozone/dist/pom.xml | 13 +-
hadoop-ozone/dist/src/main/compose/failing1/.env | 1 +
.../src/main/compose/failing1/docker-compose.yaml | 1 +
.../dist/src/main/compose/failing1/docker-config | 1 +
.../dist/src/main/compose/failing1/test.sh | 36 +
hadoop-ozone/dist/src/main/compose/failing2/.env | 1 +
.../src/main/compose/failing2/docker-compose.yaml | 1 +
.../dist/src/main/compose/failing2/docker-config | 1 +
.../dist/src/main/compose/failing2/test.sh | 36 +
.../dist/src/main/compose/ozone-csi/docker-config | 3 +
hadoop-ozone/dist/src/main/compose/ozone-ha/.env | 19 +
.../src/main/compose/ozone-ha/docker-compose.yaml | 93 +
.../dist/src/main/compose/ozone-ha/docker-config | 35 +
.../dist/src/main/compose/ozone-ha/test.sh | 33 +
.../dist/src/main/compose/ozone-mr/common-config | 3 +-
.../main/compose/ozone-mr/hadoop27/docker-config | 1 +
.../src/main/compose/ozone-mr/hadoop27/test.sh | 7 +-
.../main/compose/ozone-mr/hadoop31/docker-config | 1 +
.../src/main/compose/ozone-mr/hadoop31/test.sh | 7 +-
.../main/compose/ozone-mr/hadoop32/docker-config | 1 +
.../src/main/compose/ozone-mr/hadoop32/test.sh | 7 +-
.../dist/src/main/compose/ozone-mr/test.sh | 37 +
.../src/main/compose/ozone-om-ha-s3/docker-config | 5 +-
.../dist/src/main/compose/ozone-om-ha-s3/test.sh | 2 +
.../src/main/compose/ozone-om-ha/docker-config | 4 +-
.../src/main/compose/ozone-topology/docker-config | 7 +-
.../dist/src/main/compose/ozone-topology/test.sh | 4 +-
.../dist/src/main/compose/ozone/docker-config | 7 +-
hadoop-ozone/dist/src/main/compose/ozone/test.sh | 20 +-
.../src/main/compose/ozoneblockade/docker-config | 2 +
.../src/main/compose/ozones3-haproxy/docker-config | 3 +
.../src/main/compose/ozonesecure-mr/docker-config | 4 +-
.../dist/src/main/compose/ozonesecure-mr/test.sh | 5 +-
.../main/compose/ozonesecure-om-ha/docker-config | 11 +-
.../src/main/compose/ozonesecure-om-ha/test.sh | 2 +
.../src/main/compose/ozonesecure/docker-config | 9 +-
.../dist/src/main/compose/ozonesecure/test.sh | 17 +-
hadoop-ozone/dist/src/main/compose/test-all.sh | 28 +-
hadoop-ozone/dist/src/main/compose/testlib.sh | 105 +-
hadoop-ozone/dist/src/main/compose/upgrade/.env | 21 +
.../dist/src/main/compose/upgrade/README.md | 29 +
.../src/main/compose/upgrade/docker-compose.yaml | 127 ++
.../dist/src/main/compose/upgrade/docker-config | 34 +
hadoop-ozone/dist/src/main/compose/upgrade/test.sh | 69 +
.../src/main/compose/upgrade/versions/README.md | 15 +
.../main/compose/upgrade/versions/ozone-0.5.0.sh | 19 +
.../main/compose/upgrade/versions/ozone-1.0.0.sh | 19 +
hadoop-ozone/dist/src/main/k8s/.gitignore | 15 +
.../src/main/k8s/definitions/ozone/config.yaml | 1 +
.../k8s/definitions/ozone/definitions/onenode.yaml | 2 +-
.../main/k8s/definitions/ozone/freon/freon.yaml | 2 +-
.../{pv-test => test-webserver}/flekszible.yaml | 0
.../webserver-deployment.yaml | 0
.../webserver-service.yaml | 0
.../webserver-volume.yaml | 0
.../examples/getting-started/config-configmap.yaml | 1 +
.../getting-started/datanode-statefulset.yaml | 10 -
.../getting-started/freon/freon-deployment.yaml | 2 +-
.../src/main/k8s/examples/getting-started/test.sh | 39 +
.../k8s/examples/minikube/config-configmap.yaml | 1 +
.../examples/minikube/freon/freon-deployment.yaml | 2 +-
.../dist/src/main/k8s/examples/minikube/test.sh | 39 +
.../src/main/k8s/examples/ozone-dev/Flekszible | 4 +-
.../k8s/examples/ozone-dev/config-configmap.yaml | 1 +
.../ozone-dev/csi/csi-provisioner-deployment.yaml | 2 +-
.../examples/ozone-dev/datanode-statefulset.yaml | 10 -
.../examples/ozone-dev/freon/freon-deployment.yaml | 2 +-
.../dist/src/main/k8s/examples/ozone-dev/test.sh | 39 +
.../dist/src/main/k8s/examples/ozone/Flekszible | 2 +-
.../main/k8s/examples/ozone/config-configmap.yaml | 1 +
.../ozone/csi/csi-provisioner-deployment.yaml | 2 +-
.../k8s/examples/ozone/freon/freon-deployment.yaml | 2 +-
.../dist/src/main/k8s/examples/ozone/test.sh | 39 +
.../dist/src/main/k8s/examples/test-all.sh | 49 +
hadoop-ozone/dist/src/main/k8s/examples/testlib.sh | 144 ++
hadoop-ozone/dist/src/main/license/bin/LICENSE.txt | 4 +-
.../dist/src/main/smoketest/admincli/admin.robot | 32 +
.../src/main/smoketest/admincli/container.robot | 73 +
.../src/main/smoketest/admincli/datanode.robot | 19 +-
.../src/main/smoketest/admincli/pipeline.robot | 49 +-
.../smoketest/admincli/replicationmanager.robot | 53 +
.../src/main/smoketest/admincli/safemode.robot | 45 +
.../main/smoketest/auditparser/auditparser.robot | 2 +-
.../dist/src/main/smoketest/basic/basic.robot | 2 +-
.../dist/src/main/smoketest/basic/getconf.robot | 46 +
.../dist/src/main/smoketest/basic/links.robot | 152 ++
.../src/main/smoketest/basic/ozone-shell-lib.robot | 179 ++
.../main/smoketest/basic/ozone-shell-single.robot | 27 +
.../src/main/smoketest/basic/ozone-shell.robot | 121 +-
.../dist/src/main/smoketest/commonlib.robot | 34 +-
.../dist/src/main/smoketest/createbucketenv.robot | 2 +-
.../dist/src/main/smoketest/createmrenv.robot | 15 +-
.../src/main/smoketest/debug/ozone-debug.robot | 6 +-
.../dist/src/main/smoketest/failing/test1.robot | 21 +
.../dist/src/main/smoketest/failing/test2.robot | 21 +
.../dist/src/main/smoketest/freon/freon.robot | 2 +-
.../dist/src/main/smoketest/gdpr/gdpr.robot | 2 +-
hadoop-ozone/dist/src/main/smoketest/lib/os.robot | 49 +
.../dist/src/main/smoketest/lib/os_tests.robot | 38 +
.../dist/src/main/smoketest/mapreduce.robot | 11 +-
.../main/smoketest/om-ratis/testOMAdminCmd.robot | 2 +-
.../dist/src/main/smoketest/omha/testOMHA.robot | 2 +-
.../dist/src/main/smoketest/ozone-lib/shell.robot | 53 +
.../src/main/smoketest/ozone-lib/shell_tests.robot | 58 +
.../src/main/smoketest/ozonefs/hadoopo3fs.robot | 12 +-
.../dist/src/main/smoketest/ozonefs/ozonefs.robot | 2 +-
.../dist/src/main/smoketest/ozonefs/setup.robot | 20 +-
.../dist/src/main/smoketest/recon/recon-api.robot | 18 +-
.../src/main/smoketest/s3/MultipartUpload.robot | 133 +-
.../dist/src/main/smoketest/s3/bucketdelete.robot | 12 +-
.../dist/src/main/smoketest/s3/buckethead.robot | 5 +-
.../dist/src/main/smoketest/s3/commonawslib.robot | 40 +-
.../dist/src/main/smoketest/s3/objectcopy.robot | 23 +-
.../dist/src/main/smoketest/s3/objectdelete.robot | 28 +-
.../src/main/smoketest/s3/objectmultidelete.robot | 24 +-
.../dist/src/main/smoketest/s3/objectputget.robot | 40 +-
.../src/main/smoketest/s3/s3_compatbility_check.sh | 47 +
.../smoketest/security/bucket-encryption.robot | 45 +
.../main/smoketest/security/ozone-secure-s3.robot | 15 +
.../smoketest/security/ozone-secure-token.robot | 16 +-
.../dist/src/main/smoketest/spnego/web.robot | 16 +-
.../src/main/smoketest/topology/loaddata.robot | 2 +-
hadoop-ozone/dist/src/shell/hdds/hadoop-daemons.sh | 2 +-
.../dist/src/shell/hdds/hadoop-functions.sh | 6 +-
hadoop-ozone/dist/src/shell/ozone/ozone | 22 +-
hadoop-ozone/dist/src/shell/ozone/stop-ozone.sh | 8 +-
hadoop-ozone/dist/src/shell/upgrade/1.0.0.sh | 23 +
.../src/shell/upgrade/1.0.0/01-migrate-scm-db.sh | 24 +
.../dist/src/test/shell/compose_testlib.bats | 37 +
hadoop-ozone/dist/src/test/shell/gc_opts.bats | 6 +-
hadoop-ozone/dist/src/test/shell/k8s_testlib.bats | 55 +
hadoop-ozone/dist/src/test/shell/test1/test.sh | 15 +
hadoop-ozone/dist/src/test/shell/test2/test.sh | 17 +
.../dist/src/test/shell/test3/subtest1/test.sh | 17 +
hadoop-ozone/dist/src/test/shell/test4/test.sh | 17 +
.../fault-injection-test/mini-chaos-tests/pom.xml | 4 +-
.../hadoop/ozone/TestMiniChaosOzoneCluster.java | 40 +-
.../services/org.apache.hadoop.fs.FileSystem | 1 +
.../fault-injection-test/network-tests/pom.xml | 2 +-
.../src/test/blockade/ozone/client.py | 10 +-
hadoop-ozone/fault-injection-test/pom.xml | 4 +-
hadoop-ozone/insight/pom.xml | 4 +-
.../hadoop/ozone/insight/BaseInsightPoint.java | 34 +-
.../ozone/insight/BaseInsightSubCommand.java | 6 +
.../org/apache/hadoop/ozone/insight/Insight.java | 4 +
.../apache/hadoop/ozone/insight/InsightPoint.java | 4 +-
.../hadoop/ozone/insight/MetricGroupDisplay.java | 4 +-
.../hadoop/ozone/insight/MetricsSubCommand.java | 33 +-
.../datanode/DatanodeDispatcherInsight.java | 107 ++
.../insight/datanode/PipelineComponentUtil.java | 78 +
.../ozone/insight/datanode/RatisInsight.java | 45 +-
.../hadoop/ozone/insight/om/KeyManagerInsight.java | 2 +-
.../hadoop/ozone/insight/om/OmProtocolInsight.java | 2 +-
.../ozone/insight/scm/NodeManagerInsight.java | 2 +-
.../ozone/insight/scm/ReplicaManagerInsight.java | 2 +-
.../scm/ScmProtocolBlockLocationInsight.java | 2 +-
.../scm/ScmProtocolContainerLocationInsight.java | 2 +-
.../insight/scm/ScmProtocolDatanodeInsight.java | 2 +-
.../insight/scm/ScmProtocolSecurityInsight.java | 2 +-
hadoop-ozone/integration-test/pom.xml | 4 +-
.../fs/ozone/TestOzoneFSWithObjectStoreCreate.java | 391 ++++
.../hadoop/fs/ozone/TestOzoneFileInterfaces.java | 26 +-
.../hadoop/fs/ozone/TestOzoneFileSystem.java | 182 +-
.../hadoop/fs/ozone/TestRootedOzoneFileSystem.java | 253 ++-
.../hadoop/fs/ozone/contract/OzoneContract.java | 18 +-
.../ozone/contract/rooted/RootedOzoneContract.java | 25 +-
.../hdds/scm/pipeline/TestLeaderChoosePolicy.java | 216 +++
.../hadoop/hdds/scm/pipeline/TestNodeFailure.java | 2 +-
.../TestRatisPipelineCreateAndDestroy.java | 2 +-
.../org/apache/hadoop/ozone/OzoneTestUtils.java | 20 +-
.../java/org/apache/hadoop/ozone/TestDataUtil.java | 13 +-
.../apache/hadoop/ozone/TestMiniOzoneCluster.java | 4 +-
.../hadoop/ozone/TestOzoneConfigurationFields.java | 11 +-
.../hadoop/ozone/TestStorageContainerManager.java | 35 +-
.../ozone/TestStorageContainerManagerHelper.java | 31 +-
.../ozone/client/rpc/Test2WayCommitInRatis.java | 16 +-
.../rpc/TestBlockOutputStreamWithFailures.java | 32 +-
...estBlockOutputStreamWithFailuresFlushDelay.java | 32 +-
.../rpc/TestCloseContainerHandlingByClient.java | 55 -
.../hadoop/ozone/client/rpc/TestCommitWatcher.java | 78 +-
.../rpc/TestContainerReplicationEndToEnd.java | 19 +-
.../TestContainerStateMachineFailureOnRead.java | 27 +-
.../rpc/TestContainerStateMachineFailures.java | 47 +-
.../client/rpc/TestDeleteWithSlowFollower.java | 28 +-
.../client/rpc/TestDiscardPreallocatedBlocks.java | 186 ++
.../client/rpc/TestFailureHandlingByClient.java | 28 +-
.../rpc/TestFailureHandlingByClientFlushDelay.java | 28 +-
.../ozone/client/rpc/TestKeyInputStream.java | 119 +-
.../rpc/TestMultiBlockWritesWithDnFailures.java | 28 +-
.../rpc/TestOzoneClientRetriesOnException.java | 2 +-
...estOzoneClientRetriesOnExceptionFlushDelay.java | 2 +-
.../client/rpc/TestOzoneRpcClientAbstract.java | 845 ++++++++-
.../hadoop/ozone/client/rpc/TestReadRetries.java | 40 +-
.../ozone/client/rpc/TestSecureOzoneRpcClient.java | 3 -
.../client/rpc/TestValidateBCSIDOnRestart.java | 36 +-
.../ozone/client/rpc/TestWatchForCommit.java | 35 +-
.../commandhandler/TestBlockDeletion.java | 171 +-
.../ozoneimpl/TestOzoneContainerRatis.java | 138 --
.../container/ozoneimpl/TestRatisManager.java | 124 --
.../hadoop/ozone/freon/TestDataValidate.java | 18 +-
.../ozone/freon/TestFreonWithDatanodeRestart.java | 17 +-
.../ozone/freon/TestFreonWithPipelineDestroy.java | 17 +-
.../ozone/freon/TestHadoopNestedDirGenerator.java | 203 ++
.../ozone/freon/TestOzoneClientKeyGenerator.java | 2 -
.../hadoop/ozone/freon/TestRandomKeyGenerator.java | 18 +-
.../hadoop/ozone/fsck/TestContainerMapper.java | 3 +
.../apache/hadoop/ozone/om/TestKeyManagerImpl.java | 32 +-
.../hadoop/ozone/om/TestOMRatisSnapshots.java | 215 ++-
.../org/apache/hadoop/ozone/om/TestOmLDBCli.java | 120 ++
.../org/apache/hadoop/ozone/om/TestOmMetrics.java | 110 +-
.../org/apache/hadoop/ozone/om/TestOmSQLCli.java | 235 ---
.../ozone/om/TestOzoneManagerHAMetadataOnly.java | 76 +
.../ozone/om/TestOzoneManagerListVolumes.java | 18 +-
.../hadoop/ozone/om/TestOzoneManagerRestart.java | 26 +-
.../ozone/om/TestOzoneManagerRocksDBLogging.java | 2 +-
.../snapshot/TestOzoneManagerSnapshotProvider.java | 26 +-
.../ozone/recon/TestReconWithOzoneManager.java | 46 +-
.../hadoop/ozone/scm/TestCloseContainer.java | 148 ++
.../hadoop/ozone/scm/TestContainerSmallFile.java | 20 +-
.../scm/TestGetCommittedBlockLengthAndPutKey.java | 4 +-
.../hadoop/ozone/scm/TestXceiverClientGrpc.java | 6 +-
.../hadoop/ozone/shell/TestOzoneDatanodeShell.java | 2 +-
.../hadoop/ozone/shell/TestOzoneShellHA.java | 50 +-
.../src/test/resources/log4j.properties | 3 -
hadoop-ozone/interface-client/pom.xml | 4 +-
.../src/main/proto/OmClientProtocol.proto | 60 +
.../src/main/{proto => resources}/proto.lock | 0
.../dev-support/findbugsExcludeFile.xml | 21 +
hadoop-ozone/interface-storage/pom.xml | 103 +
.../apache/hadoop/ozone/om/OMMetadataManager.java | 385 ++++
.../ozone/om/codec/OMTransactionInfoCodec.java | 0
.../hadoop/ozone/om/codec/OmBucketInfoCodec.java | 0
.../hadoop/ozone/om/codec/OmKeyInfoCodec.java | 69 +
.../ozone/om/codec/OmMultipartKeyInfoCodec.java | 0
.../hadoop/ozone/om/codec/OmPrefixInfoCodec.java | 59 +
.../hadoop/ozone/om/codec/OmVolumeArgsCodec.java | 0
.../ozone/om/codec/RepeatedOmKeyInfoCodec.java | 68 +
.../hadoop/ozone/om/codec/S3SecretValueCodec.java | 0
.../ozone/om/codec/TokenIdentifierCodec.java | 66 +
.../hadoop/ozone/om/codec/UserVolumeInfoCodec.java | 0
.../apache/hadoop/ozone/om/codec/package-info.java | 24 +
.../hadoop/ozone/om/helpers/OmPrefixInfo.java | 216 +++
.../hadoop/ozone/om/helpers/OzoneAclStorage.java | 63 +
.../ozone/om/helpers/OzoneAclStorageUtil.java | 62 +
.../hadoop/ozone/om/helpers/package-info.java | 24 +
.../org/apache/hadoop/ozone/om/package-info.java | 24 +
.../hadoop/ozone/om/ratis/OMTransactionInfo.java | 163 ++
.../apache/hadoop/ozone/om/ratis/package-info.java | 24 +
.../src/main/proto/OmStorageProtocol.proto | 60 +
.../ozone/om/codec/TestOMTransactionInfoCodec.java | 0
.../hadoop/ozone/om/codec/TestOmKeyInfoCodec.java | 116 ++
.../om/codec/TestOmMultipartKeyInfoCodec.java | 0
.../ozone/om/codec/TestOmPrefixInfoCodec.java | 0
.../ozone/om/codec/TestRepeatedOmKeyInfoCodec.java | 121 ++
.../ozone/om/codec/TestS3SecretValueCodec.java | 0
.../apache/hadoop/ozone/om/codec/package-info.java | 0
.../hadoop/ozone/om/helpers/TestOmPrefixInfo.java | 0
.../hadoop/ozone/om/helpers/package-info.java | 24 +
hadoop-ozone/ozone-manager/pom.xml | 10 +-
.../apache/hadoop/ozone/om/BucketManagerImpl.java | 116 +-
.../apache/hadoop/ozone/om/KeyDeletingService.java | 9 +-
.../org/apache/hadoop/ozone/om/KeyManager.java | 17 +-
.../org/apache/hadoop/ozone/om/KeyManagerImpl.java | 351 ++--
.../apache/hadoop/ozone/om/OMMetadataManager.java | 385 ----
.../java/org/apache/hadoop/ozone/om/OMMetrics.java | 37 +
.../hadoop/ozone/om/OmMetadataManagerImpl.java | 70 +-
.../hadoop/ozone/om/OpenKeyCleanupService.java | 45 +-
.../org/apache/hadoop/ozone/om/OzoneManager.java | 744 ++++++--
.../org/apache/hadoop/ozone/om/ResolvedBucket.java | 111 ++
.../org/apache/hadoop/ozone/om/VolumeManager.java | 9 -
.../apache/hadoop/ozone/om/VolumeManagerImpl.java | 39 -
.../hadoop/ozone/om/codec/OMDBDefinition.java | 19 +-
.../hadoop/ozone/om/codec/OmKeyInfoCodec.java | 58 -
.../hadoop/ozone/om/codec/OmPrefixInfoCodec.java | 58 -
.../ozone/om/codec/RepeatedOmKeyInfoCodec.java | 57 -
.../ozone/om/codec/TokenIdentifierCodec.java | 56 -
.../apache/hadoop/ozone/om/codec/package-info.java | 3 +
.../apache/hadoop/ozone/om/fs/OzoneManagerFS.java | 53 +-
.../hadoop/ozone/om/ratis/OMTransactionInfo.java | 153 --
.../ozone/om/ratis/OzoneManagerRatisServer.java | 18 +-
.../ozone/om/ratis/OzoneManagerStateMachine.java | 26 +-
.../om/ratis/utils/OzoneManagerRatisUtils.java | 40 +-
.../hadoop/ozone/om/request/OMClientRequest.java | 94 +-
.../om/request/bucket/OMBucketCreateRequest.java | 52 +
.../request/bucket/OMBucketSetPropertyRequest.java | 64 +
.../bucket/acl/OMBucketRemoveAclRequest.java | 2 +-
.../request/bucket/acl/OMBucketSetAclRequest.java | 2 +-
.../om/request/file/OMDirectoryCreateRequest.java | 4 +
.../ozone/om/request/file/OMFileCreateRequest.java | 57 +-
.../ozone/om/request/file/OMFileRequest.java | 2 +-
.../om/request/key/OMAllocateBlockRequest.java | 61 +-
.../ozone/om/request/key/OMKeyCommitRequest.java | 79 +-
.../ozone/om/request/key/OMKeyCreateRequest.java | 127 +-
.../ozone/om/request/key/OMKeyDeleteRequest.java | 45 +-
.../ozone/om/request/key/OMKeyRenameRequest.java | 26 +-
.../hadoop/ozone/om/request/key/OMKeyRequest.java | 171 +-
.../ozone/om/request/key/OMKeysDeleteRequest.java | 66 +-
.../ozone/om/request/key/OMKeysRenameRequest.java | 271 +++
.../om/request/key/OMTrashRecoverRequest.java | 7 +
.../om/request/key/acl/OMKeyRemoveAclRequest.java | 2 +-
.../om/request/key/acl/OMKeySetAclRequest.java | 2 +-
.../key/acl/prefix/OMPrefixRemoveAclRequest.java | 2 +-
.../key/acl/prefix/OMPrefixSetAclRequest.java | 2 +-
.../S3InitiateMultipartUploadRequest.java | 60 +-
.../multipart/S3MultipartUploadAbortRequest.java | 71 +-
.../S3MultipartUploadCommitPartRequest.java | 73 +-
.../S3MultipartUploadCompleteRequest.java | 86 +-
.../om/request/volume/OMVolumeCreateRequest.java | 6 +
.../om/request/volume/OMVolumeSetQuotaRequest.java | 52 +-
.../hadoop/ozone/om/response/CleanupTableInfo.java | 3 +-
.../om/response/file/OMFileCreateResponse.java | 10 +-
.../response/key/AbstractOMKeyDeleteResponse.java | 126 ++
.../om/response/key/OMAllocateBlockResponse.java | 18 +-
.../ozone/om/response/key/OMKeyCommitResponse.java | 18 +-
.../ozone/om/response/key/OMKeyCreateResponse.java | 20 +-
.../ozone/om/response/key/OMKeyDeleteResponse.java | 73 +-
.../om/response/key/OMKeysDeleteResponse.java | 56 +-
.../om/response/key/OMKeysRenameResponse.java | 80 +
.../om/response/key/OMOpenKeysDeleteRequest.java | 192 ++
.../om/response/key/OMOpenKeysDeleteResponse.java | 72 +
.../multipart/S3MultipartUploadAbortResponse.java | 20 +-
.../S3MultipartUploadCommitPartResponse.java | 21 +-
.../om/snapshot/OzoneManagerSnapshotProvider.java | 10 +-
...OzoneManagerProtocolServerSideTranslatorPB.java | 4 +-
.../protocolPB/OzoneManagerRequestHandler.java | 2 +-
.../OzoneDelegationTokenSecretManager.java | 2 +-
.../main/resources/webapps/ozoneManager/index.html | 4 +-
.../main/resources/webapps/ozoneManager/main.html | 2 +
.../webapps/ozoneManager/om-overview.html | 26 +
.../resources/webapps/ozoneManager/ozoneManager.js | 7 +-
.../apache/hadoop/ozone/om/TestKeyManagerUnit.java | 43 +-
.../hadoop/ozone/om/TestOmMetadataManager.java | 79 +-
.../hadoop/ozone/om/failover/TestOMFailovers.java | 151 ++
...tOzoneManagerDoubleBufferWithDummyResponse.java | 2 +-
...TestOzoneManagerDoubleBufferWithOMResponse.java | 2 +-
.../ozone/om/request/TestNormalizePaths.java | 109 ++
.../ozone/om/request/TestOMRequestUtils.java | 130 +-
.../bucket/TestOMBucketSetPropertyRequest.java | 41 +-
.../request/file/TestOMDirectoryCreateRequest.java | 6 +
.../om/request/key/TestOMAllocateBlockRequest.java | 22 +-
.../om/request/key/TestOMKeyCreateRequest.java | 182 +-
.../key/TestOMKeyPurgeRequestAndResponse.java | 15 +-
.../ozone/om/request/key/TestOMKeyRequest.java | 17 +
.../om/request/key/TestOMKeysRenameRequest.java | 160 ++
.../request/key/TestOMOpenKeysDeleteRequest.java | 419 +++++
.../TestS3InitiateMultipartUploadRequest.java | 59 +-
.../s3/multipart/TestS3MultipartRequest.java | 17 +-
.../TestS3MultipartUploadCommitPartRequest.java | 2 +-
.../volume/TestOMVolumeSetOwnerRequest.java | 2 +-
.../volume/TestOMVolumeSetQuotaRequest.java | 69 +-
.../bucket/TestOMBucketCreateResponse.java | 8 +
.../bucket/TestOMBucketDeleteResponse.java | 8 +
.../bucket/TestOMBucketSetPropertyResponse.java | 9 +
.../file/TestOMDirectoryCreateResponse.java | 8 +
.../response/key/TestOMAllocateBlockResponse.java | 27 +-
.../om/response/key/TestOMKeyCommitResponse.java | 19 +-
.../om/response/key/TestOMKeyCreateResponse.java | 23 +-
.../om/response/key/TestOMKeyDeleteResponse.java | 27 +-
.../ozone/om/response/key/TestOMKeyResponse.java | 3 +
.../om/response/key/TestOMKeysDeleteResponse.java | 38 +-
.../om/response/key/TestOMKeysRenameResponse.java | 131 ++
.../response/key/TestOMOpenKeysDeleteResponse.java | 185 ++
.../s3/multipart/TestS3MultipartResponse.java | 16 +-
.../TestS3MultipartUploadAbortResponse.java | 21 +-
.../security/TestOMDelegationTokenResponse.java | 8 +
.../volume/TestOMVolumeCreateResponse.java | 8 +
.../volume/TestOMVolumeDeleteResponse.java | 8 +
.../volume/TestOMVolumeSetOwnerResponse.java | 9 +
.../volume/TestOMVolumeSetQuotaResponse.java | 9 +
.../ozone/security/TestOzoneTokenIdentifier.java | 19 +
hadoop-ozone/ozonefs-common/pom.xml | 4 +-
.../fs/ozone/BasicOzoneClientAdapterImpl.java | 23 +-
.../hadoop/fs/ozone/BasicOzoneFileSystem.java | 143 +-
.../ozone/BasicRootedOzoneClientAdapterImpl.java | 138 +-
.../fs/ozone/BasicRootedOzoneFileSystem.java | 180 +-
.../hadoop/fs/ozone/OzoneClientAdapterImpl.java | 4 +-
.../fs/ozone/RootedOzoneClientAdapterImpl.java | 4 +-
.../services/org.apache.hadoop.fs.FileSystem | 1 +
hadoop-ozone/ozonefs-hadoop2/pom.xml | 6 +-
.../org/apache/hadoop/fs/ozone/RootedOzFs.java | 49 +
.../services/org.apache.hadoop.fs.FileSystem | 1 +
hadoop-ozone/ozonefs-hadoop3/pom.xml | 6 +-
.../apache/hadoop/fs/ozone/OzoneFileSystem.java | 4 +-
.../org/apache/hadoop/fs/ozone/RootedOzFs.java | 44 +
.../hadoop/fs/ozone/RootedOzoneFileSystem.java | 4 +-
.../services/org.apache.hadoop.fs.FileSystem | 1 +
hadoop-ozone/ozonefs-shaded/pom.xml | 9 +-
hadoop-ozone/ozonefs/pom.xml | 4 +-
.../apache/hadoop/fs/ozone/OzoneFileSystem.java | 4 +-
.../org/apache/hadoop/fs/ozone/RootedOzFs.java | 44 +
.../hadoop/fs/ozone/RootedOzoneFileSystem.java | 4 +-
.../services/org.apache.hadoop.fs.FileSystem | 1 +
hadoop-ozone/pom.xml | 33 +-
hadoop-ozone/recon-codegen/pom.xml | 2 +-
.../recon/schema/ContainerSchemaDefinition.java | 4 +-
.../recon/schema/ReconTaskSchemaDefinition.java | 2 +-
.../ozone/recon/schema/StatsSchemaDefinition.java | 14 +-
.../recon/schema/UtilizationSchemaDefinition.java | 23 +-
hadoop-ozone/recon/pom.xml | 2 +-
.../ozone/recon/MetricsServiceProviderFactory.java | 86 +
.../apache/hadoop/ozone/recon/ReconConstants.java | 8 +-
.../hadoop/ozone/recon/ReconControllerModule.java | 13 +-
.../hadoop/ozone/recon/ReconSchemaManager.java | 2 +-
.../hadoop/ozone/recon/ReconServerConfigKeys.java | 50 +-
.../org/apache/hadoop/ozone/recon/ReconUtils.java | 47 +-
.../ozone/recon/api/ClusterStateEndpoint.java | 47 +-
.../hadoop/ozone/recon/api/ContainerEndpoint.java | 2 +-
.../ozone/recon/api/MetricsProxyEndpoint.java | 118 ++
.../hadoop/ozone/recon/api/NodeEndpoint.java | 2 +
.../hadoop/ozone/recon/api/PipelineEndpoint.java | 59 +-
.../ozone/recon/api/types/DatanodeMetadata.java | 28 +
.../ozone/recon/codec/DatanodeDetailsCodec.java | 50 +
.../ozone/recon/codec/ReconNodeDBKeyCodec.java | 46 +
.../hadoop/ozone/recon/codec/package-info.java | 22 +
.../apache/hadoop/ozone/recon/metrics/Metric.java | 51 +
.../ozone/recon/scm/ReconContainerManager.java | 61 +-
.../recon/scm/ReconContainerReportHandler.java | 2 +-
.../hadoop/ozone/recon/scm/ReconDBDefinition.java | 38 -
.../ReconIncrementalContainerReportHandler.java | 6 +-
.../hadoop/ozone/recon/scm/ReconNodeManager.java | 55 +-
.../ozone/recon/scm/ReconSCMDBDefinition.java | 61 +
.../scm/ReconStorageContainerManagerFacade.java | 9 +-
.../ozone/recon/spi/MetricsServiceProvider.java | 60 +
.../spi/impl/ContainerDBServiceProviderImpl.java | 60 +-
.../spi/impl/OzoneManagerServiceProviderImpl.java | 54 +-
.../spi/impl/PrometheusServiceProviderImpl.java | 213 +++
.../recon/spi/impl/ReconContainerDBProvider.java | 28 +-
.../ozone/recon/spi/impl/ReconDBDefinition.java | 71 +
.../ozone/recon/tasks/FileSizeCountTask.java | 4 +-
.../hadoop/ozone/recon/tasks/OMDBUpdateEvent.java | 2 +-
.../ozone/recon/tasks/OMDBUpdatesHandler.java | 92 +-
.../ozone/recon/tasks/ReconTaskControllerImpl.java | 10 +-
.../hadoop/ozone/recon/tasks/TableCountTask.java | 185 ++
.../webapps/recon/ozone-recon-web/api/db.json | 48 +-
.../components/autoReloadPanel/autoReloadPanel.tsx | 4 +-
.../ozone-recon-web/src/types/datanode.types.tsx | 4 +-
.../ozone-recon-web/src/utils/columnSearch.less | 36 +
.../ozone-recon-web/src/utils/columnSearch.tsx | 94 +
.../src/views/datanodes/datanodes.less | 1 +
.../src/views/datanodes/datanodes.tsx | 101 +-
.../src/views/pipelines/pipelines.tsx | 32 +-
.../hadoop/ozone/recon/ReconTestInjector.java | 3 +
.../apache/hadoop/ozone/recon/TestReconUtils.java | 6 +-
.../hadoop/ozone/recon/api/TestEndpoints.java | 151 +-
.../scm/AbstractReconContainerManagerTest.java | 51 +-
.../ozone/recon/scm/TestReconContainerManager.java | 95 +-
...TestReconIncrementalContainerReportHandler.java | 63 +
.../ozone/recon/scm/TestReconNodeManager.java | 20 +-
.../ozone/recon/scm/TestReconPipelineManager.java | 8 +-
.../impl/TestOzoneManagerServiceProviderImpl.java | 9 +-
.../ozone/recon/tasks/TestOMDBUpdatesHandler.java | 54 +-
.../recon/tasks/TestReconTaskControllerImpl.java | 6 +-
.../ozone/recon/tasks/TestTableCountTask.java | 178 ++
.../test/resources/prometheus-test-response.txt | 21 +
hadoop-ozone/s3gateway/pom.xml | 9 +-
.../hadoop/ozone/s3/AWSSignatureProcessor.java | 435 +++++
.../hadoop/ozone/s3/AWSV4SignatureProcessor.java | 426 -----
.../hadoop/ozone/s3/OzoneClientProducer.java | 8 +-
.../hadoop/ozone/s3/S3GatewayConfigKeys.java | 6 +
.../hadoop/ozone/s3/endpoint/BucketEndpoint.java | 8 +-
.../hadoop/ozone/s3/endpoint/ObjectEndpoint.java | 69 +-
.../ozone/s3/header/AuthorizationHeaderV2.java | 2 +-
.../ozone/s3/header/AuthorizationHeaderV4.java | 4 +-
.../hadoop/ozone/s3/io/S3WrapperInputStream.java | 36 +-
.../apache/hadoop/ozone/s3/util/RangeHeader.java | 6 +-
.../hadoop/ozone/client/ObjectStoreStub.java | 5 +-
.../hadoop/ozone/client/OzoneVolumeStub.java | 6 +-
.../hadoop/ozone/s3/TestAWSSignatureProcessor.java | 141 ++
.../ozone/s3/TestAWSV4SignatureProcessor.java | 103 -
.../hadoop/ozone/s3/endpoint/TestBucketHead.java | 11 +-
.../hadoop/ozone/s3/endpoint/TestBucketPut.java | 2 +-
.../s3/endpoint/TestMultipartUploadWithCopy.java | 15 +-
.../ozone/s3/header/TestAuthorizationHeaderV4.java | 2 +-
hadoop-ozone/tools/pom.xml | 18 +-
.../org/apache/hadoop/ozone/admin/OzoneAdmin.java | 120 --
.../ozone/admin/om/GetServiceRolesSubcommand.java | 4 +-
.../org/apache/hadoop/ozone/admin/om/OMAdmin.java | 12 +-
.../org/apache/hadoop/ozone/conf/OzoneGetConf.java | 86 +
.../ozone/conf/OzoneManagersCommandHandler.java | 53 +
.../ozone/conf/PrintConfKeyCommandHandler.java | 52 +
.../StorageContainerManagersCommandHandler.java | 52 +
.../org/apache/hadoop/ozone/conf/package-info.java | 21 +
.../apache/hadoop/ozone/debug/ChunkKeyHandler.java | 165 +-
.../hadoop/ozone/debug/ContainerChunkInfo.java | 21 +-
.../hadoop/ozone/debug/DBDefinitionFactory.java | 22 +-
.../org/apache/hadoop/ozone/debug/DBScanner.java | 131 +-
.../org/apache/hadoop/ozone/debug/DropTable.java | 81 +
.../org/apache/hadoop/ozone/debug/ListTables.java | 19 +-
.../org/apache/hadoop/ozone/debug/OzoneDebug.java | 11 +-
.../org/apache/hadoop/ozone/debug/RDBParser.java | 38 +-
.../apache/hadoop/ozone/debug/RocksDBUtils.java | 49 +
.../hadoop/ozone/freon/BaseFreonGenerator.java | 22 +
.../hadoop/ozone/freon/ContentGenerator.java | 31 +-
.../hadoop/ozone/freon/DatanodeChunkValidator.java | 244 +++
.../java/org/apache/hadoop/ozone/freon/Freon.java | 5 +
.../hadoop/ozone/freon/HadoopDirTreeGenerator.java | 55 +-
.../hadoop/ozone/freon/HadoopFsGenerator.java | 12 +-
.../ozone/freon/HadoopNestedDirGenerator.java | 27 +-
.../hadoop/ozone/freon/RandomKeyGenerator.java | 53 +-
.../apache/hadoop/ozone/fsck/ContainerMapper.java | 2 +-
.../hadoop/ozone/segmentparser/RatisLogParser.java | 12 +-
.../hadoop/ozone/shell/ClearSpaceQuotaOptions.java | 43 +
.../apache/hadoop/ozone/shell/OzoneAddress.java | 103 +-
.../org/apache/hadoop/ozone/shell/OzoneShell.java | 14 +-
.../hadoop/ozone/shell/SetSpaceQuotaOptions.java | 44 +
.../java/org/apache/hadoop/ozone/shell/Shell.java | 8 +-
.../hadoop/ozone/shell/bucket/BucketCommands.java | 17 +-
.../ozone/shell/bucket/ClearQuotaHandler.java | 55 +
.../ozone/shell/bucket/CreateBucketHandler.java | 13 +
.../ozone/shell/bucket/LinkBucketHandler.java | 79 +
.../hadoop/ozone/shell/bucket/SetQuotaHandler.java | 62 +
.../hadoop/ozone/shell/keys/KeyCommands.java | 12 +-
.../hadoop/ozone/shell/token/GetTokenHandler.java | 8 +-
.../ozone/shell/token/PrintTokenHandler.java | 3 +-
.../ozone/shell/token/RenewTokenHandler.java | 4 +-
.../hadoop/ozone/shell/token/TokenCommands.java | 16 +-
.../hadoop/ozone/shell/token/TokenOption.java | 38 +-
.../ozone/shell/volume/ClearQuotaHandler.java | 53 +
.../ozone/shell/volume/CreateVolumeHandler.java | 20 +-
.../hadoop/ozone/shell/volume/SetQuotaHandler.java | 61 +
.../ozone/shell/volume/UpdateVolumeHandler.java | 11 -
.../hadoop/ozone/shell/volume/VolumeCommands.java | 16 +-
.../services/org.apache.hadoop.fs.FileSystem | 1 +
.../hadoop/ozone/conf/TestGetConfOptions.java | 90 +
.../ozone/debug/TestDBDefinitionFactory.java | 59 +
.../hadoop/ozone/freon/TestContentGenerator.java | 82 +
.../TestGenerateOzoneRequiredConfigurations.java | 5 +-
.../hadoop/ozone/shell/TestOzoneAddress.java | 6 +-
.../shell/TestOzoneAddressClientCreation.java | 172 ++
hadoop-ozone/upgrade/pom.xml | 57 -
.../org/apache/hadoop/ozone/upgrade/Balance.java | 38 -
.../org/apache/hadoop/ozone/upgrade/Execute.java | 37 -
.../hadoop/ozone/upgrade/InPlaceUpgrade.java | 45 -
.../java/org/apache/hadoop/ozone/upgrade/Plan.java | 38 -
.../apache/hadoop/ozone/upgrade/package-info.java | 23 -
pom.xml | 93 +-
999 files changed, 37896 insertions(+), 15714 deletions(-)
diff --cc hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java
index 8ed5ab6,a7aca16..03da6dd
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java
@@@ -365,11 -379,6 +379,13 @@@ public final class OzoneConsts
public static final String CONTAINER_DB_TYPE_ROCKSDB = "RocksDB";
public static final String CONTAINER_DB_TYPE_LEVELDB = "LevelDB";
+ // SCM HA
+ public static final String SCM_SERVICE_ID_DEFAULT = "scmServiceIdDefault";
+
+ // SCM Ratis snapshot file to store the last applied index
+ public static final String SCM_RATIS_SNAPSHOT_INDEX = "scmRatisSnapshotIndex";
+
+ public static final String SCM_RATIS_SNAPSHOT_TERM = "scmRatisSnapshotTerm";
+ // An on-disk transient marker file used when replacing DB with checkpoint
+ public static final String DB_TRANSIENT_MARKER = "dbInconsistentMarker";
}
diff --cc hadoop-hdds/config/src/main/java/org/apache/hadoop/hdds/conf/ConfigurationReflectionUtil.java
index eecb512,653300e..816e1b9
--- a/hadoop-hdds/config/src/main/java/org/apache/hadoop/hdds/conf/ConfigurationReflectionUtil.java
+++ b/hadoop-hdds/config/src/main/java/org/apache/hadoop/hdds/conf/ConfigurationReflectionUtil.java
@@@ -92,9 -94,9 +94,12 @@@ public final class ConfigurationReflect
forcedFieldSet(field, configuration,
from.getTimeDuration(key, "0s", configAnnotation.timeUnit()));
break;
+ case SIZE:
+ forcedFieldSet(field, configuration,
+ from.getStorageSize(key, "0B", configAnnotation.sizeUnit()));
+ case CLASS:
+ forcedFieldSet(field, configuration,
+ from.getClass(key, Object.class));
break;
default:
throw new ConfigurationException(
diff --cc hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java
index 9bf687d,3cf12e7..cbd713c
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java
@@@ -50,14 -42,8 +50,15 @@@ import org.apache.hadoop.hdds.conf.Conf
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState;
+ import org.apache.hadoop.hdds.scm.PipelineChoosePolicy;
import org.apache.hadoop.hdds.scm.PlacementPolicy;
+import org.apache.hadoop.hdds.scm.ha.SCMHAManager;
+import org.apache.hadoop.hdds.scm.ha.SCMHAManagerImpl;
+import org.apache.hadoop.hdds.scm.ha.SCMHAUtils;
+import org.apache.hadoop.hdds.scm.ha.SCMNodeDetails;
+import org.apache.hadoop.hdds.scm.server.ratis.SCMRatisServer;
+import org.apache.hadoop.hdds.scm.server.ratis.SCMRatisSnapshotInfo;
+import org.apache.hadoop.hdds.utils.HddsServerUtil;
import org.apache.hadoop.hdds.scm.ScmConfig;
import org.apache.hadoop.hdds.scm.ScmConfigKeys;
import org.apache.hadoop.hdds.scm.block.BlockManager;
@@@ -96,7 -82,8 +97,8 @@@ import org.apache.hadoop.hdds.scm.node.
import org.apache.hadoop.hdds.scm.pipeline.PipelineActionHandler;
import org.apache.hadoop.hdds.scm.pipeline.PipelineManager;
import org.apache.hadoop.hdds.scm.pipeline.PipelineReportHandler;
-import org.apache.hadoop.hdds.scm.pipeline.SCMPipelineManager;
+import org.apache.hadoop.hdds.scm.pipeline.PipelineManagerV2Impl;
+ import org.apache.hadoop.hdds.scm.pipeline.choose.algorithms.PipelineChoosePolicyFactory;
import org.apache.hadoop.hdds.scm.safemode.SCMSafeModeManager;
import org.apache.hadoop.hdds.security.exception.SCMSecurityException;
import org.apache.hadoop.hdds.security.x509.SecurityConfig;
@@@ -1175,56 -1119,6 +1180,56 @@@ public final class StorageContainerMana
return this.clusterMap;
}
+ private static SCMRatisServer initializeRatisServer(
+ OzoneConfiguration conf, StorageContainerManager scm) throws IOException {
+ SCMNodeDetails scmNodeDetails = SCMNodeDetails
+ .initStandAlone(conf);
+ //TODO enable Ratis group
+ SCMRatisServer scmRatisServer = SCMRatisServer.newSCMRatisServer(
+ conf.getObject(SCMRatisServer.SCMRatisServerConfiguration.class),
+ scm, scmNodeDetails, Collections.EMPTY_LIST,
+ SCMRatisServer.getSCMRatisDirectory(conf));
+ if (scmRatisServer != null) {
+ LOG.info("SCM Ratis server initialized at port {}",
+ scmRatisServer.getServerPort());
+ } // TODO error handling for scmRatisServer creation failure
+ return scmRatisServer;
+ }
+
+ @VisibleForTesting
+ public SCMRatisServer getScmRatisServer() {
+ return scmRatisServer;
+ }
+
+ public void setScmRatisServer(SCMRatisServer scmRatisServer) {
+ this.scmRatisServer = scmRatisServer;
+ }
+
+ @VisibleForTesting
+ public SCMRatisSnapshotInfo getSnapshotInfo() {
+ return scmRatisSnapshotInfo;
+ }
+
+ @VisibleForTesting
+ public long getRatisSnapshotIndex() {
+ return scmRatisSnapshotInfo.getIndex();
+ }
+
+ /**
+ * Save ratis snapshot to SCM meta store and local disk.
+ */
+ public TermIndex saveRatisSnapshot() throws IOException {
+ TermIndex snapshotIndex = scmRatisServer.getLastAppliedTermIndex();
+ if (scmMetadataStore != null) {
+ // Flush the SCM state to disk
- scmMetadataStore.getStore().flush();
++ scmMetadataStore.getStore().flushDB();
+ }
+
+ scmRatisSnapshotInfo.saveRatisSnapshotToDisk(snapshotIndex);
+
+ return snapshotIndex;
+ }
+
/**
* Get the safe mode status of all rules.
*
diff --cc hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManager.java
index 597a317,2b492a2..90f4996
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManager.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManager.java
@@@ -26,9 -26,6 +26,12 @@@ import static org.apache.hadoop.hdds.Hd
import static org.apache.hadoop.hdds.HddsConfigKeys
.HDDS_SCM_SAFEMODE_PIPELINE_CREATION;
import static org.junit.Assert.fail;
++<<<<<<< HEAD
+
+import org.apache.hadoop.hdds.scm.TestUtils;
+import org.junit.Ignore;
++=======
++>>>>>>> master
import static org.mockito.Matchers.argThat;
import static org.mockito.Matchers.eq;
import static org.mockito.Mockito.mock;
diff --cc hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestFreonWithDatanodeRestart.java
index 18b9c4c,7a28ba5..3ac5ad8
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestFreonWithDatanodeRestart.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/freon/TestFreonWithDatanodeRestart.java
@@@ -74,15 -74,13 +74,12 @@@ public class TestFreonWithDatanodeResta
ratisServerConfig.setRequestTimeOut(Duration.ofSeconds(3));
ratisServerConfig.setWatchTimeOut(Duration.ofSeconds(3));
conf.setFromObject(ratisServerConfig);
- conf.setTimeDuration(
- RatisHelper.HDDS_DATANODE_RATIS_CLIENT_PREFIX_KEY+ "." +
- "rpc.request.timeout",
- 3, TimeUnit.SECONDS);
- conf.setTimeDuration(
- RatisHelper.HDDS_DATANODE_RATIS_CLIENT_PREFIX_KEY+ "." +
- "watch.request.timeout",
- 3, TimeUnit.SECONDS);
- conf.setInt(OZONE_DATANODE_PIPELINE_LIMIT, 5);
-
+ RatisClientConfig.RaftConfig raftClientConfig =
+ conf.getObject(RatisClientConfig.RaftConfig.class);
+ raftClientConfig.setRpcRequestTimeout(Duration.ofSeconds(3));
+ raftClientConfig.setRpcWatchRequestTimeout(Duration.ofSeconds(3));
+ conf.setFromObject(raftClientConfig);
+
cluster = MiniOzoneCluster.newBuilder(conf)
.setHbProcessorInterval(1000)
.setHbInterval(1000)
---------------------------------------------------------------------
To unsubscribe, e-mail: ozone-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: ozone-commits-help@hadoop.apache.org
[hadoop-ozone] 02/11: HDDS-3837. Add isLeader check in SCMHAManager.
Posted by na...@apache.org.
This is an automated email from the ASF dual-hosted git repository.
nanda pushed a commit to branch HDDS-2823
in repository https://gitbox.apache.org/repos/asf/hadoop-ozone.git
commit 58394ebdd22db72b8f51a7ed700ad0c54eff4e3d
Author: Li Cheng <ti...@tencent.com>
AuthorDate: Sat Oct 24 20:55:36 2020 +0530
HDDS-3837. Add isLeader check in SCMHAManager.
---
.../hadoop/hdds/scm/block/BlockManagerImpl.java | 5 +-
.../scm/container/CloseContainerEventHandler.java | 4 +-
.../apache/hadoop/hdds/scm/ha/SCMHAManager.java | 13 ++
.../hadoop/hdds/scm/ha/SCMHAManagerImpl.java | 85 ++++++++++-
.../org/apache/hadoop/hdds/scm/ha/SCMHAUtils.java | 6 +-
.../apache/hadoop/hdds/scm/ha/SCMRatisServer.java | 10 ++
.../hadoop/hdds/scm/ha/SCMRatisServerImpl.java | 15 ++
.../hadoop/hdds/scm/node/NewNodeHandler.java | 12 +-
.../scm/node/NonHealthyToHealthyNodeHandler.java | 12 +-
.../scm/pipeline/BackgroundPipelineCreator.java | 2 +-
.../hadoop/hdds/scm/pipeline/PipelineManager.java | 5 +-
.../hdds/scm/pipeline/PipelineManagerMXBean.java | 3 +-
.../hdds/scm/pipeline/PipelineManagerV2Impl.java | 65 +++++++--
.../hadoop/hdds/scm/ha/MockSCMHAManager.java | 53 ++++++-
.../scm/pipeline/TestPipelineActionHandler.java | 3 +-
.../hdds/scm/pipeline/TestPipelineManagerImpl.java | 156 ++++++++++++++++++---
.../hdds/scm/safemode/TestSCMSafeModeManager.java | 2 +-
17 files changed, 401 insertions(+), 50 deletions(-)
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/BlockManagerImpl.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/BlockManagerImpl.java
index b5b2aaf..ec0094b 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/BlockManagerImpl.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/BlockManagerImpl.java
@@ -57,6 +57,8 @@ import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_BLOCK_DELETING_SERVI
import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_BLOCK_DELETING_SERVICE_INTERVAL_DEFAULT;
import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_BLOCK_DELETING_SERVICE_TIMEOUT;
import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_BLOCK_DELETING_SERVICE_TIMEOUT_DEFAULT;
+
+import org.apache.ratis.protocol.NotLeaderException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -256,7 +258,8 @@ public class BlockManagerImpl implements BlockManager, BlockmanagerMXBean {
* @param containerInfo - Container Info.
* @return AllocatedBlock
*/
- private AllocatedBlock newBlock(ContainerInfo containerInfo) {
+ private AllocatedBlock newBlock(ContainerInfo containerInfo)
+ throws NotLeaderException {
try {
final Pipeline pipeline = pipelineManager
.getPipeline(containerInfo.getPipelineID());
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/CloseContainerEventHandler.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/CloseContainerEventHandler.java
index fd73711..a2b79fb 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/CloseContainerEventHandler.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/CloseContainerEventHandler.java
@@ -29,6 +29,7 @@ import org.apache.hadoop.hdds.server.events.EventHandler;
import org.apache.hadoop.hdds.server.events.EventPublisher;
import org.apache.hadoop.ozone.protocol.commands.CloseContainerCommand;
import org.apache.hadoop.ozone.protocol.commands.CommandForDatanode;
+import org.apache.ratis.protocol.NotLeaderException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -98,7 +99,7 @@ public class CloseContainerEventHandler implements EventHandler<ContainerID> {
* @throws ContainerNotFoundException
*/
private List<DatanodeDetails> getNodes(final ContainerInfo container)
- throws ContainerNotFoundException {
+ throws ContainerNotFoundException, NotLeaderException {
try {
return pipelineManager.getPipeline(container.getPipelineID()).getNodes();
} catch (PipelineNotFoundException ex) {
@@ -109,5 +110,4 @@ public class CloseContainerEventHandler implements EventHandler<ContainerID> {
.collect(Collectors.toList());
}
}
-
}
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMHAManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMHAManager.java
index eb6c800..ade0ad9 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMHAManager.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMHAManager.java
@@ -17,6 +17,9 @@
package org.apache.hadoop.hdds.scm.ha;
+import org.apache.ratis.protocol.NotLeaderException;
+import org.apache.ratis.protocol.RaftPeer;
+
import java.io.IOException;
/**
@@ -40,7 +43,17 @@ public interface SCMHAManager {
SCMRatisServer getRatisServer();
/**
+ * Returns suggested leader from RaftServer.
+ */
+ RaftPeer getSuggestedLeader();
+
+ /**
* Stops the HA service.
*/
void shutdown() throws IOException;
+
+ /**
+ * Returns NotLeaderException with useful info.
+ */
+ NotLeaderException triggerNotLeaderException();
}
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMHAManagerImpl.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMHAManagerImpl.java
index 89ac714..8bb9457 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMHAManagerImpl.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMHAManagerImpl.java
@@ -17,7 +17,17 @@
package org.apache.hadoop.hdds.scm.ha;
+import com.google.common.base.Preconditions;
import org.apache.hadoop.hdds.conf.ConfigurationSource;
+import org.apache.ratis.protocol.NotLeaderException;
+import org.apache.ratis.protocol.RaftGroupMemberId;
+import org.apache.ratis.protocol.RaftPeer;
+import org.apache.ratis.protocol.RaftPeerId;
+import org.apache.ratis.server.RaftServer;
+import org.apache.ratis.server.impl.RaftServerImpl;
+import org.apache.ratis.server.impl.RaftServerProxy;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
import java.io.IOException;
@@ -31,14 +41,17 @@ import java.io.IOException;
*/
public class SCMHAManagerImpl implements SCMHAManager {
- private static boolean isLeader = true;
+ private static final Logger LOG =
+ LoggerFactory.getLogger(SCMHAManagerImpl.class);
private final SCMRatisServerImpl ratisServer;
+ private final ConfigurationSource conf;
/**
* Creates SCMHAManager instance.
*/
public SCMHAManagerImpl(final ConfigurationSource conf) throws IOException {
+ this.conf = conf;
this.ratisServer = new SCMRatisServerImpl(
conf.getObject(SCMHAConfiguration.class), conf);
}
@@ -56,7 +69,28 @@ public class SCMHAManagerImpl implements SCMHAManager {
*/
@Override
public boolean isLeader() {
- return isLeader;
+ if (!SCMHAUtils.isSCMHAEnabled(conf)) {
+ // When SCM HA is not enabled, the current SCM is always the leader.
+ return true;
+ }
+ RaftServer server = ratisServer.getServer();
+ Preconditions.checkState(server instanceof RaftServerProxy);
+ RaftServerImpl serverImpl = null;
+ try {
+ // SCM only has one raft group.
+ serverImpl = ((RaftServerProxy) server)
+ .getImpl(ratisServer.getRaftGroupId());
+ if (serverImpl != null) {
+ // Only when it's sure the current SCM is the leader, otherwise
+ // it should all return false.
+ return serverImpl.isLeader();
+ }
+ } catch (IOException ioe) {
+ LOG.error("Fail to get RaftServer impl and therefore it's not clear " +
+ "whether it's leader. ", ioe);
+ }
+
+ return false;
}
/**
@@ -67,6 +101,42 @@ public class SCMHAManagerImpl implements SCMHAManager {
return ratisServer;
}
+ private RaftPeerId getPeerIdFromRoleInfo(RaftServerImpl serverImpl) {
+ if (serverImpl.isLeader()) {
+ return RaftPeerId.getRaftPeerId(
+ serverImpl.getRoleInfoProto().getLeaderInfo().toString());
+ } else if (serverImpl.isFollower()) {
+ return RaftPeerId.valueOf(
+ serverImpl.getRoleInfoProto().getFollowerInfo()
+ .getLeaderInfo().getId().getId());
+ } else {
+ return null;
+ }
+ }
+
+ @Override
+ public RaftPeer getSuggestedLeader() {
+ RaftServer server = ratisServer.getServer();
+ Preconditions.checkState(server instanceof RaftServerProxy);
+ RaftServerImpl serverImpl = null;
+ try {
+ // SCM only has one raft group.
+ serverImpl = ((RaftServerProxy) server)
+ .getImpl(ratisServer.getRaftGroupId());
+ if (serverImpl != null) {
+ RaftPeerId peerId = getPeerIdFromRoleInfo(serverImpl);
+ if (peerId != null) {
+ return new RaftPeer(peerId);
+ }
+ return null;
+ }
+ } catch (IOException ioe) {
+ LOG.error("Fail to get RaftServer impl and therefore it's not clear " +
+ "whether it's leader. ", ioe);
+ }
+ return null;
+ }
+
/**
* {@inheritDoc}
*/
@@ -75,4 +145,15 @@ public class SCMHAManagerImpl implements SCMHAManager {
ratisServer.stop();
}
+ /**
+ * {@inheritDoc}
+ */
+ @Override
+ public NotLeaderException triggerNotLeaderException() {
+ return new NotLeaderException(RaftGroupMemberId.valueOf(
+ ratisServer.getServer().getId(),
+ ratisServer.getRaftGroupId()),
+ getSuggestedLeader(),
+ ratisServer.getRaftPeers());
+ }
}
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMHAUtils.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMHAUtils.java
index eb22566..0f71744 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMHAUtils.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMHAUtils.java
@@ -20,7 +20,7 @@ package org.apache.hadoop.hdds.scm.ha;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdds.HddsConfigKeys;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.conf.ConfigurationSource;
import org.apache.hadoop.hdds.scm.ScmConfigKeys;
import org.apache.hadoop.hdds.scm.ScmUtils;
import org.apache.hadoop.hdds.scm.server.ratis.SCMRatisServer;
@@ -37,12 +37,12 @@ public final class SCMHAUtils {
}
// Check if SCM HA is enabled.
- public static boolean isSCMHAEnabled(OzoneConfiguration conf) {
+ public static boolean isSCMHAEnabled(ConfigurationSource conf) {
return conf.getBoolean(ScmConfigKeys.OZONE_SCM_HA_ENABLE_KEY,
ScmConfigKeys.OZONE_SCM_HA_ENABLE_DEFAULT);
}
- public static File createSCMRatisDir(OzoneConfiguration conf)
+ public static File createSCMRatisDir(ConfigurationSource conf)
throws IllegalArgumentException {
String scmRatisDir = SCMRatisServer.getSCMRatisDirectory(conf);
if (scmRatisDir == null || scmRatisDir.isEmpty()) {
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMRatisServer.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMRatisServer.java
index 4ddbc7b..2f99776 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMRatisServer.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMRatisServer.java
@@ -18,8 +18,12 @@
package org.apache.hadoop.hdds.scm.ha;
import org.apache.hadoop.hdds.protocol.proto.SCMRatisProtocol.RequestType;
+import org.apache.ratis.protocol.RaftGroupId;
+import org.apache.ratis.protocol.RaftPeer;
+import org.apache.ratis.server.RaftServer;
import java.io.IOException;
+import java.util.List;
import java.util.concurrent.ExecutionException;
/**
@@ -35,4 +39,10 @@ public interface SCMRatisServer {
throws IOException, ExecutionException, InterruptedException;
void stop() throws IOException;
+
+ RaftServer getServer();
+
+ RaftGroupId getRaftGroupId();
+
+ List<RaftPeer> getRaftPeers();
}
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMRatisServerImpl.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMRatisServerImpl.java
index 45ae212..33ae109 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMRatisServerImpl.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMRatisServerImpl.java
@@ -21,6 +21,7 @@ import java.io.IOException;
import java.net.InetSocketAddress;
import java.nio.charset.StandardCharsets;
import java.util.ArrayList;
+import java.util.Collections;
import java.util.List;
import java.util.UUID;
import java.util.concurrent.ExecutionException;
@@ -110,4 +111,18 @@ public class SCMRatisServerImpl implements SCMRatisServer {
server.close();
}
+ @Override
+ public RaftServer getServer() {
+ return server;
+ }
+
+ @Override
+ public RaftGroupId getRaftGroupId() {
+ return raftGroupId;
+ }
+
+ @Override
+ public List<RaftPeer> getRaftPeers() {
+ return Collections.singletonList(new RaftPeer(raftPeerId));
+ }
}
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NewNodeHandler.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NewNodeHandler.java
index a40a63a..42cada9 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NewNodeHandler.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NewNodeHandler.java
@@ -23,11 +23,16 @@ import org.apache.hadoop.hdds.protocol.DatanodeDetails;
import org.apache.hadoop.hdds.scm.pipeline.PipelineManager;
import org.apache.hadoop.hdds.server.events.EventHandler;
import org.apache.hadoop.hdds.server.events.EventPublisher;
+import org.apache.ratis.protocol.NotLeaderException;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
/**
* Handles New Node event.
*/
public class NewNodeHandler implements EventHandler<DatanodeDetails> {
+ private static final Logger LOG =
+ LoggerFactory.getLogger(NewNodeHandler.class);
private final PipelineManager pipelineManager;
private final ConfigurationSource conf;
@@ -41,6 +46,11 @@ public class NewNodeHandler implements EventHandler<DatanodeDetails> {
@Override
public void onMessage(DatanodeDetails datanodeDetails,
EventPublisher publisher) {
- pipelineManager.triggerPipelineCreation();
+ try {
+ pipelineManager.triggerPipelineCreation();
+ } catch (NotLeaderException ex) {
+ LOG.debug("Not the current leader SCM and cannot start pipeline" +
+ " creation.");
+ }
}
}
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NonHealthyToHealthyNodeHandler.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NonHealthyToHealthyNodeHandler.java
index cc32f84..e73231b 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NonHealthyToHealthyNodeHandler.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NonHealthyToHealthyNodeHandler.java
@@ -23,12 +23,17 @@ import org.apache.hadoop.hdds.protocol.DatanodeDetails;
import org.apache.hadoop.hdds.scm.pipeline.PipelineManager;
import org.apache.hadoop.hdds.server.events.EventHandler;
import org.apache.hadoop.hdds.server.events.EventPublisher;
+import org.apache.ratis.protocol.NotLeaderException;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
/**
* Handles Stale node event.
*/
public class NonHealthyToHealthyNodeHandler
implements EventHandler<DatanodeDetails> {
+ private static final Logger LOG =
+ LoggerFactory.getLogger(NonHealthyToHealthyNodeHandler.class);
private final PipelineManager pipelineManager;
private final ConfigurationSource conf;
@@ -42,6 +47,11 @@ public class NonHealthyToHealthyNodeHandler
@Override
public void onMessage(DatanodeDetails datanodeDetails,
EventPublisher publisher) {
- pipelineManager.triggerPipelineCreation();
+ try {
+ pipelineManager.triggerPipelineCreation();
+ } catch (NotLeaderException ex) {
+ LOG.debug("Not the current leader SCM and cannot start pipeline" +
+ " creation.");
+ }
}
}
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/BackgroundPipelineCreator.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/BackgroundPipelineCreator.java
index f240293..42b3a93 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/BackgroundPipelineCreator.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/BackgroundPipelineCreator.java
@@ -103,7 +103,7 @@ class BackgroundPipelineCreator {
}
}
- private void createPipelines() {
+ private void createPipelines() throws RuntimeException {
// TODO: #CLUTIL Different replication factor may need to be supported
HddsProtos.ReplicationType type = HddsProtos.ReplicationType.valueOf(
conf.get(OzoneConfigKeys.OZONE_REPLICATION_TYPE,
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineManager.java
index 9c997a8..ddd461b 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineManager.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineManager.java
@@ -30,6 +30,7 @@ import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType;
import org.apache.hadoop.hdds.scm.container.ContainerID;
import org.apache.hadoop.hdds.scm.safemode.SCMSafeModeManager.SafeModeStatus;
import org.apache.hadoop.hdds.server.events.EventHandler;
+import org.apache.ratis.protocol.NotLeaderException;
/**
* Interface which exposes the api for pipeline management.
@@ -55,7 +56,7 @@ public interface PipelineManager extends Closeable, PipelineManagerMXBean,
ReplicationFactor factor);
List<Pipeline> getPipelines(ReplicationType type,
- Pipeline.PipelineState state);
+ Pipeline.PipelineState state) throws NotLeaderException;
List<Pipeline> getPipelines(ReplicationType type,
ReplicationFactor factor, Pipeline.PipelineState state);
@@ -84,7 +85,7 @@ public interface PipelineManager extends Closeable, PipelineManagerMXBean,
void startPipelineCreator();
- void triggerPipelineCreation();
+ void triggerPipelineCreation() throws NotLeaderException;
void incNumBlocksAllocatedMetric(PipelineID id);
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineManagerMXBean.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineManagerMXBean.java
index 6d7d717..55e096b 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineManagerMXBean.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineManagerMXBean.java
@@ -19,6 +19,7 @@
package org.apache.hadoop.hdds.scm.pipeline;
import org.apache.hadoop.hdds.annotation.InterfaceAudience;
+import org.apache.ratis.protocol.NotLeaderException;
import java.util.Map;
@@ -33,6 +34,6 @@ public interface PipelineManagerMXBean {
* Returns the number of pipelines in different state.
* @return state to number of pipeline map
*/
- Map<String, Integer> getPipelineInfo();
+ Map<String, Integer> getPipelineInfo() throws NotLeaderException;
}
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineManagerV2Impl.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineManagerV2Impl.java
index 1241745..069540c 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineManagerV2Impl.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineManagerV2Impl.java
@@ -36,6 +36,7 @@ import org.apache.hadoop.hdds.utils.Scheduler;
import org.apache.hadoop.hdds.utils.db.Table;
import org.apache.hadoop.metrics2.util.MBeans;
import org.apache.hadoop.util.Time;
+import org.apache.ratis.protocol.NotLeaderException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -75,19 +76,21 @@ public final class PipelineManagerV2Impl implements PipelineManager {
private final SCMPipelineMetrics metrics;
private long pipelineWaitDefaultTimeout;
private final AtomicBoolean isInSafeMode;
+ private SCMHAManager scmhaManager;
// Used to track if the safemode pre-checks have completed. This is designed
// to prevent pipelines being created until sufficient nodes have registered.
private final AtomicBoolean pipelineCreationAllowed;
private PipelineManagerV2Impl(ConfigurationSource conf,
- NodeManager nodeManager,
- StateManager pipelineStateManager,
- PipelineFactory pipelineFactory,
+ SCMHAManager scmhaManager,
+ StateManager pipelineStateManager,
+ PipelineFactory pipelineFactory,
EventPublisher eventPublisher) {
this.lock = new ReentrantReadWriteLock();
this.pipelineFactory = pipelineFactory;
this.stateManager = pipelineStateManager;
this.conf = conf;
+ this.scmhaManager = scmhaManager;
this.eventPublisher = eventPublisher;
this.pmInfoBean = MBeans.register("SCMPipelineManager",
"SCMPipelineManagerInfo", this);
@@ -120,7 +123,7 @@ public final class PipelineManagerV2Impl implements PipelineManager {
nodeManager, stateManager, conf, eventPublisher);
// Create PipelineManager
PipelineManagerV2Impl pipelineManager = new PipelineManagerV2Impl(conf,
- nodeManager, stateManager, pipelineFactory, eventPublisher);
+ scmhaManager, stateManager, pipelineFactory, eventPublisher);
// Create background thread.
Scheduler scheduler = new Scheduler(
@@ -136,6 +139,7 @@ public final class PipelineManagerV2Impl implements PipelineManager {
@Override
public Pipeline createPipeline(ReplicationType type,
ReplicationFactor factor) throws IOException {
+ checkLeader();
if (!isPipelineCreationAllowed() && factor != ReplicationFactor.ONE) {
LOG.debug("Pipeline creation is not allowed until safe mode prechecks " +
"complete");
@@ -266,6 +270,7 @@ public final class PipelineManagerV2Impl implements PipelineManager {
@Override
public void addContainerToPipeline(
PipelineID pipelineID, ContainerID containerID) throws IOException {
+ checkLeader();
lock.writeLock().lock();
try {
stateManager.addContainerToPipeline(pipelineID, containerID);
@@ -277,6 +282,7 @@ public final class PipelineManagerV2Impl implements PipelineManager {
@Override
public void removeContainerFromPipeline(
PipelineID pipelineID, ContainerID containerID) throws IOException {
+ checkLeader();
lock.writeLock().lock();
try {
stateManager.removeContainerFromPipeline(pipelineID, containerID);
@@ -288,6 +294,7 @@ public final class PipelineManagerV2Impl implements PipelineManager {
@Override
public NavigableSet<ContainerID> getContainersInPipeline(
PipelineID pipelineID) throws IOException {
+ checkLeader();
lock.readLock().lock();
try {
return stateManager.getContainers(pipelineID);
@@ -298,11 +305,13 @@ public final class PipelineManagerV2Impl implements PipelineManager {
@Override
public int getNumberOfContainers(PipelineID pipelineID) throws IOException {
+ checkLeader();
return stateManager.getNumberOfContainers(pipelineID);
}
@Override
public void openPipeline(PipelineID pipelineId) throws IOException {
+ checkLeader();
lock.writeLock().lock();
try {
Pipeline pipeline = stateManager.getPipeline(pipelineId);
@@ -328,6 +337,7 @@ public final class PipelineManagerV2Impl implements PipelineManager {
* @throws IOException
*/
protected void removePipeline(Pipeline pipeline) throws IOException {
+ checkLeader();
pipelineFactory.close(pipeline.getType(), pipeline);
PipelineID pipelineID = pipeline.getId();
lock.writeLock().lock();
@@ -349,6 +359,7 @@ public final class PipelineManagerV2Impl implements PipelineManager {
*/
protected void closeContainersForPipeline(final PipelineID pipelineId)
throws IOException {
+ checkLeader();
Set<ContainerID> containerIDs = stateManager.getContainers(pipelineId);
for (ContainerID containerID : containerIDs) {
eventPublisher.fireEvent(SCMEvents.CLOSE_CONTAINER, containerID);
@@ -364,6 +375,7 @@ public final class PipelineManagerV2Impl implements PipelineManager {
@Override
public void closePipeline(Pipeline pipeline, boolean onTimeout)
throws IOException {
+ checkLeader();
PipelineID pipelineID = pipeline.getId();
lock.writeLock().lock();
try {
@@ -393,6 +405,7 @@ public final class PipelineManagerV2Impl implements PipelineManager {
@Override
public void scrubPipeline(ReplicationType type, ReplicationFactor factor)
throws IOException {
+ checkLeader();
if (type != ReplicationType.RATIS || factor != ReplicationFactor.THREE) {
// Only srub pipeline for RATIS THREE pipeline
return;
@@ -439,7 +452,9 @@ public final class PipelineManagerV2Impl implements PipelineManager {
* Triggers pipeline creation after the specified time.
*/
@Override
- public void triggerPipelineCreation() {
+ public void triggerPipelineCreation() throws NotLeaderException {
+ // TODO add checkLeader once follower validates safemode
+ // before it becomes leader.
backgroundPipelineCreator.triggerPipelineCreation();
}
@@ -457,6 +472,7 @@ public final class PipelineManagerV2Impl implements PipelineManager {
@Override
public void activatePipeline(PipelineID pipelineID)
throws IOException {
+ checkLeader();
stateManager.updatePipelineState(pipelineID.getProtobuf(),
HddsProtos.PipelineState.PIPELINE_OPEN);
}
@@ -470,6 +486,7 @@ public final class PipelineManagerV2Impl implements PipelineManager {
@Override
public void deactivatePipeline(PipelineID pipelineID)
throws IOException {
+ checkLeader();
stateManager.updatePipelineState(pipelineID.getProtobuf(),
HddsProtos.PipelineState.PIPELINE_DORMANT);
}
@@ -484,6 +501,7 @@ public final class PipelineManagerV2Impl implements PipelineManager {
@Override
public void waitPipelineReady(PipelineID pipelineID, long timeout)
throws IOException {
+ checkLeader();
long st = Time.monotonicNow();
if (timeout == 0) {
timeout = pipelineWaitDefaultTimeout;
@@ -515,7 +533,8 @@ public final class PipelineManagerV2Impl implements PipelineManager {
}
@Override
- public Map<String, Integer> getPipelineInfo() {
+ public Map<String, Integer> getPipelineInfo() throws NotLeaderException {
+ checkLeader();
final Map<String, Integer> pipelineInfo = new HashMap<>();
for (Pipeline.PipelineState state : Pipeline.PipelineState.values()) {
pipelineInfo.put(state.toString(), 0);
@@ -564,13 +583,21 @@ public final class PipelineManagerV2Impl implements PipelineManager {
// Trigger pipeline creation only if the preCheck status has changed to
// complete.
- if (isPipelineCreationAllowed() && !currentAllowPipelines) {
- triggerPipelineCreation();
- }
- // Start the pipeline creation thread only when safemode switches off
- if (!getSafeModeStatus() && currentlyInSafeMode) {
- startPipelineCreator();
+
+ try {
+ if (isPipelineCreationAllowed() && !currentAllowPipelines) {
+ triggerPipelineCreation();
+ }
+ // Start the pipeline creation thread only when safemode switches off
+ if (!getSafeModeStatus() && currentlyInSafeMode) {
+ startPipelineCreator();
+ }
+ } catch (NotLeaderException ex) {
+ LOG.warn("Not the current leader SCM and cannot process pipeline" +
+ " creation. Suggested leader is: ",
+ scmhaManager.getSuggestedLeader().getAddress());
}
+
}
@VisibleForTesting
@@ -593,6 +620,20 @@ public final class PipelineManagerV2Impl implements PipelineManager {
public StateManager getStateManager() {
return stateManager;
}
+
+ public void setScmhaManager(SCMHAManager scmhaManager) {
+ this.scmhaManager = scmhaManager;
+ }
+
+ /**
+ * Check if scm is current leader.
+ * @throws NotLeaderException when it's not the current leader.
+ */
+ private void checkLeader() throws NotLeaderException {
+ if (!scmhaManager.isLeader()) {
+ throw scmhaManager.triggerNotLeaderException();
+ }
+ }
private void setBackgroundPipelineCreator(
BackgroundPipelineCreator backgroundPipelineCreator) {
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/ha/MockSCMHAManager.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/ha/MockSCMHAManager.java
index c3b14fb..ce48c11 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/ha/MockSCMHAManager.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/ha/MockSCMHAManager.java
@@ -28,11 +28,14 @@ import com.google.protobuf.InvalidProtocolBufferException;
import org.apache.hadoop.hdds.protocol.proto.SCMRatisProtocol.RequestType;
import org.apache.ratis.protocol.ClientId;
import org.apache.ratis.protocol.Message;
+import org.apache.ratis.protocol.NotLeaderException;
import org.apache.ratis.protocol.RaftClientReply;
import org.apache.ratis.protocol.RaftGroupId;
import org.apache.ratis.protocol.RaftGroupMemberId;
+import org.apache.ratis.protocol.RaftPeer;
import org.apache.ratis.protocol.RaftPeerId;
import org.apache.ratis.protocol.StateMachineException;
+import org.apache.ratis.server.RaftServer;
/**
* Mock SCMHAManager implementation for testing.
@@ -40,16 +43,30 @@ import org.apache.ratis.protocol.StateMachineException;
public final class MockSCMHAManager implements SCMHAManager {
private final SCMRatisServer ratisServer;
+ private boolean isLeader;
public static SCMHAManager getInstance() {
return new MockSCMHAManager();
}
+ public static SCMHAManager getLeaderInstance() {
+ MockSCMHAManager mockSCMHAManager = new MockSCMHAManager();
+ mockSCMHAManager.setIsLeader(true);
+ return mockSCMHAManager;
+ }
+
+ public static SCMHAManager getFollowerInstance() {
+ MockSCMHAManager mockSCMHAManager = new MockSCMHAManager();
+ mockSCMHAManager.setIsLeader(false);
+ return mockSCMHAManager;
+ }
+
/**
* Creates MockSCMHAManager instance.
*/
private MockSCMHAManager() {
this.ratisServer = new MockRatisServer();
+ this.isLeader = true;
}
@Override
@@ -62,7 +79,16 @@ public final class MockSCMHAManager implements SCMHAManager {
*/
@Override
public boolean isLeader() {
- return true;
+ return isLeader;
+ }
+
+ public void setIsLeader(boolean isLeader) {
+ this.isLeader = isLeader;
+ }
+
+ @Override
+ public RaftPeer getSuggestedLeader() {
+ throw new UnsupportedOperationException();
}
/**
@@ -81,6 +107,16 @@ public final class MockSCMHAManager implements SCMHAManager {
ratisServer.stop();
}
+ /**
+ * {@inheritDoc}
+ */
+ @Override
+ public NotLeaderException triggerNotLeaderException() {
+ return new NotLeaderException(RaftGroupMemberId.valueOf(
+ RaftPeerId.valueOf("peer"), RaftGroupId.randomId()),
+ null, new ArrayList<>());
+ }
+
private static class MockRatisServer implements SCMRatisServer {
private Map<RequestType, Object> handlers =
@@ -141,6 +177,21 @@ public final class MockSCMHAManager implements SCMHAManager {
}
@Override
+ public RaftServer getServer() {
+ return null;
+ }
+
+ @Override
+ public RaftGroupId getRaftGroupId() {
+ return null;
+ }
+
+ @Override
+ public List<RaftPeer> getRaftPeers() {
+ return new ArrayList<>();
+ }
+
+ @Override
public void stop() {
}
}
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineActionHandler.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineActionHandler.java
index 99443c3..e40c8ba 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineActionHandler.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineActionHandler.java
@@ -25,6 +25,7 @@ import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolPro
import org.apache.hadoop.hdds.scm.server.SCMDatanodeHeartbeatDispatcher.PipelineActionsFromDatanode;
import org.apache.hadoop.hdds.server.events.EventQueue;
import org.apache.hadoop.ozone.protocol.commands.CommandForDatanode;
+import org.apache.ratis.protocol.NotLeaderException;
import org.junit.Test;
import org.mockito.Mockito;
@@ -37,7 +38,7 @@ public class TestPipelineActionHandler {
@Test
public void testCloseActionForMissingPipeline()
- throws PipelineNotFoundException {
+ throws PipelineNotFoundException, NotLeaderException {
final PipelineManager manager = Mockito.mock(PipelineManager.class);
final EventQueue queue = Mockito.mock(EventQueue.class);
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineManagerImpl.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineManagerImpl.java
index e1f9104..a8f03bb 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineManagerImpl.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineManagerImpl.java
@@ -28,6 +28,7 @@ import org.apache.hadoop.hdds.scm.container.ContainerID;
import org.apache.hadoop.hdds.scm.container.MockNodeManager;
import org.apache.hadoop.hdds.scm.exceptions.SCMException;
import org.apache.hadoop.hdds.scm.ha.MockSCMHAManager;
+import org.apache.hadoop.hdds.scm.ha.SCMHAManager;
import org.apache.hadoop.hdds.scm.metadata.SCMDBDefinition;
import org.apache.hadoop.hdds.scm.safemode.SCMSafeModeManager;
import org.apache.hadoop.hdds.scm.server.SCMDatanodeHeartbeatDispatcher;
@@ -37,6 +38,7 @@ import org.apache.hadoop.hdds.utils.db.DBStoreBuilder;
import org.apache.hadoop.metrics2.MetricsRecordBuilder;
import org.apache.hadoop.ozone.container.common.SCMTestUtils;
import org.apache.hadoop.test.GenericTestUtils;
+import org.apache.ratis.protocol.NotLeaderException;
import org.junit.After;
import org.junit.Assert;
import org.junit.Before;
@@ -66,7 +68,6 @@ public class TestPipelineManagerImpl {
private DBStore dbStore;
private static MockNodeManager nodeManager;
private static int maxPipelineCount;
- private static EventQueue eventQueue;
@Before
public void init() throws Exception {
@@ -76,7 +77,6 @@ public class TestPipelineManagerImpl {
conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, testDir.getAbsolutePath());
dbStore = DBStoreBuilder.createDBStore(conf, new SCMDBDefinition());
nodeManager = new MockNodeManager(true, 20);
- eventQueue = new EventQueue();
maxPipelineCount = nodeManager.getNodeCount(HddsProtos.NodeState.HEALTHY) *
conf.getInt(OZONE_DATANODE_PIPELINE_LIMIT,
OZONE_DATANODE_PIPELINE_LIMIT_DEFAULT) /
@@ -91,17 +91,23 @@ public class TestPipelineManagerImpl {
FileUtil.fullyDelete(testDir);
}
- private PipelineManagerV2Impl createPipelineManager()
+ private PipelineManagerV2Impl createPipelineManager(boolean leader)
throws IOException {
- return PipelineManagerV2Impl.newPipelineManager(
- conf, MockSCMHAManager.getInstance(),
- nodeManager,
- SCMDBDefinition.PIPELINES.getTable(dbStore), eventQueue);
+ SCMHAManager scmhaManager;
+ if (leader) {
+ scmhaManager = MockSCMHAManager.getLeaderInstance();
+ } else {
+ scmhaManager = MockSCMHAManager.getFollowerInstance();
+ }
+ return PipelineManagerV2Impl.newPipelineManager(conf, scmhaManager,
+ new MockNodeManager(true, 20),
+ SCMDBDefinition.PIPELINES.getTable(dbStore),
+ new EventQueue());
}
@Test
public void testCreatePipeline() throws Exception {
- PipelineManagerV2Impl pipelineManager = createPipelineManager();
+ PipelineManagerV2Impl pipelineManager = createPipelineManager(true);
Assert.assertTrue(pipelineManager.getPipelines().isEmpty());
pipelineManager.allowPipelineCreation();
Pipeline pipeline1 = pipelineManager.createPipeline(
@@ -115,7 +121,7 @@ public class TestPipelineManagerImpl {
Assert.assertTrue(pipelineManager.containsPipeline(pipeline2.getId()));
pipelineManager.close();
- PipelineManagerV2Impl pipelineManager2 = createPipelineManager();
+ PipelineManagerV2Impl pipelineManager2 = createPipelineManager(true);
// Should be able to load previous pipelines.
Assert.assertFalse(pipelineManager.getPipelines().isEmpty());
Assert.assertEquals(2, pipelineManager.getPipelines().size());
@@ -129,8 +135,24 @@ public class TestPipelineManagerImpl {
}
@Test
+ public void testCreatePipelineShouldFailOnFollower() throws Exception {
+ PipelineManagerV2Impl pipelineManager = createPipelineManager(false);
+ Assert.assertTrue(pipelineManager.getPipelines().isEmpty());
+ pipelineManager.allowPipelineCreation();
+ try {
+ pipelineManager.createPipeline(HddsProtos.ReplicationType.RATIS,
+ HddsProtos.ReplicationFactor.THREE);
+ } catch (NotLeaderException ex) {
+ pipelineManager.close();
+ return;
+ }
+ // Should not reach here.
+ Assert.fail();
+ }
+
+ @Test
public void testUpdatePipelineStates() throws Exception {
- PipelineManagerV2Impl pipelineManager = createPipelineManager();
+ PipelineManagerV2Impl pipelineManager = createPipelineManager(true);
pipelineManager.allowPipelineCreation();
Pipeline pipeline = pipelineManager.createPipeline(
HddsProtos.ReplicationType.RATIS, HddsProtos.ReplicationFactor.THREE);
@@ -164,8 +186,71 @@ public class TestPipelineManagerImpl {
}
@Test
+ public void testOpenPipelineShouldFailOnFollower() throws Exception {
+ PipelineManagerV2Impl pipelineManager = createPipelineManager(true);
+ pipelineManager.allowPipelineCreation();
+ Pipeline pipeline = pipelineManager.createPipeline(
+ HddsProtos.ReplicationType.RATIS, HddsProtos.ReplicationFactor.THREE);
+ Assert.assertEquals(1, pipelineManager.getPipelines().size());
+ Assert.assertTrue(pipelineManager.containsPipeline(pipeline.getId()));
+ Assert.assertEquals(ALLOCATED, pipeline.getPipelineState());
+ // Change to follower
+ pipelineManager.setScmhaManager(MockSCMHAManager.getFollowerInstance());
+ try {
+ pipelineManager.openPipeline(pipeline.getId());
+ } catch (NotLeaderException ex) {
+ pipelineManager.close();
+ return;
+ }
+ // Should not reach here.
+ Assert.fail();
+ }
+
+ @Test
+ public void testActivatePipelineShouldFailOnFollower() throws Exception {
+ PipelineManagerV2Impl pipelineManager = createPipelineManager(true);
+ pipelineManager.allowPipelineCreation();
+ Pipeline pipeline = pipelineManager.createPipeline(
+ HddsProtos.ReplicationType.RATIS, HddsProtos.ReplicationFactor.THREE);
+ Assert.assertEquals(1, pipelineManager.getPipelines().size());
+ Assert.assertTrue(pipelineManager.containsPipeline(pipeline.getId()));
+ Assert.assertEquals(ALLOCATED, pipeline.getPipelineState());
+ // Change to follower
+ pipelineManager.setScmhaManager(MockSCMHAManager.getFollowerInstance());
+ try {
+ pipelineManager.activatePipeline(pipeline.getId());
+ } catch (NotLeaderException ex) {
+ pipelineManager.close();
+ return;
+ }
+ // Should not reach here.
+ Assert.fail();
+ }
+
+ @Test
+ public void testDeactivatePipelineShouldFailOnFollower() throws Exception {
+ PipelineManagerV2Impl pipelineManager = createPipelineManager(true);
+ pipelineManager.allowPipelineCreation();
+ Pipeline pipeline = pipelineManager.createPipeline(
+ HddsProtos.ReplicationType.RATIS, HddsProtos.ReplicationFactor.THREE);
+ Assert.assertEquals(1, pipelineManager.getPipelines().size());
+ Assert.assertTrue(pipelineManager.containsPipeline(pipeline.getId()));
+ Assert.assertEquals(ALLOCATED, pipeline.getPipelineState());
+ // Change to follower
+ pipelineManager.setScmhaManager(MockSCMHAManager.getFollowerInstance());
+ try {
+ pipelineManager.deactivatePipeline(pipeline.getId());
+ } catch (NotLeaderException ex) {
+ pipelineManager.close();
+ return;
+ }
+ // Should not reach here.
+ Assert.fail();
+ }
+
+ @Test
public void testRemovePipeline() throws Exception {
- PipelineManagerV2Impl pipelineManager = createPipelineManager();
+ PipelineManagerV2Impl pipelineManager = createPipelineManager(true);
pipelineManager.allowPipelineCreation();
// Create a pipeline
Pipeline pipeline = pipelineManager.createPipeline(
@@ -207,12 +292,33 @@ public class TestPipelineManagerImpl {
}
@Test
+ public void testClosePipelineShouldFailOnFollower() throws Exception {
+ PipelineManagerV2Impl pipelineManager = createPipelineManager(true);
+ pipelineManager.allowPipelineCreation();
+ Pipeline pipeline = pipelineManager.createPipeline(
+ HddsProtos.ReplicationType.RATIS, HddsProtos.ReplicationFactor.THREE);
+ Assert.assertEquals(1, pipelineManager.getPipelines().size());
+ Assert.assertTrue(pipelineManager.containsPipeline(pipeline.getId()));
+ Assert.assertEquals(ALLOCATED, pipeline.getPipelineState());
+ // Change to follower
+ pipelineManager.setScmhaManager(MockSCMHAManager.getFollowerInstance());
+ try {
+ pipelineManager.closePipeline(pipeline, false);
+ } catch (NotLeaderException ex) {
+ pipelineManager.close();
+ return;
+ }
+ // Should not reach here.
+ Assert.fail();
+ }
+
+ @Test
public void testPipelineReport() throws Exception {
- PipelineManagerV2Impl pipelineManager = createPipelineManager();
+ PipelineManagerV2Impl pipelineManager = createPipelineManager(true);
pipelineManager.allowPipelineCreation();
SCMSafeModeManager scmSafeModeManager =
new SCMSafeModeManager(conf, new ArrayList<>(), pipelineManager,
- eventQueue);
+ new EventQueue());
Pipeline pipeline = pipelineManager
.createPipeline(HddsProtos.ReplicationType.RATIS,
HddsProtos.ReplicationFactor.THREE);
@@ -258,7 +364,7 @@ public class TestPipelineManagerImpl {
@Test
public void testPipelineCreationFailedMetric() throws Exception {
- PipelineManagerV2Impl pipelineManager = createPipelineManager();
+ PipelineManagerV2Impl pipelineManager = createPipelineManager(true);
pipelineManager.allowPipelineCreation();
// No pipeline at start
@@ -313,7 +419,7 @@ public class TestPipelineManagerImpl {
@Test
public void testPipelineOpenOnlyWhenLeaderReported() throws Exception {
- PipelineManagerV2Impl pipelineManager = createPipelineManager();
+ PipelineManagerV2Impl pipelineManager = createPipelineManager(true);
pipelineManager.allowPipelineCreation();
pipelineManager.onMessage(
@@ -324,13 +430,13 @@ public class TestPipelineManagerImpl {
// close manager
pipelineManager.close();
// new pipeline manager loads the pipelines from the db in ALLOCATED state
- pipelineManager = createPipelineManager();
+ pipelineManager = createPipelineManager(true);
Assert.assertEquals(Pipeline.PipelineState.ALLOCATED,
pipelineManager.getPipeline(pipeline.getId()).getPipelineState());
SCMSafeModeManager scmSafeModeManager =
new SCMSafeModeManager(new OzoneConfiguration(),
- new ArrayList<>(), pipelineManager, eventQueue);
+ new ArrayList<>(), pipelineManager, new EventQueue());
PipelineReportHandler pipelineReportHandler =
new PipelineReportHandler(scmSafeModeManager, pipelineManager, conf);
@@ -362,7 +468,7 @@ public class TestPipelineManagerImpl {
OZONE_SCM_PIPELINE_ALLOCATED_TIMEOUT, -1,
TimeUnit.MILLISECONDS);
- PipelineManagerV2Impl pipelineManager = createPipelineManager();
+ PipelineManagerV2Impl pipelineManager = createPipelineManager(true);
pipelineManager.allowPipelineCreation();
Pipeline pipeline = pipelineManager
.createPipeline(HddsProtos.ReplicationType.RATIS,
@@ -388,6 +494,14 @@ public class TestPipelineManagerImpl {
pipelineManager.close();
}
+ @Test (expected = NotLeaderException.class)
+ public void testScrubPipelineShouldFailOnFollower() throws Exception {
+ PipelineManagerV2Impl pipelineManager = createPipelineManager(false);
+ pipelineManager.allowPipelineCreation();
+ try {
+ pipelineManager.scrubPipeline(HddsProtos.ReplicationType.RATIS,
+ HddsProtos.ReplicationFactor.THREE);
+ } finally {
+ pipelineManager.close();
+ }
+ }
+
@Test
public void testPipelineNotCreatedUntilSafeModePrecheck() throws Exception {
// No timeout for pipeline scrubber.
@@ -395,7 +509,7 @@ public class TestPipelineManagerImpl {
OZONE_SCM_PIPELINE_ALLOCATED_TIMEOUT, -1,
TimeUnit.MILLISECONDS);
- PipelineManagerV2Impl pipelineManager = createPipelineManager();
+ PipelineManagerV2Impl pipelineManager = createPipelineManager(true);
try {
pipelineManager.createPipeline(HddsProtos.ReplicationType.RATIS,
HddsProtos.ReplicationFactor.THREE);
@@ -433,7 +547,7 @@ public class TestPipelineManagerImpl {
OZONE_SCM_PIPELINE_ALLOCATED_TIMEOUT, -1,
TimeUnit.MILLISECONDS);
- PipelineManagerV2Impl pipelineManager = createPipelineManager();
+ PipelineManagerV2Impl pipelineManager = createPipelineManager(true);
Assert.assertTrue(pipelineManager.getSafeModeStatus());
Assert.assertFalse(pipelineManager.isPipelineCreationAllowed());
// First pass pre-check as true, but safemode still on
@@ -456,6 +570,6 @@ public class TestPipelineManagerImpl {
boolean isLeader) {
SCMDatanodeHeartbeatDispatcher.PipelineReportFromDatanode report =
TestUtils.getPipelineReportFromDatanode(dn, pipeline.getId(), isLeader);
- pipelineReportHandler.onMessage(report, eventQueue);
+ pipelineReportHandler.onMessage(report, new EventQueue());
}
}
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/safemode/TestSCMSafeModeManager.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/safemode/TestSCMSafeModeManager.java
index 935dc77..0febf06 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/safemode/TestSCMSafeModeManager.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/safemode/TestSCMSafeModeManager.java
@@ -485,7 +485,7 @@ public class TestSCMSafeModeManager {
@Test
- public void testDisableSafeMode() {
+ public void testDisableSafeMode() throws IOException {
OzoneConfiguration conf = new OzoneConfiguration(config);
conf.setBoolean(HddsConfigKeys.HDDS_SCM_SAFEMODE_ENABLED, false);
PipelineManager pipelineManager = Mockito.mock(PipelineManager.class);
---------------------------------------------------------------------
To unsubscribe, e-mail: ozone-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: ozone-commits-help@hadoop.apache.org
[hadoop-ozone] 09/11: HDDS-4192. enable SCM Raft Group based on
config ozone.scm.names.
Posted by na...@apache.org.
This is an automated email from the ASF dual-hosted git repository.
nanda pushed a commit to branch HDDS-2823
in repository https://gitbox.apache.org/repos/asf/hadoop-ozone.git
commit 51111265a5fc737bbf9f01adc56df86e557513f6
Author: Glen Geng <gl...@tencent.com>
AuthorDate: Sat Oct 24 21:30:59 2020 +0530
HDDS-4192. enable SCM Raft Group based on config ozone.scm.names.
---
.../hadoop/hdds/scm/ha/SCMRatisServerImpl.java | 116 +++++++++++++++++++--
1 file changed, 106 insertions(+), 10 deletions(-)
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMRatisServerImpl.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMRatisServerImpl.java
index 33ae109..8611b1f 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMRatisServerImpl.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMRatisServerImpl.java
@@ -18,17 +18,22 @@
package org.apache.hadoop.hdds.scm.ha;
import java.io.IOException;
+import java.net.InetAddress;
import java.net.InetSocketAddress;
import java.nio.charset.StandardCharsets;
import java.util.ArrayList;
+import java.util.Arrays;
import java.util.Collections;
import java.util.List;
import java.util.UUID;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.atomic.AtomicLong;
+import java.util.stream.Collectors;
+import org.apache.hadoop.hdds.HddsUtils;
import org.apache.hadoop.hdds.conf.ConfigurationSource;
import org.apache.hadoop.hdds.protocol.proto.SCMRatisProtocol.RequestType;
+import org.apache.hadoop.hdds.scm.ScmConfigKeys;
import org.apache.ratis.conf.RaftProperties;
import org.apache.ratis.protocol.ClientId;
import org.apache.ratis.protocol.RaftClientReply;
@@ -38,11 +43,15 @@ import org.apache.ratis.protocol.RaftGroupId;
import org.apache.ratis.protocol.RaftPeer;
import org.apache.ratis.protocol.RaftPeerId;
import org.apache.ratis.server.RaftServer;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
/**
* TODO.
*/
public class SCMRatisServerImpl implements SCMRatisServer {
+ private static final Logger LOG =
+ LoggerFactory.getLogger(SCMRatisServerImpl.class);
private final InetSocketAddress address;
private final RaftServer server;
@@ -53,24 +62,20 @@ public class SCMRatisServerImpl implements SCMRatisServer {
private final ClientId clientId = ClientId.randomId();
private final AtomicLong callId = new AtomicLong();
-
// TODO: Refactor and remove ConfigurationSource and use only
// SCMHAConfiguration.
SCMRatisServerImpl(final SCMHAConfiguration haConf,
final ConfigurationSource conf)
throws IOException {
- final String scmServiceId = "SCM-HA-Service";
- final String scmNodeId = "localhost";
- this.raftPeerId = RaftPeerId.getRaftPeerId(scmNodeId);
this.address = haConf.getRatisBindAddress();
- final RaftPeer localRaftPeer = new RaftPeer(raftPeerId, address);
- final List<RaftPeer> raftPeers = new ArrayList<>();
- raftPeers.add(localRaftPeer);
+
+ SCMHAGroupBuilder scmHAGroupBuilder = new SCMHAGroupBuilder(haConf, conf);
+ this.raftPeerId = scmHAGroupBuilder.getPeerId();
+ this.raftGroupId = scmHAGroupBuilder.getRaftGroupId();
+ this.raftGroup = scmHAGroupBuilder.getRaftGroup();
+
final RaftProperties serverProperties = RatisUtil
.newRaftProperties(haConf, conf);
- this.raftGroupId = RaftGroupId.valueOf(
- UUID.nameUUIDFromBytes(scmServiceId.getBytes(StandardCharsets.UTF_8)));
- this.raftGroup = RaftGroup.valueOf(raftGroupId, raftPeers);
this.scmStateMachine = new SCMStateMachine();
this.server = RaftServer.newBuilder()
.setServerId(raftPeerId)
@@ -125,4 +130,95 @@ public class SCMRatisServerImpl implements SCMRatisServer {
public List<RaftPeer> getRaftPeers() {
return Collections.singletonList(new RaftPeer(raftPeerId));
}
+
+
+ /**
+ * If the SCM group starts from {@link ScmConfigKeys#OZONE_SCM_NAMES},
+ * its raft peers should locate on different nodes, and use the same port
+ * to communicate with each other.
+ *
+ * Each of the raft peer figures out its {@link RaftPeerId} by computing
+ * its position in {@link ScmConfigKeys#OZONE_SCM_NAMES}.
+ *
+ * Assume {@link ScmConfigKeys#OZONE_SCM_NAMES} is "ip0,ip1,ip2",
+ * scm with ip0 identifies its {@link RaftPeerId} as scm0,
+ * scm with ip1 identifies its {@link RaftPeerId} as scm1,
+ * scm with ip2 identifies its {@link RaftPeerId} as scm2.
+ *
+ * After startup, they will form a {@link RaftGroup} with groupID
+ * "SCM-HA-Service", and communicate with each other via
+ * ozone.scm.ha.ratis.bind.port.
+ */
+ private static class SCMHAGroupBuilder {
+ private static final String SCM_SERVICE_ID = "SCM-HA-Service";
+
+ private final RaftGroupId raftGroupId;
+ private final RaftGroup raftGroup;
+ private RaftPeerId selfPeerId;
+
+ /**
+ * @return raft group
+ */
+ public RaftGroup getRaftGroup() {
+ return raftGroup;
+ }
+
+ /**
+ * @return raft group id
+ */
+ public RaftGroupId getRaftGroupId() {
+ return raftGroupId;
+ }
+
+ /**
+ * @return raft peer id
+ */
+ public RaftPeerId getPeerId() {
+ return selfPeerId;
+ }
+
+ SCMHAGroupBuilder(final SCMHAConfiguration haConf,
+ final ConfigurationSource conf) throws IOException {
+ // fetch port
+ int port = haConf.getRatisBindAddress().getPort();
+
+ // fetch localhost
+ InetAddress localHost = InetAddress.getLocalHost();
+
+ // fetch hosts from ozone.scm.names
+ List<String> hosts =
+ Arrays.stream(conf.getTrimmedStrings(ScmConfigKeys.OZONE_SCM_NAMES))
+ .map(scmName -> HddsUtils.getHostName(scmName).get())
+ .collect(Collectors.toList());
+
+ final List<RaftPeer> raftPeers = new ArrayList<>();
+ for (int i = 0; i < hosts.size(); ++i) {
+ String nodeId = "scm" + i;
+ RaftPeerId peerId = RaftPeerId.getRaftPeerId(nodeId);
+
+ String host = hosts.get(i);
+ if (InetAddress.getByName(host).equals(localHost)) {
+ selfPeerId = peerId;
+ }
+
+ raftPeers.add(new RaftPeer(peerId, host + ":" + port));
+ }
+
+ if (selfPeerId == null) {
+ String errorMessage = "localhost " + localHost
+ + " does not exist in ozone.scm.names "
+ + conf.get(ScmConfigKeys.OZONE_SCM_NAMES);
+ throw new IOException(errorMessage);
+ }
+
+ LOG.info("Build a RaftGroup for SCMHA, " +
+ "localHost: {}, OZONE_SCM_NAMES: {}, selfPeerId: {}",
+ localHost, conf.get(ScmConfigKeys.OZONE_SCM_NAMES), selfPeerId);
+
+ raftGroupId = RaftGroupId.valueOf(UUID.nameUUIDFromBytes(
+ SCM_SERVICE_ID.getBytes(StandardCharsets.UTF_8)));
+
+ raftGroup = RaftGroup.valueOf(raftGroupId, raftPeers);
+ }
+ }
}
---------------------------------------------------------------------
To unsubscribe, e-mail: ozone-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: ozone-commits-help@hadoop.apache.org
[hadoop-ozone] 11/11: Resolving master merge conflict.
Posted by na...@apache.org.
This is an automated email from the ASF dual-hosted git repository.
nanda pushed a commit to branch HDDS-2823
in repository https://gitbox.apache.org/repos/asf/hadoop-ozone.git
commit 782057a4a15a4d562054c65d1e863e3a8bd8d368
Author: Nandakumar <na...@apache.org>
AuthorDate: Sun Oct 25 19:50:47 2020 +0530
Resolving master merge conflict.
---
.../hadoop/hdds/scm/block/BlockManagerImpl.java | 2 +-
.../scm/container/CloseContainerEventHandler.java | 2 +-
.../apache/hadoop/hdds/scm/ha/SCMHAManager.java | 2 +-
.../hadoop/hdds/scm/ha/SCMHAManagerImpl.java | 2 +-
.../hadoop/hdds/scm/node/NewNodeHandler.java | 2 +-
.../scm/node/NonHealthyToHealthyNodeHandler.java | 2 +-
.../hadoop/hdds/scm/pipeline/PipelineFactory.java | 3 +-
.../hadoop/hdds/scm/pipeline/PipelineManager.java | 2 +-
.../hdds/scm/pipeline/PipelineManagerMXBean.java | 2 +-
.../hdds/scm/pipeline/PipelineManagerV2Impl.java | 14 ++++-
.../hadoop/hdds/scm/ha/MockSCMHAManager.java | 4 +-
.../hadoop/hdds/scm/ha/TestSCMRatisResponse.java | 4 +-
.../hdds/scm/pipeline/MockPipelineManager.java | 10 ++++
.../scm/pipeline/MockRatisPipelineProvider.java | 9 ++-
.../scm/pipeline/TestPipelineActionHandler.java | 2 +-
.../hdds/scm/pipeline/TestPipelineManagerImpl.java | 2 +-
.../hdds/scm/pipeline/TestLeaderChoosePolicy.java | 2 +-
.../hadoop/ozone/TestStorageContainerManager.java | 4 --
.../client/rpc/TestDiscardPreallocatedBlocks.java | 2 +-
.../hadoop/ozone/scm/TestCloseContainer.java | 2 +-
.../apache/hadoop/ozone/shell/TestScmAdminHA.java | 3 +-
.../org/apache/hadoop/ozone/om/OzoneManager.java | 1 +
.../webapps/recon/ozone-recon-web/pnpm-lock.yaml | 66 ++++++++++------------
.../admin/scm/GetScmRatisRolesSubcommand.java | 9 ++-
.../apache/hadoop/ozone/admin/scm/ScmAdmin.java | 6 +-
25 files changed, 86 insertions(+), 73 deletions(-)
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/BlockManagerImpl.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/BlockManagerImpl.java
index ec0094b..06d34ed 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/BlockManagerImpl.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/BlockManagerImpl.java
@@ -58,7 +58,7 @@ import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_BLOCK_DELETING_SERVI
import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_BLOCK_DELETING_SERVICE_TIMEOUT;
import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_BLOCK_DELETING_SERVICE_TIMEOUT_DEFAULT;
-import org.apache.ratis.protocol.NotLeaderException;
+import org.apache.ratis.protocol.exceptions.NotLeaderException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/CloseContainerEventHandler.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/CloseContainerEventHandler.java
index a2b79fb..da22193 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/CloseContainerEventHandler.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/CloseContainerEventHandler.java
@@ -29,7 +29,7 @@ import org.apache.hadoop.hdds.server.events.EventHandler;
import org.apache.hadoop.hdds.server.events.EventPublisher;
import org.apache.hadoop.ozone.protocol.commands.CloseContainerCommand;
import org.apache.hadoop.ozone.protocol.commands.CommandForDatanode;
-import org.apache.ratis.protocol.NotLeaderException;
+import org.apache.ratis.protocol.exceptions.NotLeaderException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMHAManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMHAManager.java
index dc68b41..8ee26a2 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMHAManager.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMHAManager.java
@@ -18,8 +18,8 @@
package org.apache.hadoop.hdds.scm.ha;
import java.util.List;
-import org.apache.ratis.protocol.NotLeaderException;
import org.apache.ratis.protocol.RaftPeer;
+import org.apache.ratis.protocol.exceptions.NotLeaderException;
import java.io.IOException;
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMHAManagerImpl.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMHAManagerImpl.java
index e2aa04f..c86c7d9 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMHAManagerImpl.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMHAManagerImpl.java
@@ -21,10 +21,10 @@ import com.google.common.base.Preconditions;
import java.util.List;
import java.util.stream.Collectors;
import org.apache.hadoop.hdds.conf.ConfigurationSource;
-import org.apache.ratis.protocol.NotLeaderException;
import org.apache.ratis.protocol.RaftGroupMemberId;
import org.apache.ratis.protocol.RaftPeer;
import org.apache.ratis.protocol.RaftPeerId;
+import org.apache.ratis.protocol.exceptions.NotLeaderException;
import org.apache.ratis.server.RaftServer;
import org.apache.ratis.server.impl.RaftServerImpl;
import org.apache.ratis.server.impl.RaftServerProxy;
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NewNodeHandler.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NewNodeHandler.java
index 42cada9..08b5152 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NewNodeHandler.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NewNodeHandler.java
@@ -23,7 +23,7 @@ import org.apache.hadoop.hdds.protocol.DatanodeDetails;
import org.apache.hadoop.hdds.scm.pipeline.PipelineManager;
import org.apache.hadoop.hdds.server.events.EventHandler;
import org.apache.hadoop.hdds.server.events.EventPublisher;
-import org.apache.ratis.protocol.NotLeaderException;
+import org.apache.ratis.protocol.exceptions.NotLeaderException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NonHealthyToHealthyNodeHandler.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NonHealthyToHealthyNodeHandler.java
index e73231b..1cb6501 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NonHealthyToHealthyNodeHandler.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NonHealthyToHealthyNodeHandler.java
@@ -23,7 +23,7 @@ import org.apache.hadoop.hdds.protocol.DatanodeDetails;
import org.apache.hadoop.hdds.scm.pipeline.PipelineManager;
import org.apache.hadoop.hdds.server.events.EventHandler;
import org.apache.hadoop.hdds.server.events.EventPublisher;
-import org.apache.ratis.protocol.NotLeaderException;
+import org.apache.ratis.protocol.exceptions.NotLeaderException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineFactory.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineFactory.java
index bdd5053..68401d2 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineFactory.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineFactory.java
@@ -45,7 +45,8 @@ public class PipelineFactory {
providers.put(ReplicationType.STAND_ALONE,
new SimplePipelineProvider(nodeManager, stateManager));
providers.put(ReplicationType.RATIS,
- new RatisPipelineProvider(nodeManager, stateManager, conf,
+ new RatisPipelineProvider(nodeManager,
+ (PipelineStateManager) stateManager, conf,
eventPublisher));
}
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineManager.java
index ddd461b..9f714da 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineManager.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineManager.java
@@ -30,7 +30,7 @@ import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType;
import org.apache.hadoop.hdds.scm.container.ContainerID;
import org.apache.hadoop.hdds.scm.safemode.SCMSafeModeManager.SafeModeStatus;
import org.apache.hadoop.hdds.server.events.EventHandler;
-import org.apache.ratis.protocol.NotLeaderException;
+import org.apache.ratis.protocol.exceptions.NotLeaderException;
/**
* Interface which exposes the api for pipeline management.
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineManagerMXBean.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineManagerMXBean.java
index 55e096b..57eab61 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineManagerMXBean.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineManagerMXBean.java
@@ -19,7 +19,7 @@
package org.apache.hadoop.hdds.scm.pipeline;
import org.apache.hadoop.hdds.annotation.InterfaceAudience;
-import org.apache.ratis.protocol.NotLeaderException;
+import org.apache.ratis.protocol.exceptions.NotLeaderException;
import java.util.Map;
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineManagerV2Impl.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineManagerV2Impl.java
index 4690f29..8c4d7b7 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineManagerV2Impl.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineManagerV2Impl.java
@@ -36,7 +36,7 @@ import org.apache.hadoop.hdds.utils.Scheduler;
import org.apache.hadoop.hdds.utils.db.Table;
import org.apache.hadoop.metrics2.util.MBeans;
import org.apache.hadoop.util.Time;
-import org.apache.ratis.protocol.NotLeaderException;
+import org.apache.ratis.protocol.exceptions.NotLeaderException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -460,6 +460,18 @@ public final class PipelineManagerV2Impl implements PipelineManager {
metrics.incNumBlocksAllocated(id);
}
+ @Override
+ public int minHealthyVolumeNum(Pipeline pipeline) {
+ // TODO:
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public int minPipelineLimit(Pipeline pipeline) {
+ // TODO:
+ throw new UnsupportedOperationException();
+ }
+
/**
* Activates a dormant pipeline.
*
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/ha/MockSCMHAManager.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/ha/MockSCMHAManager.java
index e31e7e1..886eaee 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/ha/MockSCMHAManager.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/ha/MockSCMHAManager.java
@@ -29,14 +29,14 @@ import com.google.protobuf.InvalidProtocolBufferException;
import org.apache.hadoop.hdds.protocol.proto.SCMRatisProtocol.RequestType;
import org.apache.ratis.protocol.ClientId;
import org.apache.ratis.protocol.Message;
-import org.apache.ratis.protocol.NotLeaderException;
import org.apache.ratis.protocol.RaftClientReply;
import org.apache.ratis.protocol.RaftGroupId;
import org.apache.ratis.protocol.RaftGroupMemberId;
import org.apache.ratis.protocol.RaftPeer;
import org.apache.ratis.protocol.RaftPeerId;
-import org.apache.ratis.protocol.StateMachineException;
+import org.apache.ratis.protocol.exceptions.NotLeaderException;
import org.apache.ratis.server.RaftServer;
+import org.apache.ratis.protocol.exceptions.StateMachineException;
/**
* Mock SCMHAManager implementation for testing.
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/ha/TestSCMRatisResponse.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/ha/TestSCMRatisResponse.java
index daf0856..05e2970 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/ha/TestSCMRatisResponse.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/ha/TestSCMRatisResponse.java
@@ -20,13 +20,13 @@ package org.apache.hadoop.hdds.scm.ha;
import com.google.protobuf.InvalidProtocolBufferException;
import org.apache.ratis.protocol.ClientId;
-import org.apache.ratis.protocol.LeaderNotReadyException;
import org.apache.ratis.protocol.Message;
import org.apache.ratis.protocol.RaftClientReply;
-import org.apache.ratis.protocol.RaftException;
import org.apache.ratis.protocol.RaftGroupId;
import org.apache.ratis.protocol.RaftGroupMemberId;
import org.apache.ratis.protocol.RaftPeerId;
+import org.apache.ratis.protocol.exceptions.LeaderNotReadyException;
+import org.apache.ratis.protocol.exceptions.RaftException;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/MockPipelineManager.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/MockPipelineManager.java
index 6292ad4..947cd37 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/MockPipelineManager.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/MockPipelineManager.java
@@ -193,6 +193,16 @@ public final class MockPipelineManager implements PipelineManager {
}
@Override
+ public int minHealthyVolumeNum(Pipeline pipeline) {
+ return 0;
+ }
+
+ @Override
+ public int minPipelineLimit(Pipeline pipeline) {
+ return 0;
+ }
+
+ @Override
public void activatePipeline(final PipelineID pipelineID)
throws IOException {
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/MockRatisPipelineProvider.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/MockRatisPipelineProvider.java
index 49cac8b..cd0c475 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/MockRatisPipelineProvider.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/MockRatisPipelineProvider.java
@@ -39,20 +39,23 @@ public class MockRatisPipelineProvider extends RatisPipelineProvider {
NodeManager nodeManager, StateManager stateManager,
ConfigurationSource conf, EventPublisher eventPublisher,
boolean autoOpen) {
- super(nodeManager, stateManager, conf, eventPublisher);
+ super(nodeManager, (PipelineStateManager) stateManager,
+ conf, eventPublisher);
autoOpenPipeline = autoOpen;
}
public MockRatisPipelineProvider(NodeManager nodeManager,
StateManager stateManager,
ConfigurationSource conf) {
- super(nodeManager, stateManager, conf, new EventQueue());
+ super(nodeManager, (PipelineStateManager) stateManager,
+ conf, new EventQueue());
}
public MockRatisPipelineProvider(
NodeManager nodeManager, StateManager stateManager,
ConfigurationSource conf, EventPublisher eventPublisher) {
- super(nodeManager, stateManager, conf, eventPublisher);
+ super(nodeManager, (PipelineStateManager) stateManager,
+ conf, eventPublisher);
autoOpenPipeline = true;
}
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineActionHandler.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineActionHandler.java
index e40c8ba..4517b89 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineActionHandler.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineActionHandler.java
@@ -25,7 +25,7 @@ import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolPro
import org.apache.hadoop.hdds.scm.server.SCMDatanodeHeartbeatDispatcher.PipelineActionsFromDatanode;
import org.apache.hadoop.hdds.server.events.EventQueue;
import org.apache.hadoop.ozone.protocol.commands.CommandForDatanode;
-import org.apache.ratis.protocol.NotLeaderException;
+import org.apache.ratis.protocol.exceptions.NotLeaderException;
import org.junit.Test;
import org.mockito.Mockito;
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineManagerImpl.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineManagerImpl.java
index 642378f..51fff06 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineManagerImpl.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineManagerImpl.java
@@ -38,7 +38,7 @@ import org.apache.hadoop.hdds.utils.db.DBStoreBuilder;
import org.apache.hadoop.metrics2.MetricsRecordBuilder;
import org.apache.hadoop.ozone.container.common.SCMTestUtils;
import org.apache.hadoop.test.GenericTestUtils;
-import org.apache.ratis.protocol.NotLeaderException;
+import org.apache.ratis.protocol.exceptions.NotLeaderException;
import org.junit.After;
import org.junit.Assert;
import org.junit.Before;
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestLeaderChoosePolicy.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestLeaderChoosePolicy.java
index ecf1c2f..c043c56 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestLeaderChoosePolicy.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestLeaderChoosePolicy.java
@@ -182,7 +182,7 @@ public class TestLeaderChoosePolicy {
int destroyNum = r.nextInt(pipelines.size());
for (int k = 0; k <= destroyNum; k++) {
- pipelineManager.finalizeAndDestroyPipeline(pipelines.get(k), false);
+ pipelineManager.closePipeline(pipelines.get(k), false);
}
waitForPipelines(pipelineNum);
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManager.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManager.java
index 90f4996..ce696c5 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManager.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManager.java
@@ -26,12 +26,8 @@ import static org.apache.hadoop.hdds.HddsConfigKeys
import static org.apache.hadoop.hdds.HddsConfigKeys
.HDDS_SCM_SAFEMODE_PIPELINE_CREATION;
import static org.junit.Assert.fail;
-<<<<<<< HEAD
import org.apache.hadoop.hdds.scm.TestUtils;
-import org.junit.Ignore;
-=======
->>>>>>> master
import static org.mockito.Matchers.argThat;
import static org.mockito.Matchers.eq;
import static org.mockito.Mockito.mock;
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestDiscardPreallocatedBlocks.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestDiscardPreallocatedBlocks.java
index 37e13b6..fd52333 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestDiscardPreallocatedBlocks.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestDiscardPreallocatedBlocks.java
@@ -146,7 +146,7 @@ public class TestDiscardPreallocatedBlocks{
long containerID = locationInfos.get(0).getContainerID();
ContainerInfo container =
cluster.getStorageContainerManager().getContainerManager()
- .getContainer(ContainerID.valueof(containerID));
+ .getContainer(ContainerID.valueOf(containerID));
Pipeline pipeline =
cluster.getStorageContainerManager().getPipelineManager()
.getPipeline(container.getPipelineID());
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestCloseContainer.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestCloseContainer.java
index fe05859..888422a 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestCloseContainer.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestCloseContainer.java
@@ -105,7 +105,7 @@ public class TestCloseContainer {
ContainerInfo container = scm.getContainerManager().getContainers().get(0);
Pipeline pipeline = scm.getPipelineManager()
.getPipeline(container.getPipelineID());
- scm.getPipelineManager().finalizeAndDestroyPipeline(pipeline, false);
+ scm.getPipelineManager().closePipeline(pipeline, false);
GenericTestUtils.waitFor(() ->
container.getState() == HddsProtos.LifeCycleState.CLOSED,
200, 30000);
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestScmAdminHA.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestScmAdminHA.java
index 63a8e71..02f8815 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestScmAdminHA.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestScmAdminHA.java
@@ -19,10 +19,11 @@ package org.apache.hadoop.ozone.shell;
import java.net.InetSocketAddress;
import java.util.UUID;
+
+import org.apache.hadoop.hdds.cli.OzoneAdmin;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.hdds.scm.ScmConfigKeys;
import org.apache.hadoop.ozone.MiniOzoneCluster;
-import org.apache.hadoop.ozone.admin.OzoneAdmin;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.Test;
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java
index 307ec30..b012a24 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java
@@ -187,6 +187,7 @@ import org.apache.commons.lang3.tuple.Pair;
import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_BLOCK_TOKEN_ENABLED;
import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_BLOCK_TOKEN_ENABLED_DEFAULT;
+import static org.apache.hadoop.hdds.HddsUtils.getScmAddressForBlockClients;
import static org.apache.hadoop.hdds.HddsUtils.getScmAddressForClients;
import static org.apache.hadoop.hdds.security.x509.certificates.utils.CertificateSignRequest.getEncodedString;
import static org.apache.hadoop.hdds.server.ServerUtils.getRemoteUserName;
diff --git a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/pnpm-lock.yaml b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/pnpm-lock.yaml
index 0385fd0..516ac88 100644
--- a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/pnpm-lock.yaml
+++ b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/pnpm-lock.yaml
@@ -42,7 +42,7 @@ devDependencies:
json-server: 0.15.1
npm-run-all: 4.1.5
xo: 0.30.0
-lockfileVersion: 5.1
+lockfileVersion: 5.2
packages:
/3d-view/2.0.0:
dependencies:
@@ -2033,7 +2033,7 @@ packages:
jest-haste-map: 24.9.0
jest-message-util: 24.9.0
jest-regex-util: 24.9.0
- jest-resolve: 24.9.0_jest-resolve@24.9.0
+ jest-resolve: 24.9.0
jest-resolve-dependencies: 24.9.0
jest-runner: 24.9.0
jest-runtime: 24.9.0
@@ -2088,7 +2088,7 @@ packages:
istanbul-lib-source-maps: 3.0.6
istanbul-reports: 2.2.7
jest-haste-map: 24.9.0
- jest-resolve: 24.9.0_jest-resolve@24.9.0
+ jest-resolve: 24.9.0
jest-runtime: 24.9.0
jest-util: 24.9.0
jest-worker: 24.9.0
@@ -2196,7 +2196,7 @@ packages:
integrity: sha1-zlblOfg1UrWNENZy6k1vya3HsjQ=
/@mapbox/mapbox-gl-supported/1.5.0_mapbox-gl@1.10.1:
dependencies:
- mapbox-gl: 1.10.1_mapbox-gl@1.10.1
+ mapbox-gl: 1.10.1
dev: false
peerDependencies:
mapbox-gl: '>=0.32.1 <2.0.0'
@@ -3470,7 +3470,7 @@ packages:
mkdirp: 0.5.5
pify: 4.0.1
schema-utils: 2.7.0
- webpack: 4.42.0_webpack@4.42.0
+ webpack: 4.42.0
dev: false
engines:
node: '>= 6.9'
@@ -5016,7 +5016,7 @@ packages:
postcss-modules-values: 3.0.0
postcss-value-parser: 4.1.0
schema-utils: 2.7.0
- webpack: 4.42.0_webpack@4.42.0
+ webpack: 4.42.0
dev: false
engines:
node: '>= 8.9.0'
@@ -6176,7 +6176,7 @@ packages:
loader-utils: 1.4.0
object-hash: 2.0.3
schema-utils: 2.7.0
- webpack: 4.42.0_webpack@4.42.0
+ webpack: 4.42.0
dev: false
engines:
node: '>= 8.9.0'
@@ -6912,7 +6912,7 @@ packages:
dependencies:
loader-utils: 1.4.0
schema-utils: 2.7.0
- webpack: 4.42.0_webpack@4.42.0
+ webpack: 4.42.0
dev: false
engines:
node: '>= 8.9.0'
@@ -8216,7 +8216,7 @@ packages:
pretty-error: 2.1.1
tapable: 1.1.3
util.promisify: 1.0.0
- webpack: 4.42.0_webpack@4.42.0
+ webpack: 4.42.0
dev: false
engines:
node: '>=6.9'
@@ -9214,7 +9214,7 @@ packages:
jest-get-type: 24.9.0
jest-jasmine2: 24.9.0
jest-regex-util: 24.9.0
- jest-resolve: 24.9.0_jest-resolve@24.9.0
+ jest-resolve: 24.9.0
jest-util: 24.9.0
jest-validate: 24.9.0
micromatch: 3.1.10
@@ -9403,7 +9403,7 @@ packages:
integrity: sha512-3BEYN5WbSq9wd+SyLDES7AHnjH9A/ROBwmz7l2y+ol+NtSFO8DYiEBzoO1CeFc9a8DYy10EO4dDFVv/wN3zl1w==
/jest-pnp-resolver/1.2.1_jest-resolve@24.9.0:
dependencies:
- jest-resolve: 24.9.0_jest-resolve@24.9.0
+ jest-resolve: 24.9.0
dev: false
engines:
node: '>=6'
@@ -9430,7 +9430,7 @@ packages:
node: '>= 6'
resolution:
integrity: sha512-Fm7b6AlWnYhT0BXy4hXpactHIqER7erNgIsIozDXWl5dVm+k8XdGVe1oTg1JyaFnOxarMEbax3wyRJqGP2Pq+g==
- /jest-resolve/24.9.0_jest-resolve@24.9.0:
+ /jest-resolve/24.9.0:
dependencies:
'@jest/types': 24.9.0
browser-resolve: 1.11.3
@@ -9440,8 +9440,6 @@ packages:
dev: false
engines:
node: '>= 6'
- peerDependencies:
- jest-resolve: '*'
resolution:
integrity: sha512-TaLeLVL1l08YFZAt3zaPtjiVvyy4oSA6CRe+0AFPPVX3Q/VI0giIWWoAvoS5L96vj9Dqxj4fB5p2qrHCmTU/MQ==
/jest-runner/24.9.0:
@@ -9459,7 +9457,7 @@ packages:
jest-jasmine2: 24.9.0
jest-leak-detector: 24.9.0
jest-message-util: 24.9.0
- jest-resolve: 24.9.0_jest-resolve@24.9.0
+ jest-resolve: 24.9.0
jest-runtime: 24.9.0
jest-util: 24.9.0
jest-worker: 24.9.0
@@ -9487,7 +9485,7 @@ packages:
jest-message-util: 24.9.0
jest-mock: 24.9.0
jest-regex-util: 24.9.0
- jest-resolve: 24.9.0_jest-resolve@24.9.0
+ jest-resolve: 24.9.0
jest-snapshot: 24.9.0
jest-util: 24.9.0
jest-validate: 24.9.0
@@ -9517,7 +9515,7 @@ packages:
jest-get-type: 24.9.0
jest-matcher-utils: 24.9.0
jest-message-util: 24.9.0
- jest-resolve: 24.9.0_jest-resolve@24.9.0
+ jest-resolve: 24.9.0
mkdirp: 0.5.5
natural-compare: 1.4.0
pretty-format: 24.9.0
@@ -10289,7 +10287,7 @@ packages:
node: '>=0.10.0'
resolution:
integrity: sha1-7Nyo8TFE5mDxtb1B8S80edmN+48=
- /mapbox-gl/1.10.1_mapbox-gl@1.10.1:
+ /mapbox-gl/1.10.1:
dependencies:
'@mapbox/geojson-rewind': 0.5.0
'@mapbox/geojson-types': 1.0.2
@@ -10317,8 +10315,6 @@ packages:
dev: false
engines:
node: '>=6.4.0'
- peerDependencies:
- mapbox-gl: '*'
resolution:
integrity: sha512-0aHt+lFUpYfvh0kMIqXqNXqoYMuhuAsMlw87TbhWrw78Tx2zfuPI0Lx31/YPUgJ+Ire0tzQ4JnuBL7acDNXmMg==
/marching-simplex-table/1.0.0:
@@ -10575,7 +10571,7 @@ packages:
loader-utils: 1.4.0
normalize-url: 1.9.1
schema-utils: 1.0.0
- webpack: 4.42.0_webpack@4.42.0
+ webpack: 4.42.0
webpack-sources: 1.4.3
dev: false
engines:
@@ -11304,7 +11300,7 @@ packages:
dependencies:
cssnano: 4.1.10
last-call-webpack-plugin: 3.0.0
- webpack: 4.42.0_webpack@4.42.0
+ webpack: 4.42.0
dev: false
peerDependencies:
webpack: ^4.0.0
@@ -11870,7 +11866,7 @@ packages:
has-hover: 1.0.1
has-passive-events: 1.0.0
is-mobile: 2.2.1
- mapbox-gl: 1.10.1_mapbox-gl@1.10.1
+ mapbox-gl: 1.10.1
matrix-camera-controller: 2.1.3
mouse-change: 1.4.0
mouse-event-offset: 3.0.2
@@ -13658,7 +13654,7 @@ packages:
identity-obj-proxy: 3.0.0
jest: 24.9.0
jest-environment-jsdom-fourteen: 1.0.1
- jest-resolve: 24.9.0_jest-resolve@24.9.0
+ jest-resolve: 24.9.0
jest-watch-typeahead: 0.4.2
mini-css-extract-plugin: 0.9.0_webpack@4.42.0
optimize-css-assets-webpack-plugin: 5.0.3_webpack@4.42.0
@@ -13679,7 +13675,7 @@ packages:
ts-pnp: 1.1.6_typescript@3.4.5
typescript: 3.4.5
url-loader: 2.3.0_file-loader@4.3.0+webpack@4.42.0
- webpack: 4.42.0_webpack@4.42.0
+ webpack: 4.42.0
webpack-dev-server: 3.10.3_webpack@4.42.0
webpack-manifest-plugin: 2.2.0_webpack@4.42.0
workbox-webpack-plugin: 4.3.1_webpack@4.42.0
@@ -14512,7 +14508,7 @@ packages:
neo-async: 2.6.1
schema-utils: 2.7.0
semver: 6.3.0
- webpack: 4.42.0_webpack@4.42.0
+ webpack: 4.42.0
dev: false
engines:
node: '>= 8.9.0'
@@ -15583,7 +15579,7 @@ packages:
serialize-javascript: 3.1.0
source-map: 0.6.1
terser: 4.7.0
- webpack: 4.42.0_webpack@4.42.0
+ webpack: 4.42.0
webpack-sources: 1.4.3
worker-farm: 1.7.0
dev: false
@@ -15603,7 +15599,7 @@ packages:
serialize-javascript: 2.1.2
source-map: 0.6.1
terser: 4.7.0
- webpack: 4.42.0_webpack@4.42.0
+ webpack: 4.42.0
webpack-sources: 1.4.3
dev: false
engines:
@@ -16188,7 +16184,7 @@ packages:
loader-utils: 1.4.0
mime: 2.4.6
schema-utils: 2.7.0
- webpack: 4.42.0_webpack@4.42.0
+ webpack: 4.42.0
dev: false
engines:
node: '>= 8.9.0'
@@ -16491,7 +16487,7 @@ packages:
mime: 2.4.6
mkdirp: 0.5.5
range-parser: 1.2.1
- webpack: 4.42.0_webpack@4.42.0
+ webpack: 4.42.0
webpack-log: 2.0.0
dev: false
engines:
@@ -16531,7 +16527,7 @@ packages:
strip-ansi: 3.0.1
supports-color: 6.1.0
url: 0.11.0
- webpack: 4.42.0_webpack@4.42.0
+ webpack: 4.42.0
webpack-dev-middleware: 3.7.2_webpack@4.42.0
webpack-log: 2.0.0
ws: 6.2.1
@@ -16563,7 +16559,7 @@ packages:
lodash: 4.17.15
object.entries: 1.1.2
tapable: 1.1.3
- webpack: 4.42.0_webpack@4.42.0
+ webpack: 4.42.0
dev: false
engines:
node: '>=6.11.5'
@@ -16578,7 +16574,7 @@ packages:
dev: false
resolution:
integrity: sha512-lgTS3Xhv1lCOKo7SA5TjKXMjpSM4sBjNV5+q2bqesbSPs5FjGmU6jjtBSkX9b4qW87vDIsCIlUPOEhbZrMdjeQ==
- /webpack/4.42.0_webpack@4.42.0:
+ /webpack/4.42.0:
dependencies:
'@webassemblyjs/ast': 1.8.5
'@webassemblyjs/helper-module-context': 1.8.5
@@ -16607,8 +16603,6 @@ packages:
engines:
node: '>=6.11.5'
hasBin: true
- peerDependencies:
- webpack: '*'
resolution:
integrity: sha512-EzJRHvwQyBiYrYqhyjW9AqM90dE4+s1/XtCfn7uWg6cS72zH+2VPFAlsnW0+W0cDi0XRjNKUMoJtpSi50+Ph6w==
/websocket-driver/0.7.4:
@@ -16819,7 +16813,7 @@ packages:
dependencies:
'@babel/runtime': 7.10.2
json-stable-stringify: 1.0.1
- webpack: 4.42.0_webpack@4.42.0
+ webpack: 4.42.0
workbox-build: 4.3.1
dev: false
engines:
diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/scm/GetScmRatisRolesSubcommand.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/scm/GetScmRatisRolesSubcommand.java
index cf2310c..c784c44 100644
--- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/scm/GetScmRatisRolesSubcommand.java
+++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/scm/GetScmRatisRolesSubcommand.java
@@ -17,9 +17,10 @@
*/
package org.apache.hadoop.ozone.admin.scm;
+import java.io.IOException;
import java.util.List;
-import java.util.concurrent.Callable;
import org.apache.hadoop.hdds.cli.HddsVersionProvider;
+import org.apache.hadoop.hdds.scm.cli.ScmSubcommand;
import org.apache.hadoop.hdds.scm.client.ScmClient;
import picocli.CommandLine;
@@ -31,16 +32,14 @@ import picocli.CommandLine;
description = "List all SCMs and their respective Ratis server roles",
mixinStandardHelpOptions = true,
versionProvider = HddsVersionProvider.class)
-public class GetScmRatisRolesSubcommand implements Callable<Void> {
+public class GetScmRatisRolesSubcommand extends ScmSubcommand {
@CommandLine.ParentCommand
private ScmAdmin parent;
@Override
- public Void call() throws Exception {
- ScmClient scmClient = parent.createScmClient();
+ protected void execute(ScmClient scmClient) throws IOException {
List<String> roles = scmClient.getScmRatisRoles();
System.out.println(roles);
- return null;
}
}
diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/scm/ScmAdmin.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/scm/ScmAdmin.java
index 2605a6d..b05e65a 100644
--- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/scm/ScmAdmin.java
+++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/scm/ScmAdmin.java
@@ -19,8 +19,7 @@ package org.apache.hadoop.ozone.admin.scm;
import org.apache.hadoop.hdds.cli.GenericCli;
import org.apache.hadoop.hdds.cli.HddsVersionProvider;
-import org.apache.hadoop.hdds.scm.client.ScmClient;
-import org.apache.hadoop.ozone.admin.OzoneAdmin;
+import org.apache.hadoop.hdds.cli.OzoneAdmin;
import picocli.CommandLine;
import picocli.CommandLine.Model.CommandSpec;
import picocli.CommandLine.Spec;
@@ -54,7 +53,4 @@ public class ScmAdmin extends GenericCli {
return null;
}
- public ScmClient createScmClient() {
- return parent.createScmClient();
- }
}
---------------------------------------------------------------------
To unsubscribe, e-mail: ozone-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: ozone-commits-help@hadoop.apache.org
[hadoop-ozone] 03/11: HDDS-4059.
SCMStateMachine::applyTransaction() should not invoke
TransactionContext.getClientRequest().
Posted by na...@apache.org.
This is an automated email from the ASF dual-hosted git repository.
nanda pushed a commit to branch HDDS-2823
in repository https://gitbox.apache.org/repos/asf/hadoop-ozone.git
commit 3ed29d88202d20873f9f2b7f4e9635ee954bdaf8
Author: Glen Geng <gl...@tencent.com>
AuthorDate: Sat Oct 24 20:58:07 2020 +0530
HDDS-4059. SCMStateMachine::applyTransaction() should not invoke TransactionContext.getClientRequest().
---
.../src/main/java/org/apache/hadoop/hdds/scm/ha/SCMStateMachine.java | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMStateMachine.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMStateMachine.java
index b10dd54..ee26e58 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMStateMachine.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMStateMachine.java
@@ -54,7 +54,7 @@ public class SCMStateMachine extends BaseStateMachine {
new CompletableFuture<>();
try {
final SCMRatisRequest request = SCMRatisRequest.decode(
- trx.getClientRequest().getMessage());
+ Message.valueOf(trx.getStateMachineLogEntry().getLogData()));
applyTransactionFuture.complete(process(request));
} catch (Exception ex) {
applyTransactionFuture.completeExceptionally(ex);
---------------------------------------------------------------------
To unsubscribe, e-mail: ozone-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: ozone-commits-help@hadoop.apache.org
[hadoop-ozone] 07/11: HDDS-4115. CLI command to show current SCM
leader and follower status.
Posted by na...@apache.org.
This is an automated email from the ASF dual-hosted git repository.
nanda pushed a commit to branch HDDS-2823
in repository https://gitbox.apache.org/repos/asf/hadoop-ozone.git
commit 5f3981c40484f739ac6d258d95446ac2b2cdd36a
Author: Rui Wang <am...@users.noreply.github.com>
AuthorDate: Sat Oct 24 21:16:24 2020 +0530
HDDS-4115. CLI command to show current SCM leader and follower status.
---
.../java/org/apache/hadoop/hdds/scm/ScmInfo.java | 32 ++++++++-
.../apache/hadoop/hdds/scm/client/ScmClient.java | 5 +-
...inerLocationProtocolClientSideTranslatorPB.java | 4 +-
.../interface-client/src/main/proto/hdds.proto | 1 +
.../apache/hadoop/hdds/scm/ha/SCMHAManager.java | 6 ++
.../hadoop/hdds/scm/ha/SCMHAManagerImpl.java | 11 +++
...inerLocationProtocolServerSideTranslatorPB.java | 2 +-
.../hdds/scm/server/SCMClientProtocolServer.java | 3 +-
.../hdds/scm/server/StorageContainerManager.java | 4 ++
.../hadoop/hdds/scm/ha/MockSCMHAManager.java | 9 +++
.../hdds/scm/cli/ContainerOperationClient.java | 4 ++
.../dist/src/main/smoketest/admincli/scmha.robot | 28 ++++++++
.../apache/hadoop/ozone/shell/TestScmAdminHA.java | 79 ++++++++++++++++++++++
.../admin/scm/GetScmRatisRolesSubcommand.java | 46 +++++++++++++
.../apache/hadoop/ozone/admin/scm/ScmAdmin.java | 60 ++++++++++++++++
.../hadoop/ozone/admin/scm/package-info.java | 22 ++++++
16 files changed, 310 insertions(+), 6 deletions(-)
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmInfo.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmInfo.java
index 6236feb..b9d823e 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmInfo.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmInfo.java
@@ -18,6 +18,9 @@
package org.apache.hadoop.hdds.scm;
+import java.util.ArrayList;
+import java.util.List;
+
/**
* ScmInfo wraps the result returned from SCM#getScmInfo which
* contains clusterId and the SCM Id.
@@ -25,6 +28,7 @@ package org.apache.hadoop.hdds.scm;
public final class ScmInfo {
private String clusterId;
private String scmId;
+ private List<String> peerRoles;
/**
* Builder for ScmInfo.
@@ -32,6 +36,11 @@ public final class ScmInfo {
public static class Builder {
private String clusterId;
private String scmId;
+ private List<String> peerRoles;
+
+ public Builder() {
+ peerRoles = new ArrayList<>();
+ }
/**
* sets the cluster id.
@@ -53,14 +62,25 @@ public final class ScmInfo {
return this;
}
+ /**
+ * Set peer address in Scm HA.
+ * @param roles ratis peer address in the format of [ip|hostname]:port
+ * @return Builder for scmInfo
+ */
+ public Builder setRatisPeerRoles(List<String> roles) {
+ peerRoles.addAll(roles);
+ return this;
+ }
+
public ScmInfo build() {
- return new ScmInfo(clusterId, scmId);
+ return new ScmInfo(clusterId, scmId, peerRoles);
}
}
- private ScmInfo(String clusterId, String scmId) {
+ private ScmInfo(String clusterId, String scmId, List<String> peerRoles) {
this.clusterId = clusterId;
this.scmId = scmId;
+ this.peerRoles = peerRoles;
}
/**
@@ -78,4 +98,12 @@ public final class ScmInfo {
public String getScmId() {
return scmId;
}
+
+ /**
+ * Gets the list of peer roles (currently address) in Scm HA.
+ * @return List of peer address
+ */
+ public List<String> getRatisPeerRoles() {
+ return peerRoles;
+ }
}
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/client/ScmClient.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/client/ScmClient.java
index e4369fa..7c3c94c 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/client/ScmClient.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/client/ScmClient.java
@@ -246,5 +246,8 @@ public interface ScmClient extends Closeable {
*/
boolean getReplicationManagerStatus() throws IOException;
-
+ /**
+ * returns the list of ratis peer roles. Currently only include peer address.
+ */
+ List<String> getScmRatisRoles() throws IOException;
}
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/StorageContainerLocationProtocolClientSideTranslatorPB.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/StorageContainerLocationProtocolClientSideTranslatorPB.java
index 0733940..cf88869 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/StorageContainerLocationProtocolClientSideTranslatorPB.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/StorageContainerLocationProtocolClientSideTranslatorPB.java
@@ -444,7 +444,9 @@ public final class StorageContainerLocationProtocolClientSideTranslatorPB
.getGetScmInfoResponse();
ScmInfo.Builder builder = new ScmInfo.Builder()
.setClusterId(resp.getClusterId())
- .setScmId(resp.getScmId());
+ .setScmId(resp.getScmId())
+ .setRatisPeerRoles(resp.getPeerRolesList());
+
return builder.build();
}
diff --git a/hadoop-hdds/interface-client/src/main/proto/hdds.proto b/hadoop-hdds/interface-client/src/main/proto/hdds.proto
index d89e7b4..f0c9b37 100644
--- a/hadoop-hdds/interface-client/src/main/proto/hdds.proto
+++ b/hadoop-hdds/interface-client/src/main/proto/hdds.proto
@@ -211,6 +211,7 @@ message GetScmInfoRequestProto {
message GetScmInfoResponseProto {
required string clusterId = 1;
required string scmId = 2;
+ repeated string peerRoles = 3;
}
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMHAManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMHAManager.java
index ade0ad9..dc68b41 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMHAManager.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMHAManager.java
@@ -17,6 +17,7 @@
package org.apache.hadoop.hdds.scm.ha;
+import java.util.List;
import org.apache.ratis.protocol.NotLeaderException;
import org.apache.ratis.protocol.RaftPeer;
@@ -53,6 +54,11 @@ public interface SCMHAManager {
void shutdown() throws IOException;
/**
+ * Returns roles of ratis peers.
+ */
+ List<String> getRatisRoles();
+
+ /**
* Returns NotLeaderException with useful info.
*/
NotLeaderException triggerNotLeaderException();
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMHAManagerImpl.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMHAManagerImpl.java
index 8bb9457..e2aa04f 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMHAManagerImpl.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMHAManagerImpl.java
@@ -18,6 +18,8 @@
package org.apache.hadoop.hdds.scm.ha;
import com.google.common.base.Preconditions;
+import java.util.List;
+import java.util.stream.Collectors;
import org.apache.hadoop.hdds.conf.ConfigurationSource;
import org.apache.ratis.protocol.NotLeaderException;
import org.apache.ratis.protocol.RaftGroupMemberId;
@@ -145,6 +147,15 @@ public class SCMHAManagerImpl implements SCMHAManager {
ratisServer.stop();
}
+ @Override
+ public List<String> getRatisRoles() {
+ return getRatisServer()
+ .getRaftPeers()
+ .stream()
+ .map(peer -> peer.getAddress() == null ? "" : peer.getAddress())
+ .collect(Collectors.toList());
+ }
+
/**
* {@inheritDoc}
*/
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/protocol/StorageContainerLocationProtocolServerSideTranslatorPB.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/protocol/StorageContainerLocationProtocolServerSideTranslatorPB.java
index d5496b4..24f17f1 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/protocol/StorageContainerLocationProtocolServerSideTranslatorPB.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/protocol/StorageContainerLocationProtocolServerSideTranslatorPB.java
@@ -440,8 +440,8 @@ public final class StorageContainerLocationProtocolServerSideTranslatorPB
return HddsProtos.GetScmInfoResponseProto.newBuilder()
.setClusterId(scmInfo.getClusterId())
.setScmId(scmInfo.getScmId())
+ .addAllPeerRoles(scmInfo.getRatisPeerRoles())
.build();
-
}
public InSafeModeResponseProto inSafeMode(
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMClientProtocolServer.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMClientProtocolServer.java
index 594527a..3ad31d7 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMClientProtocolServer.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMClientProtocolServer.java
@@ -488,7 +488,8 @@ public class SCMClientProtocolServer implements
ScmInfo.Builder builder =
new ScmInfo.Builder()
.setClusterId(scm.getScmStorageConfig().getClusterID())
- .setScmId(scm.getScmStorageConfig().getScmId());
+ .setScmId(scm.getScmStorageConfig().getScmId())
+ .setRatisPeerRoles(scm.getScmHAManager().getRatisRoles());
return builder.build();
} catch (Exception ex) {
auditSuccess = false;
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java
index 4513857..3d1ad72 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java
@@ -1184,4 +1184,8 @@ public final class StorageContainerManager extends ServiceRuntimeInfoImpl
public String getClusterId() {
return getScmStorageConfig().getClusterID();
}
+
+ public SCMHAManager getScmHAManager() {
+ return scmHAManager;
+ }
}
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/ha/MockSCMHAManager.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/ha/MockSCMHAManager.java
index ce48c11..e31e7e1 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/ha/MockSCMHAManager.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/ha/MockSCMHAManager.java
@@ -20,6 +20,7 @@ package org.apache.hadoop.hdds.scm.ha;
import java.io.IOException;
import java.lang.reflect.InvocationTargetException;
import java.util.ArrayList;
+import java.util.Arrays;
import java.util.EnumMap;
import java.util.List;
import java.util.Map;
@@ -107,6 +108,14 @@ public final class MockSCMHAManager implements SCMHAManager {
ratisServer.stop();
}
+ @Override
+ public List<String> getRatisRoles() {
+ return Arrays.asList(
+ "180.3.14.5:9865",
+ "180.3.14.21:9865",
+ "180.3.14.145:9865");
+ }
+
/**
* {@inheritDoc}
*/
diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ContainerOperationClient.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ContainerOperationClient.java
index 96cd530..0383642 100644
--- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ContainerOperationClient.java
+++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/ContainerOperationClient.java
@@ -508,4 +508,8 @@ public class ContainerOperationClient implements ScmClient {
return storageContainerLocationClient.getReplicationManagerStatus();
}
+ @Override
+ public List<String> getScmRatisRoles() throws IOException {
+ return storageContainerLocationClient.getScmInfo().getRatisPeerRoles();
+ }
}
diff --git a/hadoop-ozone/dist/src/main/smoketest/admincli/scmha.robot b/hadoop-ozone/dist/src/main/smoketest/admincli/scmha.robot
new file mode 100644
index 0000000..31a990f
--- /dev/null
+++ b/hadoop-ozone/dist/src/main/smoketest/admincli/scmha.robot
@@ -0,0 +1,28 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+*** Settings ***
+Documentation       Smoketest for the ozone admin scm roles command
+Library OperatingSystem
+Library BuiltIn
+Resource ../commonlib.robot
+Test Timeout 5 minutes
+
+*** Variables ***
+
+*** Test Cases ***
+Run scm roles
+ ${output} = Execute ozone admin scm roles
+ Should contain ${output} []
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestScmAdminHA.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestScmAdminHA.java
new file mode 100644
index 0000000..63a8e71
--- /dev/null
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestScmAdminHA.java
@@ -0,0 +1,79 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.ozone.shell;
+
+import java.net.InetSocketAddress;
+import java.util.UUID;
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.scm.ScmConfigKeys;
+import org.apache.hadoop.ozone.MiniOzoneCluster;
+import org.apache.hadoop.ozone.admin.OzoneAdmin;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+/**
+ * This class tests ozone admin scm commands.
+ */
+public class TestScmAdminHA {
+ private static OzoneAdmin ozoneAdmin;
+ private static OzoneConfiguration conf;
+ private static String omServiceId;
+ private static int numOfOMs;
+ private static String clusterId;
+ private static String scmId;
+ private static MiniOzoneCluster cluster;
+
+ @BeforeClass
+ public static void init() throws Exception {
+ ozoneAdmin = new OzoneAdmin();
+ conf = new OzoneConfiguration();
+
+ // Init HA cluster
+ omServiceId = "om-service-test1";
+ numOfOMs = 3;
+ clusterId = UUID.randomUUID().toString();
+ scmId = UUID.randomUUID().toString();
+ cluster = MiniOzoneCluster.newHABuilder(conf)
+ .setClusterId(clusterId)
+ .setScmId(scmId)
+ .setOMServiceId(omServiceId)
+ .setNumOfOzoneManagers(numOfOMs)
+ .build();
+ conf.setQuietMode(false);
+ // enable ratis for Scm.
+ conf.setBoolean(ScmConfigKeys.DFS_CONTAINER_RATIS_ENABLED_KEY, true);
+ cluster.waitForClusterToBeReady();
+ }
+
+ @AfterClass
+ public static void shutdown() {
+ if (cluster != null) {
+ cluster.shutdown();
+ }
+ }
+
+ @Test
+ public void testGetRatisRoles() {
+ InetSocketAddress address =
+ cluster.getStorageContainerManager().getClientRpcAddress();
+ String hostPort = address.getHostName() + ":" + address.getPort();
+ String[] args = {"--scm", hostPort, "scm", "roles"};
+ ozoneAdmin.execute(args);
+ }
+}
diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/scm/GetScmRatisRolesSubcommand.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/scm/GetScmRatisRolesSubcommand.java
new file mode 100644
index 0000000..cf2310c
--- /dev/null
+++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/scm/GetScmRatisRolesSubcommand.java
@@ -0,0 +1,46 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.ozone.admin.scm;
+
+import java.util.List;
+import java.util.concurrent.Callable;
+import org.apache.hadoop.hdds.cli.HddsVersionProvider;
+import org.apache.hadoop.hdds.scm.client.ScmClient;
+import picocli.CommandLine;
+
+/**
+ * Handler of the scm roles command.
+ */
+@CommandLine.Command(
+ name = "roles",
+ description = "List all SCMs and their respective Ratis server roles",
+ mixinStandardHelpOptions = true,
+ versionProvider = HddsVersionProvider.class)
+public class GetScmRatisRolesSubcommand implements Callable<Void> {
+
+ @CommandLine.ParentCommand
+ private ScmAdmin parent;
+
+ @Override
+ public Void call() throws Exception {
+ ScmClient scmClient = parent.createScmClient();
+ List<String> roles = scmClient.getScmRatisRoles();
+ System.out.println(roles);
+ return null;
+ }
+}
diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/scm/ScmAdmin.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/scm/ScmAdmin.java
new file mode 100644
index 0000000..2605a6d
--- /dev/null
+++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/scm/ScmAdmin.java
@@ -0,0 +1,60 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.ozone.admin.scm;
+
+import org.apache.hadoop.hdds.cli.GenericCli;
+import org.apache.hadoop.hdds.cli.HddsVersionProvider;
+import org.apache.hadoop.hdds.scm.client.ScmClient;
+import org.apache.hadoop.ozone.admin.OzoneAdmin;
+import picocli.CommandLine;
+import picocli.CommandLine.Model.CommandSpec;
+import picocli.CommandLine.Spec;
+
+/**
+ * Subcommand for admin operations related to SCM.
+ */
+@CommandLine.Command(
+ name = "scm",
+ description = "Ozone Storage Container Manager specific admin operations",
+ mixinStandardHelpOptions = true,
+ versionProvider = HddsVersionProvider.class,
+ subcommands = {
+ GetScmRatisRolesSubcommand.class
+ })
+public class ScmAdmin extends GenericCli {
+
+ @CommandLine.ParentCommand
+ private OzoneAdmin parent;
+
+ @Spec
+ private CommandSpec spec;
+
+ public OzoneAdmin getParent() {
+ return parent;
+ }
+
+ @Override
+ public Void call() throws Exception {
+ GenericCli.missingSubcommand(spec);
+ return null;
+ }
+
+ public ScmClient createScmClient() {
+ return parent.createScmClient();
+ }
+}
diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/scm/package-info.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/scm/package-info.java
new file mode 100644
index 0000000..ec15a33
--- /dev/null
+++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/admin/scm/package-info.java
@@ -0,0 +1,22 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * SCM related Admin tools.
+ */
+package org.apache.hadoop.ozone.admin.scm;
---------------------------------------------------------------------
To unsubscribe, e-mail: ozone-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: ozone-commits-help@hadoop.apache.org
[hadoop-ozone] 06/11: HDDS-3895. Implement container related
operations in ContainerManagerImpl.
Posted by na...@apache.org.
This is an automated email from the ASF dual-hosted git repository.
nanda pushed a commit to branch HDDS-2823
in repository https://gitbox.apache.org/repos/asf/hadoop-ozone.git
commit 9e0dd848d7a363a64733518335fbfb5e8d47a20f
Author: Nandakumar <na...@apache.org>
AuthorDate: Sat Oct 24 21:08:33 2020 +0530
HDDS-3895. Implement container related operations in ContainerManagerImpl.
---
.../hadoop/hdds/scm/container/ContainerID.java | 32 +++-
.../hadoop/hdds/scm/container/ContainerInfo.java | 7 +-
.../scm/container/common/helpers/ExcludeList.java | 2 +-
.../interface-client/src/main/proto/hdds.proto | 7 +
.../block/DatanodeDeletedBlockTransactions.java | 2 +-
.../hadoop/hdds/scm/block/DeletedBlockLogImpl.java | 2 +-
.../container/AbstractContainerReportHandler.java | 2 +-
.../scm/container/ContainerActionsHandler.java | 2 +-
.../hdds/scm/container/ContainerManagerImpl.java | 175 +++++++++----------
.../hdds/scm/container/ContainerManagerV2.java | 65 +++----
.../hdds/scm/container/ContainerReportHandler.java | 6 +-
.../hdds/scm/container/ContainerStateManager.java | 36 ++--
.../scm/container/ContainerStateManagerImpl.java | 194 ++++++++++++---------
.../scm/container/ContainerStateManagerV2.java | 40 ++++-
.../IncrementalContainerReportHandler.java | 2 +-
.../hdds/scm/container/SCMContainerManager.java | 13 +-
.../scm/container/states/ContainerAttribute.java | 2 +-
.../scm/container/states/ContainerStateMap.java | 172 +++++++++---------
.../hadoop/hdds/scm/metadata/ContainerIDCodec.java | 4 +-
.../hdds/scm/server/SCMClientProtocolServer.java | 20 +--
.../hdds/scm/server/StorageContainerManager.java | 2 +-
.../hadoop/hdds/scm/block/TestDeletedBlockLog.java | 7 +-
.../container/TestCloseContainerEventHandler.java | 4 +-
.../scm/container/TestContainerActionsHandler.java | 2 +-
.../scm/container/TestContainerManagerImpl.java | 30 +++-
.../scm/container/TestSCMContainerManager.java | 2 +-
.../container/states/TestContainerAttribute.java | 18 +-
.../hadoop/hdds/scm/node/TestDeadNodeHandler.java | 10 +-
.../scm/node/states/TestNode2ContainerMap.java | 10 +-
.../hdds/scm/pipeline/TestPipelineManagerImpl.java | 4 +-
.../scm/pipeline/TestPipelineStateManager.java | 22 +--
.../hdds/scm/pipeline/TestSCMPipelineManager.java | 4 +-
.../hadoop/ozone/client/io/KeyOutputStream.java | 2 +-
.../TestContainerStateManagerIntegration.java | 8 +-
.../metrics/TestSCMContainerManagerMetrics.java | 6 +-
.../org/apache/hadoop/ozone/OzoneTestUtils.java | 10 +-
.../rpc/TestContainerReplicationEndToEnd.java | 6 +-
.../client/rpc/TestFailureHandlingByClient.java | 10 +-
.../rpc/TestFailureHandlingByClientFlushDelay.java | 2 +-
.../rpc/TestMultiBlockWritesWithDnFailures.java | 4 +-
.../rpc/TestOzoneClientRetriesOnException.java | 4 +-
...estOzoneClientRetriesOnExceptionFlushDelay.java | 2 +-
.../client/rpc/TestOzoneRpcClientAbstract.java | 2 +-
.../hadoop/ozone/client/rpc/TestReadRetries.java | 2 +-
.../apache/hadoop/ozone/container/TestHelper.java | 6 +-
.../TestCloseContainerByPipeline.java | 8 +-
.../commandhandler/TestCloseContainerHandler.java | 2 +-
.../commandhandler/TestDeleteContainerHandler.java | 2 +-
.../hadoop/ozone/dn/scrubber/TestDataScrubber.java | 2 +-
.../ozone/om/TestContainerReportWithKeys.java | 2 +-
.../hadoop/ozone/recon/TestReconAsPassiveScm.java | 2 +-
.../org/apache/hadoop/ozone/scm/TestSCMMXBean.java | 12 +-
.../hadoop/ozone/recon/api/ContainerEndpoint.java | 4 +-
.../ozone/recon/fsck/ContainerHealthTask.java | 2 +-
.../ozone/recon/scm/ReconContainerManager.java | 2 +-
.../recon/scm/ReconContainerReportHandler.java | 2 +-
.../ReconIncrementalContainerReportHandler.java | 2 +-
.../ozone/recon/api/TestContainerEndpoint.java | 2 +-
.../recon/fsck/TestContainerHealthStatus.java | 2 +-
.../ozone/recon/fsck/TestContainerHealthTask.java | 24 +--
.../TestContainerHealthTaskRecordGenerator.java | 2 +-
.../scm/AbstractReconContainerManagerTest.java | 6 +-
.../ozone/recon/scm/TestReconContainerManager.java | 2 +-
...TestReconIncrementalContainerReportHandler.java | 2 +-
64 files changed, 563 insertions(+), 483 deletions(-)
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerID.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerID.java
index bb44da4..1a6be96 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerID.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerID.java
@@ -23,6 +23,7 @@ import com.google.common.primitives.Longs;
import org.apache.commons.lang3.builder.CompareToBuilder;
import org.apache.commons.lang3.builder.EqualsBuilder;
import org.apache.commons.lang3.builder.HashCodeBuilder;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
/**
* Container ID is an integer that is a value between 1..MAX_CONTAINER ID.
@@ -34,13 +35,14 @@ public final class ContainerID implements Comparable<ContainerID> {
private final long id;
- // TODO: make this private.
/**
* Constructs ContainerID.
*
* @param id int
*/
- public ContainerID(long id) {
+ private ContainerID(long id) {
+ Preconditions.checkState(id > 0,
+ "Container ID should be a positive. %s.", id);
this.id = id;
}
@@ -49,9 +51,7 @@ public final class ContainerID implements Comparable<ContainerID> {
* @param containerID long
* @return ContainerID.
*/
- public static ContainerID valueof(final long containerID) {
- Preconditions.checkState(containerID > 0,
- "Container ID should be a positive long. "+ containerID);
+ public static ContainerID valueOf(final long containerID) {
return new ContainerID(containerID);
}
@@ -60,14 +60,30 @@ public final class ContainerID implements Comparable<ContainerID> {
*
* @return int
*/
+ @Deprecated
+ /*
+ * Don't expose the int value.
+ */
public long getId() {
return id;
}
+ /**
+ * Use proto message.
+ */
+ @Deprecated
public byte[] getBytes() {
return Longs.toByteArray(id);
}
+ public HddsProtos.ContainerID getProtobuf() {
+ return HddsProtos.ContainerID.newBuilder().setId(id).build();
+ }
+
+ public static ContainerID getFromProtobuf(HddsProtos.ContainerID proto) {
+ return ContainerID.valueOf(proto.getId());
+ }
+
@Override
public boolean equals(final Object o) {
if (this == o) {
@@ -81,14 +97,14 @@ public final class ContainerID implements Comparable<ContainerID> {
final ContainerID that = (ContainerID) o;
return new EqualsBuilder()
- .append(getId(), that.getId())
+ .append(id, that.id)
.isEquals();
}
@Override
public int hashCode() {
return new HashCodeBuilder(61, 71)
- .append(getId())
+ .append(id)
.toHashCode();
}
@@ -96,7 +112,7 @@ public final class ContainerID implements Comparable<ContainerID> {
public int compareTo(final ContainerID that) {
Preconditions.checkNotNull(that);
return new CompareToBuilder()
- .append(this.getId(), that.getId())
+ .append(this.id, that.id)
.build();
}
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerInfo.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerInfo.java
index b8f1a92..e621a4f 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerInfo.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerInfo.java
@@ -121,6 +121,11 @@ public class ContainerInfo implements Comparator<ContainerInfo>,
.build();
}
+ /**
+ * This method is deprecated, use {@code containerID()} which returns
+ * {@link ContainerID} object.
+ */
+ @Deprecated
public long getContainerID() {
return containerID;
}
@@ -179,7 +184,7 @@ public class ContainerInfo implements Comparator<ContainerInfo>,
}
public ContainerID containerID() {
- return new ContainerID(getContainerID());
+ return ContainerID.valueOf(containerID);
}
/**
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/ExcludeList.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/ExcludeList.java
index 803aa03..824a1f5 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/ExcludeList.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/ExcludeList.java
@@ -91,7 +91,7 @@ public class ExcludeList {
HddsProtos.ExcludeListProto excludeListProto) {
ExcludeList excludeList = new ExcludeList();
excludeListProto.getContainerIdsList().forEach(id -> {
- excludeList.addConatinerId(ContainerID.valueof(id));
+ excludeList.addConatinerId(ContainerID.valueOf(id));
});
DatanodeDetails.Builder builder = DatanodeDetails.newBuilder();
excludeListProto.getDatanodesList().forEach(dn -> {
diff --git a/hadoop-hdds/interface-client/src/main/proto/hdds.proto b/hadoop-hdds/interface-client/src/main/proto/hdds.proto
index b43a74c..d89e7b4 100644
--- a/hadoop-hdds/interface-client/src/main/proto/hdds.proto
+++ b/hadoop-hdds/interface-client/src/main/proto/hdds.proto
@@ -83,6 +83,10 @@ message PipelineID {
optional UUID uuid128 = 100;
}
+message ContainerID {
+ required uint64 id = 1;
+}
+
enum PipelineState {
PIPELINE_ALLOCATED = 1;
PIPELINE_OPEN = 2;
@@ -181,6 +185,7 @@ enum LifeCycleEvent {
}
message ContainerInfoProto {
+ // Replace int64 with ContainerID message
required int64 containerID = 1;
required LifeCycleState state = 2;
optional PipelineID pipelineID = 3;
@@ -236,6 +241,7 @@ enum ScmOps {
message ExcludeListProto {
repeated string datanodes = 1;
+ // Replace int64 with ContainerID message
repeated int64 containerIds = 2;
repeated PipelineID pipelineIds = 3;
}
@@ -244,6 +250,7 @@ message ExcludeListProto {
* Block ID that uniquely identify a block by SCM.
*/
message ContainerBlockID {
+ // Replace int64 with ContainerID message
required int64 containerID = 1;
required int64 localID = 2;
}
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/DatanodeDeletedBlockTransactions.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/DatanodeDeletedBlockTransactions.java
index dca1529..2420d61 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/DatanodeDeletedBlockTransactions.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/DatanodeDeletedBlockTransactions.java
@@ -59,7 +59,7 @@ public class DatanodeDeletedBlockTransactions {
Set<UUID> dnsWithTransactionCommitted) {
try {
boolean success = false;
- final ContainerID id = ContainerID.valueof(tx.getContainerID());
+ final ContainerID id = ContainerID.valueOf(tx.getContainerID());
final ContainerInfo container = containerManager.getContainer(id);
final Set<ContainerReplica> replicas = containerManager
.getContainerReplicas(id);
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/DeletedBlockLogImpl.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/DeletedBlockLogImpl.java
index edd3d4a..5d43a75 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/DeletedBlockLogImpl.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/DeletedBlockLogImpl.java
@@ -193,7 +193,7 @@ public class DeletedBlockLogImpl
long txID = transactionResult.getTxID();
// set of dns which have successfully committed transaction txId.
dnsWithCommittedTxn = transactionToDNsCommitMap.get(txID);
- final ContainerID containerId = ContainerID.valueof(
+ final ContainerID containerId = ContainerID.valueOf(
transactionResult.getContainerID());
if (dnsWithCommittedTxn == null) {
// Mostly likely it's a retried delete command response.
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/AbstractContainerReportHandler.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/AbstractContainerReportHandler.java
index 1b190a2..02dc3f5 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/AbstractContainerReportHandler.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/AbstractContainerReportHandler.java
@@ -75,7 +75,7 @@ public class AbstractContainerReportHandler {
final ContainerReplicaProto replicaProto, final EventPublisher publisher)
throws IOException {
final ContainerID containerId = ContainerID
- .valueof(replicaProto.getContainerID());
+ .valueOf(replicaProto.getContainerID());
if (logger.isDebugEnabled()) {
logger.debug("Processing replica of container {} from datanode {}",
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerActionsHandler.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerActionsHandler.java
index e79f268..3d53e29 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerActionsHandler.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerActionsHandler.java
@@ -45,7 +45,7 @@ public class ContainerActionsHandler implements
DatanodeDetails dd = containerReportFromDatanode.getDatanodeDetails();
for (ContainerAction action : containerReportFromDatanode.getReport()
.getContainerActionsList()) {
- ContainerID containerId = ContainerID.valueof(action.getContainerID());
+ ContainerID containerId = ContainerID.valueOf(action.getContainerID());
switch (action.getAction()) {
case CLOSE:
if (LOG.isDebugEnabled()) {
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerManagerImpl.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerManagerImpl.java
index 36b9a30..3477eea 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerManagerImpl.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerManagerImpl.java
@@ -23,12 +23,14 @@ import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.Objects;
+import java.util.Optional;
import java.util.Set;
import java.util.concurrent.locks.ReadWriteLock;
import java.util.concurrent.locks.ReentrantReadWriteLock;
import java.util.stream.Collectors;
import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ContainerInfoProto;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleState;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleEvent;
@@ -37,7 +39,9 @@ import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType;
import org.apache.hadoop.hdds.scm.ha.SCMHAManager;
import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
import org.apache.hadoop.hdds.scm.pipeline.PipelineManager;
+import org.apache.hadoop.hdds.utils.UniqueId;
import org.apache.hadoop.hdds.utils.db.Table;
+import org.apache.hadoop.ozone.common.statemachine.InvalidStateTransitionException;
import org.apache.hadoop.util.Time;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -60,6 +64,7 @@ public class ContainerManagerImpl implements ContainerManagerV2 {
/**
*
*/
+ //Can we move this lock to ContainerStateManager?
private final ReadWriteLock lock;
/**
@@ -93,94 +98,45 @@ public class ContainerManagerImpl implements ContainerManagerV2 {
}
@Override
- public Set<ContainerID> getContainerIDs() {
- lock.readLock().lock();
- try {
- return containerStateManager.getContainerIDs();
- } finally {
- lock.readLock().unlock();
- }
- }
-
- @Override
- public Set<ContainerInfo> getContainers() {
- lock.readLock().lock();
- try {
- return containerStateManager.getContainerIDs().stream().map(id -> {
- try {
- return containerStateManager.getContainer(id);
- } catch (ContainerNotFoundException e) {
- // How can this happen? o_O
- return null;
- }
- }).filter(Objects::nonNull).collect(Collectors.toSet());
- } finally {
- lock.readLock().unlock();
- }
- }
-
- @Override
- public ContainerInfo getContainer(final ContainerID containerID)
+ public ContainerInfo getContainer(final ContainerID id)
throws ContainerNotFoundException {
lock.readLock().lock();
try {
- return containerStateManager.getContainer(containerID);
- } finally {
- lock.readLock().unlock();
- }
- }
-
- @Override
- public Set<ContainerInfo> getContainers(final LifeCycleState state) {
- lock.readLock().lock();
- try {
- return containerStateManager.getContainerIDs(state).stream().map(id -> {
- try {
- return containerStateManager.getContainer(id);
- } catch (ContainerNotFoundException e) {
- // How can this happen? o_O
- return null;
- }
- }).filter(Objects::nonNull).collect(Collectors.toSet());
+ return Optional.ofNullable(containerStateManager
+ .getContainer(id.getProtobuf()))
+ .orElseThrow(() -> new ContainerNotFoundException("ID " + id));
} finally {
lock.readLock().unlock();
}
}
@Override
- public boolean exists(final ContainerID containerID) {
+ public List<ContainerInfo> listContainers(final ContainerID startID,
+ final int count) {
lock.readLock().lock();
try {
- return (containerStateManager.getContainer(containerID) != null);
- } catch (ContainerNotFoundException ex) {
- return false;
+ final long start = startID == null ? 0 : startID.getId();
+ final List<ContainerID> containersIds =
+ new ArrayList<>(containerStateManager.getContainerIDs());
+ Collections.sort(containersIds);
+ return containersIds.stream()
+ .filter(id -> id.getId() > start).limit(count)
+ .map(ContainerID::getProtobuf)
+ .map(containerStateManager::getContainer)
+ .collect(Collectors.toList());
} finally {
lock.readLock().unlock();
}
}
@Override
- public List<ContainerInfo> listContainers(final ContainerID startID,
- final int count) {
+ public List<ContainerInfo> listContainers(final LifeCycleState state) {
lock.readLock().lock();
try {
- final long startId = startID == null ? 0 : startID.getId();
- final List<ContainerID> containersIds =
- new ArrayList<>(containerStateManager.getContainerIDs());
- Collections.sort(containersIds);
- return containersIds.stream()
- .filter(id -> id.getId() > startId)
- .limit(count)
- .map(id -> {
- try {
- return containerStateManager.getContainer(id);
- } catch (ContainerNotFoundException ex) {
- // This can never happen, as we hold lock no one else can remove
- // the container after we got the container ids.
- LOG.warn("Container Missing.", ex);
- return null;
- }
- }).collect(Collectors.toList());
+ return containerStateManager.getContainerIDs(state).stream()
+ .map(ContainerID::getProtobuf)
+ .map(containerStateManager::getContainer)
+ .filter(Objects::nonNull).collect(Collectors.toList());
} finally {
lock.readLock().unlock();
}
@@ -201,8 +157,8 @@ public class ContainerManagerImpl implements ContainerManagerV2 {
replicationFactor + ", State:PipelineState.OPEN");
}
- final ContainerID containerID = containerStateManager
- .getNextContainerID();
+ // TODO: Replace this with Distributed unique id generator.
+ final ContainerID containerID = ContainerID.valueOf(UniqueId.next());
final Pipeline pipeline = pipelines.get(
(int) containerID.getId() % pipelines.size());
@@ -222,43 +178,65 @@ public class ContainerManagerImpl implements ContainerManagerV2 {
if (LOG.isTraceEnabled()) {
LOG.trace("New container allocated: {}", containerInfo);
}
- return containerStateManager.getContainer(containerID);
+ return containerStateManager.getContainer(containerID.getProtobuf());
} finally {
lock.writeLock().unlock();
}
}
@Override
- public void deleteContainer(final ContainerID containerID)
- throws ContainerNotFoundException {
- throw new UnsupportedOperationException("Not yet implemented!");
- }
-
- @Override
- public void updateContainerState(final ContainerID containerID,
+ public void updateContainerState(final ContainerID id,
final LifeCycleEvent event)
- throws ContainerNotFoundException {
- throw new UnsupportedOperationException("Not yet implemented!");
+ throws IOException, InvalidStateTransitionException {
+ final HddsProtos.ContainerID cid = id.getProtobuf();
+ lock.writeLock().lock();
+ try {
+ checkIfContainerExist(cid);
+ containerStateManager.updateContainerState(cid, event);
+ } finally {
+ lock.writeLock().unlock();
+ }
}
@Override
- public Set<ContainerReplica> getContainerReplicas(
- final ContainerID containerID) throws ContainerNotFoundException {
- throw new UnsupportedOperationException("Not yet implemented!");
+ public Set<ContainerReplica> getContainerReplicas(final ContainerID id)
+ throws ContainerNotFoundException {
+ lock.readLock().lock();
+ try {
+ return Optional.ofNullable(containerStateManager
+ .getContainerReplicas(id.getProtobuf()))
+ .orElseThrow(() -> new ContainerNotFoundException("ID " + id));
+ } finally {
+ lock.readLock().unlock();
+ }
}
@Override
- public void updateContainerReplica(final ContainerID containerID,
+ public void updateContainerReplica(final ContainerID id,
final ContainerReplica replica)
throws ContainerNotFoundException {
- throw new UnsupportedOperationException("Not yet implemented!");
+ final HddsProtos.ContainerID cid = id.getProtobuf();
+ lock.writeLock().lock();
+ try {
+ checkIfContainerExist(cid);
+ containerStateManager.updateContainerReplica(cid, replica);
+ } finally {
+ lock.writeLock().unlock();
+ }
}
@Override
- public void removeContainerReplica(final ContainerID containerID,
+ public void removeContainerReplica(final ContainerID id,
final ContainerReplica replica)
throws ContainerNotFoundException, ContainerReplicaNotFoundException {
- throw new UnsupportedOperationException("Not yet implemented!");
+ final HddsProtos.ContainerID cid = id.getProtobuf();
+ lock.writeLock().lock();
+ try {
+ checkIfContainerExist(cid);
+ containerStateManager.removeContainerReplica(cid, replica);
+ } finally {
+ lock.writeLock().unlock();
+ }
}
@Override
@@ -280,6 +258,27 @@ public class ContainerManagerImpl implements ContainerManagerV2 {
}
@Override
+ public void deleteContainer(final ContainerID id)
+ throws IOException {
+ final HddsProtos.ContainerID cid = id.getProtobuf();
+ lock.writeLock().lock();
+ try {
+ checkIfContainerExist(cid);
+ containerStateManager.removeContainer(cid);
+ } finally {
+ lock.writeLock().unlock();
+ }
+ }
+
+ private void checkIfContainerExist(final HddsProtos.ContainerID id)
+ throws ContainerNotFoundException {
+ if (!containerStateManager.contains(id)) {
+ throw new ContainerNotFoundException("Container with id #" +
+ id.getId() + " not found.");
+ }
+ }
+
+ @Override
public void close() throws Exception {
containerStateManager.close();
}
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerManagerV2.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerManagerV2.java
index 863ca4d..dcedb6c 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerManagerV2.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerManagerV2.java
@@ -17,7 +17,6 @@
package org.apache.hadoop.hdds.scm.container;
import java.io.IOException;
-import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.Set;
@@ -27,6 +26,7 @@ import org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleEvent;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType;
import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
+import org.apache.hadoop.ozone.common.statemachine.InvalidStateTransitionException;
/**
* TODO: Add extensive javadoc.
@@ -38,26 +38,6 @@ import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
public interface ContainerManagerV2 extends AutoCloseable {
// TODO: Rename this to ContainerManager
- /**
- * Returns all the container Ids managed by ContainerManager.
- *
- * @return Set of ContainerID
- */
- Set<ContainerID> getContainerIDs();
-
- /**
- * Returns all the containers managed by ContainerManager.
- *
- * @return List of ContainerInfo
- */
- Set<ContainerInfo> getContainers();
-
- /**
- * Returns all the containers which are in the specified state.
- *
- * @return List of ContainerInfo
- */
- Set<ContainerInfo> getContainers(LifeCycleState state);
/**
* Returns the ContainerInfo from the container ID.
@@ -66,8 +46,6 @@ public interface ContainerManagerV2 extends AutoCloseable {
ContainerInfo getContainer(ContainerID containerID)
throws ContainerNotFoundException;
- boolean exists(ContainerID containerID);
-
/**
* Returns containers under certain conditions.
* Search container IDs from start ID(exclusive),
@@ -84,6 +62,14 @@ public interface ContainerManagerV2 extends AutoCloseable {
*/
List<ContainerInfo> listContainers(ContainerID startID, int count);
+
+ /**
+ * Returns all the containers which are in the specified state.
+ *
+ * @return List of ContainerInfo
+ */
+ List<ContainerInfo> listContainers(LifeCycleState state);
+
/**
* Allocates a new container for a given keyName and replication factor.
*
@@ -97,23 +83,15 @@ public interface ContainerManagerV2 extends AutoCloseable {
String owner) throws IOException;
/**
- * Deletes a container from SCM.
- *
- * @param containerID - Container ID
- * @throws IOException
- */
- void deleteContainer(ContainerID containerID)
- throws ContainerNotFoundException;
-
- /**
* Update container state.
* @param containerID - Container ID
* @param event - container life cycle event
* @throws IOException
+ * @throws InvalidStateTransitionException
*/
void updateContainerState(ContainerID containerID,
LifeCycleEvent event)
- throws ContainerNotFoundException;
+ throws IOException, InvalidStateTransitionException;
/**
* Returns the latest list of replicas for given containerId.
@@ -157,18 +135,6 @@ public interface ContainerManagerV2 extends AutoCloseable {
* Returns ContainerInfo which matches the requirements.
* @param size - the amount of space required in the container
* @param owner - the user which requires space in its owned container
- * @param pipeline - pipeline to which the container should belong
- * @return ContainerInfo for the matching container.
- */
- default ContainerInfo getMatchingContainer(long size, String owner,
- Pipeline pipeline) {
- return getMatchingContainer(size, owner, pipeline, Collections.emptyList());
- }
-
- /**
- * Returns ContainerInfo which matches the requirements.
- * @param size - the amount of space required in the container
- * @param owner - the user which requires space in its owned container
* @param pipeline - pipeline to which the container should belong.
* @param excludedContainerIDS - containerIds to be excluded.
* @return ContainerInfo for the matching container.
@@ -185,4 +151,13 @@ public interface ContainerManagerV2 extends AutoCloseable {
*/
// Is it possible to remove this from the Interface?
void notifyContainerReportProcessing(boolean isFullReport, boolean success);
+
+ /**
+ * Deletes a container from SCM.
+ *
+ * @param containerID - Container ID
+ * @throws IOException
+ */
+ void deleteContainer(ContainerID containerID)
+ throws IOException;
}
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerReportHandler.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerReportHandler.java
index 7bca64f..18dffe7 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerReportHandler.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerReportHandler.java
@@ -120,7 +120,7 @@ public class ContainerReportHandler extends AbstractContainerReportHandler
final Set<ContainerID> containersInDn = replicas.parallelStream()
.map(ContainerReplicaProto::getContainerID)
- .map(ContainerID::valueof).collect(Collectors.toSet());
+ .map(ContainerID::valueOf).collect(Collectors.toSet());
final Set<ContainerID> missingReplicas = new HashSet<>(containersInSCM);
missingReplicas.removeAll(containersInDn);
@@ -167,7 +167,7 @@ public class ContainerReportHandler extends AbstractContainerReportHandler
} else if (unknownContainerHandleAction.equals(
UNKNOWN_CONTAINER_ACTION_DELETE)) {
final ContainerID containerId = ContainerID
- .valueof(replicaProto.getContainerID());
+ .valueOf(replicaProto.getContainerID());
deleteReplica(containerId, datanodeDetails, publisher, "unknown");
}
} catch (IOException e) {
@@ -221,7 +221,7 @@ public class ContainerReportHandler extends AbstractContainerReportHandler
for (ContainerReplicaProto replica : replicas) {
try {
final ContainerInfo containerInfo = containerManager.getContainer(
- ContainerID.valueof(replica.getContainerID()));
+ ContainerID.valueOf(replica.getContainerID()));
if (containerInfo.getDeleteTransactionId() >
replica.getDeleteTransactionId()) {
pendingDeleteStatusList.addPendingDeleteStatus(
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerStateManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerStateManager.java
index e575c60..0c3772f 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerStateManager.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerStateManager.java
@@ -329,7 +329,7 @@ public class ContainerStateManager {
// In Recon, while adding a 'new' CLOSED container, pipeline will be a
// random ID, and hence be passed down as null.
pipelineManager.addContainerToPipeline(pipeline.getId(),
- ContainerID.valueof(containerID));
+ ContainerID.valueOf(containerID));
}
containerStateCount.incrementAndGet(containerInfo.getState());
}
@@ -371,12 +371,8 @@ public class ContainerStateManager {
void updateDeleteTransactionId(
final Map<Long, Long> deleteTransactionMap) {
deleteTransactionMap.forEach((k, v) -> {
- try {
- containers.getContainerInfo(ContainerID.valueof(k))
- .updateDeleteTransactionId(v);
- } catch (ContainerNotFoundException e) {
- LOG.warn("Exception while updating delete transaction id.", e);
- }
+ containers.getContainerInfo(ContainerID.valueOf(k))
+ .updateDeleteTransactionId(v);
});
}
@@ -432,18 +428,13 @@ public class ContainerStateManager {
private ContainerInfo findContainerWithSpace(final long size,
final NavigableSet<ContainerID> searchSet, final String owner,
final PipelineID pipelineID) {
- try {
- // Get the container with space to meet our request.
- for (ContainerID id : searchSet) {
- final ContainerInfo containerInfo = containers.getContainerInfo(id);
- if (containerInfo.getUsedBytes() + size <= this.containerSize) {
- containerInfo.updateLastUsedTime();
- return containerInfo;
- }
+ // Get the container with space to meet our request.
+ for (ContainerID id : searchSet) {
+ final ContainerInfo containerInfo = containers.getContainerInfo(id);
+ if (containerInfo.getUsedBytes() + size <= this.containerSize) {
+ containerInfo.updateLastUsedTime();
+ return containerInfo;
}
- } catch (ContainerNotFoundException e) {
- // This should not happen!
- LOG.warn("Exception while finding container with space", e);
}
return null;
}
@@ -496,7 +487,11 @@ public class ContainerStateManager {
*/
ContainerInfo getContainer(final ContainerID containerID)
throws ContainerNotFoundException {
- return containers.getContainerInfo(containerID);
+ final ContainerInfo container = containers.getContainerInfo(containerID);
+ if (container != null) {
+ return container;
+ }
+ throw new ContainerNotFoundException(containerID.toString());
}
void close() throws IOException {
@@ -540,6 +535,9 @@ public class ContainerStateManager {
void removeContainer(final ContainerID containerID)
throws ContainerNotFoundException {
+ if (containers.getContainerInfo(containerID) == null) {
+ throw new ContainerNotFoundException(containerID.toString());
+ }
containers.removeContainer(containerID);
}
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerStateManagerImpl.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerStateManagerImpl.java
index 4f4456a..7f42a97 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerStateManagerImpl.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerStateManagerImpl.java
@@ -24,12 +24,12 @@ import java.util.Map;
import java.util.NavigableSet;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
-import java.util.concurrent.atomic.AtomicLong;
import com.google.common.base.Preconditions;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.StorageUnit;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ContainerInfoProto;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleEvent;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleState;
@@ -47,15 +47,32 @@ import org.apache.hadoop.hdds.scm.pipeline.PipelineNotFoundException;
import org.apache.hadoop.hdds.utils.db.Table;
import org.apache.hadoop.hdds.utils.db.Table.KeyValue;
import org.apache.hadoop.hdds.utils.db.TableIterator;
+import org.apache.hadoop.ozone.common.statemachine.InvalidStateTransitionException;
import org.apache.hadoop.ozone.common.statemachine.StateMachine;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
+import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleEvent.FINALIZE;
+import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleEvent.QUASI_CLOSE;
+import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleEvent.CLOSE;
+import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleEvent.FORCE_CLOSE;
+import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleEvent.DELETE;
+import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleEvent.CLEANUP;
+
+import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleState.OPEN;
+import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleState.CLOSING;
+import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleState.QUASI_CLOSED;
+import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleState.CLOSED;
+import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleState.DELETING;
+import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleState.DELETED;
+
/**
* Default implementation of ContainerStateManager. This implementation
* holds the Container States in-memory which is backed by a persistent store.
* The persistent store is always kept in sync with the in-memory state changes.
+ *
+ * This class is NOT thread safe. All the calls are idempotent.
*/
public final class ContainerStateManagerImpl
implements ContainerStateManagerV2 {
@@ -72,13 +89,6 @@ public final class ContainerStateManagerImpl
private final long containerSize;
/**
- * The container ID sequence which is used to create new container.
- * This will be removed once we have a Distributed Sequence ID Generator.
- */
- @Deprecated
- private final AtomicLong nextContainerID;
-
- /**
* In-memory representation of Container States.
*/
private final ContainerStateMap containers;
@@ -121,7 +131,6 @@ public final class ContainerStateManagerImpl
this.containerStore = containerStore;
this.stateMachine = newStateMachine();
this.containerSize = getConfiguredContainerSize(conf);
- this.nextContainerID = new AtomicLong(1L);
this.containers = new ContainerStateMap();
this.lastUsedMap = new ConcurrentHashMap<>();
@@ -138,40 +147,45 @@ public final class ContainerStateManagerImpl
final Set<LifeCycleState> finalStates = new HashSet<>();
// These are the steady states of a container.
- finalStates.add(LifeCycleState.OPEN);
- finalStates.add(LifeCycleState.CLOSED);
- finalStates.add(LifeCycleState.DELETED);
+ finalStates.add(CLOSED);
+ finalStates.add(DELETED);
final StateMachine<LifeCycleState, LifeCycleEvent> containerLifecycleSM =
- new StateMachine<>(LifeCycleState.OPEN, finalStates);
-
- containerLifecycleSM.addTransition(LifeCycleState.OPEN,
- LifeCycleState.CLOSING,
- LifeCycleEvent.FINALIZE);
+ new StateMachine<>(OPEN, finalStates);
- containerLifecycleSM.addTransition(LifeCycleState.CLOSING,
- LifeCycleState.QUASI_CLOSED,
- LifeCycleEvent.QUASI_CLOSE);
+ containerLifecycleSM.addTransition(OPEN, CLOSING, FINALIZE);
+ containerLifecycleSM.addTransition(CLOSING, QUASI_CLOSED, QUASI_CLOSE);
+ containerLifecycleSM.addTransition(CLOSING, CLOSED, CLOSE);
+ containerLifecycleSM.addTransition(QUASI_CLOSED, CLOSED, FORCE_CLOSE);
+ containerLifecycleSM.addTransition(CLOSED, DELETING, DELETE);
+ containerLifecycleSM.addTransition(DELETING, DELETED, CLEANUP);
- containerLifecycleSM.addTransition(LifeCycleState.CLOSING,
- LifeCycleState.CLOSED,
- LifeCycleEvent.CLOSE);
-
- containerLifecycleSM.addTransition(LifeCycleState.QUASI_CLOSED,
- LifeCycleState.CLOSED,
- LifeCycleEvent.FORCE_CLOSE);
-
- containerLifecycleSM.addTransition(LifeCycleState.CLOSED,
- LifeCycleState.DELETING,
- LifeCycleEvent.DELETE);
-
- containerLifecycleSM.addTransition(LifeCycleState.DELETING,
- LifeCycleState.DELETED,
- LifeCycleEvent.CLEANUP);
+ /* The following set of transitions are to make state machine
+ * transition idempotent.
+ */
+ makeStateTransitionIdempotent(containerLifecycleSM, FINALIZE,
+ CLOSING, QUASI_CLOSED, CLOSED, DELETING, DELETED);
+ makeStateTransitionIdempotent(containerLifecycleSM, QUASI_CLOSE,
+ QUASI_CLOSED, CLOSED, DELETING, DELETED);
+ makeStateTransitionIdempotent(containerLifecycleSM, CLOSE,
+ CLOSED, DELETING, DELETED);
+ makeStateTransitionIdempotent(containerLifecycleSM, FORCE_CLOSE,
+ CLOSED, DELETING, DELETED);
+ makeStateTransitionIdempotent(containerLifecycleSM, DELETE,
+ DELETING, DELETED);
+ makeStateTransitionIdempotent(containerLifecycleSM, CLEANUP, DELETED);
return containerLifecycleSM;
}
+ private void makeStateTransitionIdempotent(
+ final StateMachine<LifeCycleState, LifeCycleEvent> sm,
+ final LifeCycleEvent event, final LifeCycleState... states) {
+ for (LifeCycleState state : states) {
+ sm.addTransition(state, state, event);
+ }
+ }
+
/**
* Returns the configured container size.
*
@@ -197,29 +211,27 @@ public final class ContainerStateManagerImpl
final ContainerInfo container = iterator.next().getValue();
Preconditions.checkNotNull(container);
containers.addContainer(container);
- nextContainerID.set(Long.max(container.containerID().getId(),
- nextContainerID.get()));
if (container.getState() == LifeCycleState.OPEN) {
try {
pipelineManager.addContainerToPipeline(container.getPipelineID(),
- ContainerID.valueof(container.getContainerID()));
+ container.containerID());
} catch (PipelineNotFoundException ex) {
LOG.warn("Found container {} which is in OPEN state with " +
"pipeline {} that does not exist. Marking container for " +
"closing.", container, container.getPipelineID());
- updateContainerState(container.containerID(),
- LifeCycleEvent.FINALIZE);
+ try {
+ updateContainerState(container.containerID().getProtobuf(),
+ LifeCycleEvent.FINALIZE);
+ } catch (InvalidStateTransitionException e) {
+ // This cannot happen.
+ LOG.warn("Unable to finalize Container {}.", container);
+ }
}
}
}
}
@Override
- public ContainerID getNextContainerID() {
- return ContainerID.valueof(nextContainerID.get());
- }
-
- @Override
public Set<ContainerID> getContainerIDs() {
return containers.getAllContainerIDs();
}
@@ -230,15 +242,9 @@ public final class ContainerStateManagerImpl
}
@Override
- public ContainerInfo getContainer(final ContainerID containerID)
- throws ContainerNotFoundException {
- return containers.getContainerInfo(containerID);
- }
-
- @Override
- public Set<ContainerReplica> getContainerReplicas(
- final ContainerID containerID) throws ContainerNotFoundException {
- return containers.getContainerReplicas(containerID);
+ public ContainerInfo getContainer(final HddsProtos.ContainerID id) {
+ return containers.getContainerInfo(
+ ContainerID.getFromProtobuf(id));
}
@Override
@@ -254,32 +260,63 @@ public final class ContainerStateManagerImpl
final ContainerID containerID = container.containerID();
final PipelineID pipelineID = container.getPipelineID();
- /*
- * TODO:
- * Check if the container already exist in in ContainerStateManager.
- * This optimization can be done after moving ContainerNotFoundException
- * from ContainerStateMap to ContainerManagerImpl.
- */
+ if (!containers.contains(containerID)) {
+ containerStore.put(containerID, container);
+ try {
+ containers.addContainer(container);
+ pipelineManager.addContainerToPipeline(pipelineID, containerID);
+ } catch (Exception ex) {
+ containers.removeContainer(containerID);
+ containerStore.delete(containerID);
+ throw ex;
+ }
+ }
+ }
- containerStore.put(containerID, container);
- containers.addContainer(container);
- pipelineManager.addContainerToPipeline(pipelineID, containerID);
- nextContainerID.incrementAndGet();
+ @Override
+ public boolean contains(final HddsProtos.ContainerID id) {
+ // TODO: Remove the protobuf conversion after fixing ContainerStateMap.
+ return containers.contains(ContainerID.getFromProtobuf(id));
}
- void updateContainerState(final ContainerID containerID,
- final LifeCycleEvent event)
- throws IOException {
- throw new UnsupportedOperationException("Not yet implemented!");
+ public void updateContainerState(final HddsProtos.ContainerID containerID,
+ final LifeCycleEvent event)
+ throws IOException, InvalidStateTransitionException {
+ // TODO: Remove the protobuf conversion after fixing ContainerStateMap.
+ final ContainerID id = ContainerID.getFromProtobuf(containerID);
+ if (containers.contains(id)) {
+ final ContainerInfo info = containers.getContainerInfo(id);
+ final LifeCycleState oldState = info.getState();
+ final LifeCycleState newState = stateMachine.getNextState(
+ info.getState(), event);
+ if (newState.getNumber() > oldState.getNumber()) {
+ containers.updateState(id, info.getState(), newState);
+ }
+ }
}
- void updateContainerReplica(final ContainerID containerID,
- final ContainerReplica replica)
- throws ContainerNotFoundException {
- containers.updateContainerReplica(containerID, replica);
+ @Override
+ public Set<ContainerReplica> getContainerReplicas(
+ final HddsProtos.ContainerID id) {
+ return containers.getContainerReplicas(
+ ContainerID.getFromProtobuf(id));
}
+ @Override
+ public void updateContainerReplica(final HddsProtos.ContainerID id,
+ final ContainerReplica replica) {
+ containers.updateContainerReplica(ContainerID.getFromProtobuf(id),
+ replica);
+ }
+
+ @Override
+ public void removeContainerReplica(final HddsProtos.ContainerID id,
+ final ContainerReplica replica) {
+ containers.removeContainerReplica(ContainerID.getFromProtobuf(id),
+ replica);
+
+ }
void updateDeleteTransactionId(
final Map<ContainerID, Long> deleteTransactionMap) {
@@ -291,23 +328,14 @@ public final class ContainerStateManagerImpl
throw new UnsupportedOperationException("Not yet implemented!");
}
-
NavigableSet<ContainerID> getMatchingContainerIDs(final String owner,
final ReplicationType type, final ReplicationFactor factor,
final LifeCycleState state) {
throw new UnsupportedOperationException("Not yet implemented!");
}
- void removeContainerReplica(final ContainerID containerID,
- final ContainerReplica replica)
- throws ContainerNotFoundException, ContainerReplicaNotFoundException {
- throw new UnsupportedOperationException("Not yet implemented!");
- }
-
-
- void removeContainer(final ContainerID containerID)
- throws ContainerNotFoundException {
- throw new UnsupportedOperationException("Not yet implemented!");
+ public void removeContainer(final HddsProtos.ContainerID id) {
+ containers.removeContainer(ContainerID.getFromProtobuf(id));
}
@Override
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerStateManagerV2.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerStateManagerV2.java
index 3520b01..3a0cf21 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerStateManagerV2.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerStateManagerV2.java
@@ -20,9 +20,11 @@ package org.apache.hadoop.hdds.scm.container;
import java.io.IOException;
import java.util.Set;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ContainerInfoProto;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleState;
import org.apache.hadoop.hdds.scm.metadata.Replicate;
+import org.apache.hadoop.ozone.common.statemachine.InvalidStateTransitionException;
/**
* A ContainerStateManager is responsible for keeping track of all the
@@ -94,10 +96,9 @@ public interface ContainerStateManagerV2 {
************************************************************************/
/**
- * Returns a new container ID which can be used for allocating a new
- * container.
+ *
*/
- ContainerID getNextContainerID();
+ boolean contains(HddsProtos.ContainerID containerID);
/**
* Returns the ID of all the managed containers.
@@ -114,14 +115,24 @@ public interface ContainerStateManagerV2 {
/**
*
*/
- ContainerInfo getContainer(ContainerID containerID)
- throws ContainerNotFoundException;
+ ContainerInfo getContainer(HddsProtos.ContainerID id);
+
+ /**
+ *
+ */
+ Set<ContainerReplica> getContainerReplicas(HddsProtos.ContainerID id);
/**
*
*/
- Set<ContainerReplica> getContainerReplicas(ContainerID containerID)
- throws ContainerNotFoundException;
+ void updateContainerReplica(HddsProtos.ContainerID id,
+ ContainerReplica replica);
+
+ /**
+ *
+ */
+ void removeContainerReplica(HddsProtos.ContainerID id,
+ ContainerReplica replica);
/**
*
@@ -133,5 +144,20 @@ public interface ContainerStateManagerV2 {
/**
*
*/
+ @Replicate
+ void updateContainerState(HddsProtos.ContainerID id,
+ HddsProtos.LifeCycleEvent event)
+ throws IOException, InvalidStateTransitionException;
+
+ /**
+ *
+ */
+ @Replicate
+ void removeContainer(HddsProtos.ContainerID containerInfo)
+ throws IOException;
+
+ /**
+ *
+ */
void close() throws Exception;
}
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/IncrementalContainerReportHandler.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/IncrementalContainerReportHandler.java
index ed87565..3317f42 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/IncrementalContainerReportHandler.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/IncrementalContainerReportHandler.java
@@ -71,7 +71,7 @@ public class IncrementalContainerReportHandler extends
for (ContainerReplicaProto replicaProto :
report.getReport().getReportList()) {
try {
- final ContainerID id = ContainerID.valueof(
+ final ContainerID id = ContainerID.valueOf(
replicaProto.getContainerID());
if (!replicaProto.getState().equals(
ContainerReplicaProto.State.DELETED)) {
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/SCMContainerManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/SCMContainerManager.java
index 2117e70..f59e401 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/SCMContainerManager.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/SCMContainerManager.java
@@ -130,7 +130,7 @@ public class SCMContainerManager implements ContainerManager {
try {
if (container.getState() == LifeCycleState.OPEN) {
pipelineManager.addContainerToPipeline(container.getPipelineID(),
- ContainerID.valueof(container.getContainerID()));
+ ContainerID.valueOf(container.getContainerID()));
}
} catch (PipelineNotFoundException ex) {
LOG.warn("Found a Container {} which is in {} state with pipeline {} " +
@@ -216,7 +216,9 @@ public class SCMContainerManager implements ContainerManager {
public boolean exists(ContainerID containerID) {
lock.lock();
try {
- return (containerStateManager.getContainer(containerID) != null);
+ Preconditions.checkNotNull(
+ containerStateManager.getContainer(containerID));
+ return true;
} catch (ContainerNotFoundException e) {
return false;
} finally {
@@ -290,7 +292,7 @@ public class SCMContainerManager implements ContainerManager {
// PipelineStateManager.
pipelineManager.removeContainerFromPipeline(
containerInfo.getPipelineID(),
- new ContainerID(containerInfo.getContainerID()));
+ containerInfo.containerID());
throw ex;
}
return containerInfo;
@@ -404,7 +406,8 @@ public class SCMContainerManager implements ContainerManager {
try(BatchOperation batchOperation = batchHandler.initBatchOperation()) {
for (Map.Entry< Long, Long > entry : deleteTransactionMap.entrySet()) {
long containerID = entry.getKey();
- ContainerID containerIdObject = new ContainerID(containerID);
+
+ ContainerID containerIdObject = ContainerID.valueOf(containerID);
ContainerInfo containerInfo =
containerStore.get(containerIdObject);
ContainerInfo containerInfoInMem = containerStateManager
@@ -493,7 +496,7 @@ public class SCMContainerManager implements ContainerManager {
throws IOException {
try {
containerStore
- .put(new ContainerID(containerInfo.getContainerID()), containerInfo);
+ .put(containerInfo.containerID(), containerInfo);
// Incrementing here, as allocateBlock to create a container calls
// getMatchingContainer() and finally calls this API to add newly
// created container to DB.
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/states/ContainerAttribute.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/states/ContainerAttribute.java
index af44a8a..61cff09 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/states/ContainerAttribute.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/states/ContainerAttribute.java
@@ -153,7 +153,7 @@ public class ContainerAttribute<T> {
* @return true or false
*/
public boolean hasContainerID(T key, int id) {
- return hasContainerID(key, ContainerID.valueof(id));
+ return hasContainerID(key, ContainerID.valueOf(id));
}
/**
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/states/ContainerStateMap.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/states/ContainerStateMap.java
index d71049b..4d143e0 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/states/ContainerStateMap.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/states/ContainerStateMap.java
@@ -18,32 +18,30 @@
package org.apache.hadoop.hdds.scm.container.states;
+import java.util.Set;
+import java.util.Collections;
+import java.util.Map;
+import java.util.NavigableSet;
+import java.util.TreeSet;
+import java.util.concurrent.locks.ReadWriteLock;
+import java.util.concurrent.locks.ReentrantReadWriteLock;
+import java.util.concurrent.ConcurrentHashMap;
+
import com.google.common.base.Preconditions;
import org.apache.hadoop.hdds.scm.container.ContainerID;
-import org.apache.hadoop.hdds.scm.container.ContainerNotFoundException;
import org.apache.hadoop.hdds.scm.container.ContainerReplica;
import org.apache.hadoop.hdds.scm.container.ContainerInfo;
-import org.apache.hadoop.hdds.scm.container.ContainerReplicaNotFoundException;
import org.apache.hadoop.hdds.scm.exceptions.SCMException;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleState;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType;
+
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
-import java.util.Set;
-import java.util.Collections;
-import java.util.Map;
-import java.util.NavigableSet;
-import java.util.TreeSet;
-import java.util.concurrent.locks.ReadWriteLock;
-import java.util.concurrent.locks.ReentrantReadWriteLock;
-import java.util.concurrent.ConcurrentHashMap;
import static org.apache.hadoop.hdds.scm.exceptions.SCMException.ResultCodes
- .CONTAINER_EXISTS;
-import static org.apache.hadoop.hdds.scm.exceptions.SCMException.ResultCodes
.FAILED_TO_CHANGE_CONTAINER_STATE;
/**
@@ -76,6 +74,8 @@ import static org.apache.hadoop.hdds.scm.exceptions.SCMException.ResultCodes
* select a container that belongs to user1, with Ratis replication which can
* make 3 copies of data. The fact that we will look for open containers by
* default and if we cannot find them we will add new containers.
+ *
+ * All the calls are idempotent.
*/
public class ContainerStateMap {
private static final Logger LOG =
@@ -95,6 +95,7 @@ public class ContainerStateMap {
// Container State Map lock should be held before calling into
// Update ContainerAttributes. The consistency of ContainerAttributes is
// protected by this lock.
+ // Can we remove this lock?
private final ReadWriteLock lock;
/**
@@ -120,56 +121,57 @@ public class ContainerStateMap {
public void addContainer(final ContainerInfo info)
throws SCMException {
Preconditions.checkNotNull(info, "Container Info cannot be null");
- Preconditions.checkArgument(info.getReplicationFactor().getNumber() > 0,
- "ExpectedReplicaCount should be greater than 0");
-
lock.writeLock().lock();
try {
final ContainerID id = info.containerID();
- if (containerMap.putIfAbsent(id, info) != null) {
- LOG.debug("Duplicate container ID detected. {}", id);
- throw new
- SCMException("Duplicate container ID detected.",
- CONTAINER_EXISTS);
+ if (!contains(id)) {
+ containerMap.put(id, info);
+ lifeCycleStateMap.insert(info.getState(), id);
+ ownerMap.insert(info.getOwner(), id);
+ factorMap.insert(info.getReplicationFactor(), id);
+ typeMap.insert(info.getReplicationType(), id);
+ replicaMap.put(id, ConcurrentHashMap.newKeySet());
+
+ // Flush the cache of this container type, will be added later when
+ // get container queries are executed.
+ flushCache(info);
+ LOG.trace("Container {} added to ContainerStateMap.", id);
}
-
- lifeCycleStateMap.insert(info.getState(), id);
- ownerMap.insert(info.getOwner(), id);
- factorMap.insert(info.getReplicationFactor(), id);
- typeMap.insert(info.getReplicationType(), id);
- replicaMap.put(id, ConcurrentHashMap.newKeySet());
-
- // Flush the cache of this container type, will be added later when
- // get container queries are executed.
- flushCache(info);
- LOG.trace("Created container with {} successfully.", id);
} finally {
lock.writeLock().unlock();
}
}
+ public boolean contains(final ContainerID id) {
+ lock.readLock().lock();
+ try {
+ return containerMap.containsKey(id);
+ } finally {
+ lock.readLock().unlock();
+ }
+ }
+
/**
* Removes a Container Entry from ContainerStateMap.
*
- * @param containerID - ContainerID
- * @throws SCMException - throws if create failed.
+ * @param id - ContainerID
*/
- public void removeContainer(final ContainerID containerID)
- throws ContainerNotFoundException {
- Preconditions.checkNotNull(containerID, "ContainerID cannot be null");
+ public void removeContainer(final ContainerID id) {
+ Preconditions.checkNotNull(id, "ContainerID cannot be null");
lock.writeLock().lock();
try {
- checkIfContainerExist(containerID);
- // Should we revert back to the original state if any of the below
- // remove operation fails?
- final ContainerInfo info = containerMap.remove(containerID);
- lifeCycleStateMap.remove(info.getState(), containerID);
- ownerMap.remove(info.getOwner(), containerID);
- factorMap.remove(info.getReplicationFactor(), containerID);
- typeMap.remove(info.getReplicationType(), containerID);
- // Flush the cache of this container type.
- flushCache(info);
- LOG.trace("Removed container with {} successfully.", containerID);
+ if (contains(id)) {
+ // Should we revert back to the original state if any of the below
+ // remove operation fails?
+ final ContainerInfo info = containerMap.remove(id);
+ lifeCycleStateMap.remove(info.getState(), id);
+ ownerMap.remove(info.getOwner(), id);
+ factorMap.remove(info.getReplicationFactor(), id);
+ typeMap.remove(info.getReplicationType(), id);
+ // Flush the cache of this container type.
+ flushCache(info);
+ LOG.trace("Container {} removed from ContainerStateMap.", id);
+ }
} finally {
lock.writeLock().unlock();
}
@@ -179,13 +181,11 @@ public class ContainerStateMap {
* Returns the latest state of Container from SCM's Container State Map.
*
* @param containerID - ContainerID
- * @return container info, if found.
+ * @return container info, if found else null.
*/
- public ContainerInfo getContainerInfo(final ContainerID containerID)
- throws ContainerNotFoundException {
+ public ContainerInfo getContainerInfo(final ContainerID containerID) {
lock.readLock().lock();
try {
- checkIfContainerExist(containerID);
return containerMap.get(containerID);
} finally {
lock.readLock().unlock();
@@ -194,19 +194,18 @@ public class ContainerStateMap {
/**
* Returns the latest list of DataNodes where replica for given containerId
- * exist. Throws an SCMException if no entry is found for given containerId.
+ * exist.
*
* @param containerID
* @return Set<DatanodeDetails>
*/
public Set<ContainerReplica> getContainerReplicas(
- final ContainerID containerID) throws ContainerNotFoundException {
+ final ContainerID containerID) {
Preconditions.checkNotNull(containerID);
lock.readLock().lock();
try {
- checkIfContainerExist(containerID);
- return Collections
- .unmodifiableSet(replicaMap.get(containerID));
+ final Set<ContainerReplica> replicas = replicaMap.get(containerID);
+ return replicas == null ? null : Collections.unmodifiableSet(replicas);
} finally {
lock.readLock().unlock();
}
@@ -221,14 +220,15 @@ public class ContainerStateMap {
* @param replica
*/
public void updateContainerReplica(final ContainerID containerID,
- final ContainerReplica replica) throws ContainerNotFoundException {
+ final ContainerReplica replica) {
Preconditions.checkNotNull(containerID);
lock.writeLock().lock();
try {
- checkIfContainerExist(containerID);
- Set<ContainerReplica> replicas = replicaMap.get(containerID);
- replicas.remove(replica);
- replicas.add(replica);
+ if (contains(containerID)) {
+ final Set<ContainerReplica> replicas = replicaMap.get(containerID);
+ replicas.remove(replica);
+ replicas.add(replica);
+ }
} finally {
lock.writeLock().unlock();
}
@@ -242,18 +242,13 @@ public class ContainerStateMap {
* @return True of dataNode is removed successfully else false.
*/
public void removeContainerReplica(final ContainerID containerID,
- final ContainerReplica replica)
- throws ContainerNotFoundException, ContainerReplicaNotFoundException {
+ final ContainerReplica replica) {
Preconditions.checkNotNull(containerID);
Preconditions.checkNotNull(replica);
-
lock.writeLock().lock();
try {
- checkIfContainerExist(containerID);
- if(!replicaMap.get(containerID).remove(replica)) {
- throw new ContainerReplicaNotFoundException(
- "Container #"
- + containerID.getId() + ", replica: " + replica);
+ if (contains(containerID)) {
+ replicaMap.get(containerID).remove(replica);
}
} finally {
lock.writeLock().unlock();
@@ -264,15 +259,16 @@ public class ContainerStateMap {
* Just update the container State.
* @param info ContainerInfo.
*/
- public void updateContainerInfo(final ContainerInfo info)
- throws ContainerNotFoundException {
+ public void updateContainerInfo(final ContainerInfo info) {
+ Preconditions.checkNotNull(info);
+ final ContainerID id = info.containerID();
lock.writeLock().lock();
try {
- Preconditions.checkNotNull(info);
- checkIfContainerExist(info.containerID());
- final ContainerInfo currentInfo = containerMap.get(info.containerID());
- flushCache(info, currentInfo);
- containerMap.put(info.containerID(), info);
+ if (contains(id)) {
+ final ContainerInfo currentInfo = containerMap.get(id);
+ flushCache(info, currentInfo);
+ containerMap.put(id, info);
+ }
} finally {
lock.writeLock().unlock();
}
@@ -287,12 +283,16 @@ public class ContainerStateMap {
* @throws SCMException - in case of failure.
*/
public void updateState(ContainerID containerID, LifeCycleState currentState,
- LifeCycleState newState) throws SCMException, ContainerNotFoundException {
+ LifeCycleState newState) throws SCMException {
Preconditions.checkNotNull(currentState);
Preconditions.checkNotNull(newState);
lock.writeLock().lock();
try {
- checkIfContainerExist(containerID);
+ if (!contains(containerID)) {
+ return;
+ }
+
+ // TODO: Simplify this logic.
final ContainerInfo currentInfo = containerMap.get(containerID);
try {
currentInfo.setState(newState);
@@ -340,7 +340,12 @@ public class ContainerStateMap {
}
public Set<ContainerID> getAllContainerIDs() {
- return Collections.unmodifiableSet(containerMap.keySet());
+ lock.readLock().lock();
+ try {
+ return Collections.unmodifiableSet(containerMap.keySet());
+ } finally {
+ lock.readLock().unlock();
+ }
}
/**
@@ -535,13 +540,4 @@ public class ContainerStateMap {
}
}
- // TODO: Move container not found exception to upper layer.
- private void checkIfContainerExist(ContainerID containerID)
- throws ContainerNotFoundException {
- if (!containerMap.containsKey(containerID)) {
- throw new ContainerNotFoundException("Container with id #" +
- containerID.getId() + " not found.");
- }
- }
-
}
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/metadata/ContainerIDCodec.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/metadata/ContainerIDCodec.java
index 87c9e91..cb02e31 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/metadata/ContainerIDCodec.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/metadata/ContainerIDCodec.java
@@ -38,11 +38,11 @@ public class ContainerIDCodec implements Codec<ContainerID> {
@Override
public ContainerID fromPersistedFormat(byte[] rawData) throws IOException {
- return new ContainerID(longCodec.fromPersistedFormat(rawData));
+ return ContainerID.valueOf(longCodec.fromPersistedFormat(rawData));
}
@Override
public ContainerID copyObject(ContainerID object) {
- return new ContainerID(object.getId());
+ return ContainerID.valueOf(object.getId());
}
}
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMClientProtocolServer.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMClientProtocolServer.java
index ede679d..594527a 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMClientProtocolServer.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMClientProtocolServer.java
@@ -203,7 +203,7 @@ public class SCMClientProtocolServer implements
getScm().checkAdminAccess(remoteUser);
try {
return scm.getContainerManager()
- .getContainer(ContainerID.valueof(containerID));
+ .getContainer(ContainerID.valueOf(containerID));
} catch (IOException ex) {
auditSuccess = false;
AUDIT.logReadFailure(
@@ -222,7 +222,7 @@ public class SCMClientProtocolServer implements
private ContainerWithPipeline getContainerWithPipelineCommon(
long containerID) throws IOException {
- final ContainerID cid = ContainerID.valueof(containerID);
+ final ContainerID cid = ContainerID.valueOf(containerID);
final ContainerInfo container = scm.getContainerManager()
.getContainer(cid);
@@ -268,13 +268,13 @@ public class SCMClientProtocolServer implements
AUDIT.logReadSuccess(buildAuditMessageForSuccess(
SCMAction.GET_CONTAINER_WITH_PIPELINE,
Collections.singletonMap("containerID",
- ContainerID.valueof(containerID).toString())));
+ ContainerID.valueOf(containerID).toString())));
return cp;
} catch (IOException ex) {
AUDIT.logReadFailure(buildAuditMessageForFailure(
SCMAction.GET_CONTAINER_WITH_PIPELINE,
Collections.singletonMap("containerID",
- ContainerID.valueof(containerID).toString()), ex));
+ ContainerID.valueOf(containerID).toString()), ex));
throw ex;
}
}
@@ -291,13 +291,13 @@ public class SCMClientProtocolServer implements
try {
ContainerWithPipeline cp = getContainerWithPipelineCommon(containerID);
cpList.add(cp);
- strContainerIDs.append(ContainerID.valueof(containerID).toString());
+ strContainerIDs.append(ContainerID.valueOf(containerID).toString());
strContainerIDs.append(",");
} catch (IOException ex) {
AUDIT.logReadFailure(buildAuditMessageForFailure(
SCMAction.GET_CONTAINER_WITH_PIPELINE_BATCH,
Collections.singletonMap("containerID",
- ContainerID.valueof(containerID).toString()), ex));
+ ContainerID.valueOf(containerID).toString()), ex));
throw ex;
}
}
@@ -337,7 +337,7 @@ public class SCMClientProtocolServer implements
// "null" is assigned, so that its handled in the
// scm.getContainerManager().listContainer method
final ContainerID containerId = startContainerID != 0 ? ContainerID
- .valueof(startContainerID) : null;
+ .valueOf(startContainerID) : null;
return scm.getContainerManager().
listContainer(containerId, count);
} catch (Exception ex) {
@@ -364,7 +364,7 @@ public class SCMClientProtocolServer implements
try {
getScm().checkAdminAccess(remoteUser);
scm.getContainerManager().deleteContainer(
- ContainerID.valueof(containerID));
+ ContainerID.valueOf(containerID));
} catch (Exception ex) {
auditSuccess = false;
AUDIT.logWriteFailure(
@@ -407,7 +407,7 @@ public class SCMClientProtocolServer implements
auditMap.put("remoteUser", remoteUser);
try {
scm.checkAdminAccess(remoteUser);
- final ContainerID cid = ContainerID.valueof(containerID);
+ final ContainerID cid = ContainerID.valueOf(containerID);
final HddsProtos.LifeCycleState state = scm.getContainerManager()
.getContainer(cid).getState();
if (!state.equals(HddsProtos.LifeCycleState.OPEN)) {
@@ -415,7 +415,7 @@ public class SCMClientProtocolServer implements
ResultCodes.UNEXPECTED_CONTAINER_STATE);
}
scm.getEventQueue().fireEvent(SCMEvents.CLOSE_CONTAINER,
- ContainerID.valueof(containerID));
+ ContainerID.valueOf(containerID));
AUDIT.logWriteSuccess(buildAuditMessageForSuccess(
SCMAction.CLOSE_CONTAINER, auditMap));
} catch (Exception ex) {
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java
index b17729b..4513857 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java
@@ -774,7 +774,7 @@ public final class StorageContainerManager extends ServiceRuntimeInfoImpl
@VisibleForTesting
public ContainerInfo getContainerInfo(long containerID) throws
IOException {
- return containerManager.getContainer(ContainerID.valueof(containerID));
+ return containerManager.getContainer(ContainerID.valueOf(containerID));
}
/**
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/TestDeletedBlockLog.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/TestDeletedBlockLog.java
index d4e2553..96147c5 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/TestDeletedBlockLog.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/TestDeletedBlockLog.java
@@ -410,13 +410,14 @@ public class TestDeletedBlockLog {
.build();
ContainerInfo.Builder builder = new ContainerInfo.Builder();
- builder.setPipelineID(pipeline.getId())
+ builder.setContainerID(containerID)
+ .setPipelineID(pipeline.getId())
.setReplicationType(pipeline.getType())
.setReplicationFactor(pipeline.getFactor());
ContainerInfo containerInfo = builder.build();
Mockito.doReturn(containerInfo).when(containerManager)
- .getContainer(ContainerID.valueof(containerID));
+ .getContainer(ContainerID.valueOf(containerID));
final Set<ContainerReplica> replicaSet = dns.stream()
.map(datanodeDetails -> ContainerReplica.newBuilder()
@@ -426,7 +427,7 @@ public class TestDeletedBlockLog {
.build())
.collect(Collectors.toSet());
when(containerManager.getContainerReplicas(
- ContainerID.valueof(containerID)))
+ ContainerID.valueOf(containerID)))
.thenReturn(replicaSet);
}
}
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestCloseContainerEventHandler.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestCloseContainerEventHandler.java
index daa9726..fbe4d42 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestCloseContainerEventHandler.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestCloseContainerEventHandler.java
@@ -126,7 +126,7 @@ public class TestCloseContainerEventHandler {
GenericTestUtils.LogCapturer logCapturer = GenericTestUtils.LogCapturer
.captureLogs(CloseContainerEventHandler.LOG);
eventQueue.fireEvent(CLOSE_CONTAINER,
- new ContainerID(Math.abs(RandomUtils.nextInt())));
+ ContainerID.valueOf(Math.abs(RandomUtils.nextInt())));
eventQueue.processAll(1000);
Assert.assertTrue(logCapturer.getOutput()
.contains("Close container Event triggered for container"));
@@ -138,7 +138,7 @@ public class TestCloseContainerEventHandler {
GenericTestUtils.LogCapturer logCapturer = GenericTestUtils.LogCapturer
.captureLogs(CloseContainerEventHandler.LOG);
eventQueue.fireEvent(CLOSE_CONTAINER,
- new ContainerID(id));
+ ContainerID.valueOf(id));
eventQueue.processAll(1000);
Assert.assertTrue(logCapturer.getOutput()
.contains("Failed to close the container"));
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerActionsHandler.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerActionsHandler.java
index 3434825..09b51f0 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerActionsHandler.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerActionsHandler.java
@@ -61,7 +61,7 @@ public class TestContainerActionsHandler {
queue.fireEvent(SCMEvents.CONTAINER_ACTIONS, containerActions);
queue.processAll(1000L);
verify(closeContainerEventHandler, times(1))
- .onMessage(ContainerID.valueof(1L), queue);
+ .onMessage(ContainerID.valueOf(1L), queue);
}
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerManagerImpl.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerManagerImpl.java
index 022d392..6492e0a 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerManagerImpl.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerManagerImpl.java
@@ -79,13 +79,37 @@ public class TestContainerManagerImpl {
@Test
public void testAllocateContainer() throws Exception {
- Assert.assertTrue(containerManager.getContainerIDs().isEmpty());
+ Assert.assertTrue(
+ containerManager.listContainers(null, Integer.MAX_VALUE).isEmpty());
final ContainerInfo container = containerManager.allocateContainer(
HddsProtos.ReplicationType.RATIS,
HddsProtos.ReplicationFactor.THREE, "admin");
- Assert.assertEquals(1, containerManager.getContainerIDs().size());
+ Assert.assertEquals(1,
+ containerManager.listContainers(null, Integer.MAX_VALUE).size());
Assert.assertNotNull(containerManager.getContainer(
container.containerID()));
}
-}
\ No newline at end of file
+ @Test
+ public void testUpdateContainerState() throws Exception {
+ final ContainerInfo container = containerManager.allocateContainer(
+ HddsProtos.ReplicationType.RATIS,
+ HddsProtos.ReplicationFactor.THREE, "admin");
+ final ContainerID cid = container.containerID();
+ Assert.assertEquals(HddsProtos.LifeCycleState.OPEN,
+ containerManager.getContainer(cid).getState());
+ containerManager.updateContainerState(cid,
+ HddsProtos.LifeCycleEvent.FINALIZE);
+ Assert.assertEquals(HddsProtos.LifeCycleState.CLOSING,
+ containerManager.getContainer(cid).getState());
+ containerManager.updateContainerState(cid,
+ HddsProtos.LifeCycleEvent.QUASI_CLOSE);
+ Assert.assertEquals(HddsProtos.LifeCycleState.QUASI_CLOSED,
+ containerManager.getContainer(cid).getState());
+ containerManager.updateContainerState(cid,
+ HddsProtos.LifeCycleEvent.FORCE_CLOSE);
+ Assert.assertEquals(HddsProtos.LifeCycleState.CLOSED,
+ containerManager.getContainer(cid).getState());
+ }
+
+}
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestSCMContainerManager.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestSCMContainerManager.java
index 2565076..a45d637 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestSCMContainerManager.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestSCMContainerManager.java
@@ -287,7 +287,7 @@ public class TestSCMContainerManager {
@Test
public void testgetNoneExistentContainer() {
try {
- containerManager.getContainer(ContainerID.valueof(
+ containerManager.getContainer(ContainerID.valueOf(
random.nextInt() & Integer.MAX_VALUE));
Assert.fail();
} catch (ContainerNotFoundException ex) {
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/states/TestContainerAttribute.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/states/TestContainerAttribute.java
index 63cc9bf..b7b8988 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/states/TestContainerAttribute.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/states/TestContainerAttribute.java
@@ -39,7 +39,7 @@ public class TestContainerAttribute {
@Test
public void testInsert() throws SCMException {
ContainerAttribute<Integer> containerAttribute = new ContainerAttribute<>();
- ContainerID id = new ContainerID(42);
+ ContainerID id = ContainerID.valueOf(42);
containerAttribute.insert(1, id);
Assert.assertEquals(1,
containerAttribute.getCollection(1).size());
@@ -47,7 +47,7 @@ public class TestContainerAttribute {
// Insert again and verify that it overwrites an existing value.
ContainerID newId =
- new ContainerID(42);
+ ContainerID.valueOf(42);
containerAttribute.insert(1, newId);
Assert.assertEquals(1,
containerAttribute.getCollection(1).size());
@@ -59,7 +59,7 @@ public class TestContainerAttribute {
ContainerAttribute<Integer> containerAttribute = new ContainerAttribute<>();
for (int x = 1; x < 42; x++) {
- containerAttribute.insert(1, new ContainerID(x));
+ containerAttribute.insert(1, ContainerID.valueOf(x));
}
Assert.assertTrue(containerAttribute.hasKey(1));
for (int x = 1; x < 42; x++) {
@@ -67,7 +67,7 @@ public class TestContainerAttribute {
}
Assert.assertFalse(containerAttribute.hasContainerID(1,
- new ContainerID(42)));
+ ContainerID.valueOf(42)));
}
@Test
@@ -76,7 +76,7 @@ public class TestContainerAttribute {
ContainerAttribute<String> containerAttribute = new ContainerAttribute<>();
for (String k : keyslist) {
for (int x = 1; x < 101; x++) {
- containerAttribute.insert(k, new ContainerID(x));
+ containerAttribute.insert(k, ContainerID.valueOf(x));
}
}
for (String k : keyslist) {
@@ -96,16 +96,16 @@ public class TestContainerAttribute {
for (String k : keyslist) {
for (int x = 1; x < 101; x++) {
- containerAttribute.insert(k, new ContainerID(x));
+ containerAttribute.insert(k, ContainerID.valueOf(x));
}
}
for (int x = 1; x < 101; x += 2) {
- containerAttribute.remove("Key1", new ContainerID(x));
+ containerAttribute.remove("Key1", ContainerID.valueOf(x));
}
for (int x = 1; x < 101; x += 2) {
Assert.assertFalse(containerAttribute.hasContainerID("Key1",
- new ContainerID(x)));
+ ContainerID.valueOf(x)));
}
Assert.assertEquals(100,
@@ -125,7 +125,7 @@ public class TestContainerAttribute {
String key3 = "Key3";
ContainerAttribute<String> containerAttribute = new ContainerAttribute<>();
- ContainerID id = new ContainerID(42);
+ ContainerID id = ContainerID.valueOf(42);
containerAttribute.insert(key1, id);
Assert.assertTrue(containerAttribute.hasContainerID(key1, id));
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestDeadNodeHandler.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestDeadNodeHandler.java
index 50b962d..3d77e9d 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestDeadNodeHandler.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestDeadNodeHandler.java
@@ -193,19 +193,19 @@ public class TestDeadNodeHandler {
deadNodeHandler.onMessage(datanode1, publisher);
Set<ContainerReplica> container1Replicas = containerManager
- .getContainerReplicas(new ContainerID(container1.getContainerID()));
+ .getContainerReplicas(ContainerID.valueOf(container1.getContainerID()));
Assert.assertEquals(1, container1Replicas.size());
Assert.assertEquals(datanode2,
container1Replicas.iterator().next().getDatanodeDetails());
Set<ContainerReplica> container2Replicas = containerManager
- .getContainerReplicas(new ContainerID(container2.getContainerID()));
+ .getContainerReplicas(ContainerID.valueOf(container2.getContainerID()));
Assert.assertEquals(1, container2Replicas.size());
Assert.assertEquals(datanode2,
container2Replicas.iterator().next().getDatanodeDetails());
Set<ContainerReplica> container3Replicas = containerManager
- .getContainerReplicas(new ContainerID(container3.getContainerID()));
+ .getContainerReplicas(container3.containerID());
Assert.assertEquals(1, container3Replicas.size());
Assert.assertEquals(datanode3,
container3Replicas.iterator().next().getDatanodeDetails());
@@ -216,7 +216,7 @@ public class TestDeadNodeHandler {
throws ContainerNotFoundException {
for (DatanodeDetails datanode : datanodes) {
contManager.updateContainerReplica(
- new ContainerID(container.getContainerID()),
+ ContainerID.valueOf(container.getContainerID()),
ContainerReplica.newBuilder()
.setContainerState(ContainerReplicaProto.State.OPEN)
.setContainerID(container.containerID())
@@ -236,7 +236,7 @@ public class TestDeadNodeHandler {
nodeManager
.setContainers(datanode,
Arrays.stream(containers)
- .map(container -> new ContainerID(container.getContainerID()))
+ .map(ContainerInfo::containerID)
.collect(Collectors.toSet()));
}
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/states/TestNode2ContainerMap.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/states/TestNode2ContainerMap.java
index 77ed907..bc1b3dd 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/states/TestNode2ContainerMap.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/states/TestNode2ContainerMap.java
@@ -52,7 +52,7 @@ public class TestNode2ContainerMap {
TreeSet<ContainerID> currentSet = new TreeSet<>();
for (int cnIndex = 1; cnIndex <= CONTAINER_COUNT; cnIndex++) {
long currentCnIndex = (long) (dnIndex * CONTAINER_COUNT) + cnIndex;
- currentSet.add(new ContainerID(currentCnIndex));
+ currentSet.add(ContainerID.valueOf(currentCnIndex));
}
testData.put(UUID.randomUUID(), currentSet);
}
@@ -206,7 +206,7 @@ public class TestNode2ContainerMap {
TreeSet<ContainerID> addedContainers = new TreeSet<>();
for (int x = 1; x <= newCount; x++) {
long cTemp = last.getId() + x;
- addedContainers.add(new ContainerID(cTemp));
+ addedContainers.add(ContainerID.valueOf(cTemp));
}
// This set is the super set of existing containers and new containers.
@@ -250,7 +250,7 @@ public class TestNode2ContainerMap {
for (int x = 0; x < removeCount; x++) {
int startBase = (int) first.getId();
long cTemp = r.nextInt(values.size());
- removedContainers.add(new ContainerID(cTemp + startBase));
+ removedContainers.add(ContainerID.valueOf(cTemp + startBase));
}
// This set is a new set with some containers removed.
@@ -282,7 +282,7 @@ public class TestNode2ContainerMap {
Set<ContainerID> insertedSet = new TreeSet<>();
// Insert nodes from 1..30
for (int x = 1; x <= 30; x++) {
- insertedSet.add(new ContainerID(x));
+ insertedSet.add(ContainerID.valueOf(x));
}
@@ -296,7 +296,7 @@ public class TestNode2ContainerMap {
for (int x = 0; x < removeCount; x++) {
int startBase = (int) first.getId();
long cTemp = r.nextInt(values.size());
- removedContainers.add(new ContainerID(cTemp + startBase));
+ removedContainers.add(ContainerID.valueOf(cTemp + startBase));
}
Set<ContainerID> newSet = new TreeSet<>(values);
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineManagerImpl.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineManagerImpl.java
index a8f03bb..642378f 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineManagerImpl.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineManagerImpl.java
@@ -162,7 +162,7 @@ public class TestPipelineManagerImpl {
PipelineID pipelineID = pipeline.getId();
pipelineManager.openPipeline(pipelineID);
- pipelineManager.addContainerToPipeline(pipelineID, ContainerID.valueof(1));
+ pipelineManager.addContainerToPipeline(pipelineID, ContainerID.valueOf(1));
Assert.assertTrue(pipelineManager
.getPipelines(HddsProtos.ReplicationType.RATIS,
HddsProtos.ReplicationFactor.THREE,
@@ -262,7 +262,7 @@ public class TestPipelineManagerImpl {
// Open the pipeline
pipelineManager.openPipeline(pipeline.getId());
pipelineManager
- .addContainerToPipeline(pipeline.getId(), ContainerID.valueof(1));
+ .addContainerToPipeline(pipeline.getId(), ContainerID.valueOf(1));
Assert.assertTrue(pipelineManager
.getPipelines(HddsProtos.ReplicationType.RATIS,
HddsProtos.ReplicationFactor.THREE,
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineStateManager.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineStateManager.java
index 8252e2c..43d5398 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineStateManager.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineStateManager.java
@@ -290,14 +290,14 @@ public class TestPipelineStateManager {
stateManager.addPipeline(pipeline);
pipeline = stateManager.getPipeline(pipeline.getId());
stateManager.addContainerToPipeline(pipeline.getId(),
- ContainerID.valueof(++containerID));
+ ContainerID.valueOf(++containerID));
// move pipeline to open state
stateManager.openPipeline(pipeline.getId());
stateManager.addContainerToPipeline(pipeline.getId(),
- ContainerID.valueof(++containerID));
+ ContainerID.valueOf(++containerID));
stateManager.addContainerToPipeline(pipeline.getId(),
- ContainerID.valueof(++containerID));
+ ContainerID.valueOf(++containerID));
//verify the number of containers returned
Set<ContainerID> containerIDs =
@@ -307,7 +307,7 @@ public class TestPipelineStateManager {
removePipeline(pipeline);
try {
stateManager.addContainerToPipeline(pipeline.getId(),
- ContainerID.valueof(++containerID));
+ ContainerID.valueOf(++containerID));
Assert.fail("Container should not have been added");
} catch (IOException e) {
// Can not add a container to removed pipeline
@@ -322,7 +322,7 @@ public class TestPipelineStateManager {
// close the pipeline
stateManager.openPipeline(pipeline.getId());
stateManager
- .addContainerToPipeline(pipeline.getId(), ContainerID.valueof(1));
+ .addContainerToPipeline(pipeline.getId(), ContainerID.valueOf(1));
try {
stateManager.removePipeline(pipeline.getId());
@@ -347,26 +347,26 @@ public class TestPipelineStateManager {
stateManager.openPipeline(pipeline.getId());
stateManager.addContainerToPipeline(pipeline.getId(),
- ContainerID.valueof(containerID));
+ ContainerID.valueOf(containerID));
Assert.assertEquals(1, stateManager.getContainers(pipeline.getId()).size());
stateManager.removeContainerFromPipeline(pipeline.getId(),
- ContainerID.valueof(containerID));
+ ContainerID.valueOf(containerID));
Assert.assertEquals(0, stateManager.getContainers(pipeline.getId()).size());
// add two containers in the pipeline
stateManager.addContainerToPipeline(pipeline.getId(),
- ContainerID.valueof(++containerID));
+ ContainerID.valueOf(++containerID));
stateManager.addContainerToPipeline(pipeline.getId(),
- ContainerID.valueof(++containerID));
+ ContainerID.valueOf(++containerID));
Assert.assertEquals(2, stateManager.getContainers(pipeline.getId()).size());
// move pipeline to closing state
stateManager.finalizePipeline(pipeline.getId());
stateManager.removeContainerFromPipeline(pipeline.getId(),
- ContainerID.valueof(containerID));
+ ContainerID.valueOf(containerID));
stateManager.removeContainerFromPipeline(pipeline.getId(),
- ContainerID.valueof(--containerID));
+ ContainerID.valueOf(--containerID));
Assert.assertEquals(0, stateManager.getContainers(pipeline.getId()).size());
// clean up
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestSCMPipelineManager.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestSCMPipelineManager.java
index 7f53736..9cc9b3e 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestSCMPipelineManager.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestSCMPipelineManager.java
@@ -183,7 +183,7 @@ public class TestSCMPipelineManager {
HddsProtos.ReplicationFactor.THREE);
pipelineManager.openPipeline(pipeline.getId());
pipelineManager
- .addContainerToPipeline(pipeline.getId(), ContainerID.valueof(1));
+ .addContainerToPipeline(pipeline.getId(), ContainerID.valueOf(1));
pipelineManager.closePipeline(pipeline, false);
pipelineManager.close();
@@ -428,7 +428,7 @@ public class TestSCMPipelineManager {
final PipelineID pid = pipeline.getId();
pipelineManager.openPipeline(pid);
- pipelineManager.addContainerToPipeline(pid, ContainerID.valueof(1));
+ pipelineManager.addContainerToPipeline(pid, ContainerID.valueOf(1));
Assert.assertTrue(pipelineManager
.getPipelines(HddsProtos.ReplicationType.RATIS,
diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/KeyOutputStream.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/KeyOutputStream.java
index 03cdb72..adffbd8 100644
--- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/KeyOutputStream.java
+++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/KeyOutputStream.java
@@ -324,7 +324,7 @@ public class KeyOutputStream extends OutputStream {
// if the container needs to be excluded , add the container to the
// exclusion list , otherwise add the pipeline to the exclusion list
if (containerExclusionException) {
- excludeList.addConatinerId(ContainerID.valueof(containerId));
+ excludeList.addConatinerId(ContainerID.valueOf(containerId));
} else {
excludeList.addPipeline(pipelineId);
}
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerStateManagerIntegration.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerStateManagerIntegration.java
index 3842818..70f4152 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerStateManagerIntegration.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerStateManagerIntegration.java
@@ -252,7 +252,7 @@ public class TestContainerStateManagerIntegration {
ContainerInfo info = containerManager
.getMatchingContainer(OzoneConsts.GB * 3, OzoneConsts.OZONE,
container1.getPipeline(),
- new HashSet<>(Collections.singletonList(new ContainerID(1))));
+ new HashSet<>(Collections.singletonList(ContainerID.valueOf(1))));
Assert.assertNotEquals(container1.getContainerInfo().getContainerID(),
info.getContainerID());
}
@@ -277,8 +277,8 @@ public class TestContainerStateManagerIntegration {
ContainerInfo info = containerManager
.getMatchingContainer(OzoneConsts.GB * 3, OzoneConsts.OZONE,
container1.getPipeline(),
- new HashSet<>(Arrays.asList(new ContainerID(1), new
- ContainerID(2), new ContainerID(3))));
+ new HashSet<>(Arrays.asList(ContainerID.valueOf(1),
+ ContainerID.valueOf(2), ContainerID.valueOf(3))));
Assert.assertEquals(info.getContainerID(), 4);
}
@@ -418,7 +418,7 @@ public class TestContainerStateManagerIntegration {
.setUuid(UUID.randomUUID()).build();
// Test 1: no replica's exist
- ContainerID containerID = ContainerID.valueof(RandomUtils.nextLong());
+ ContainerID containerID = ContainerID.valueOf(RandomUtils.nextLong());
Set<ContainerReplica> replicaSet;
try {
containerStateManager.getContainerReplicas(containerID);
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/container/metrics/TestSCMContainerManagerMetrics.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/container/metrics/TestSCMContainerManagerMetrics.java
index cbe84b6..cc6824e 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/container/metrics/TestSCMContainerManagerMetrics.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/container/metrics/TestSCMContainerManagerMetrics.java
@@ -113,7 +113,7 @@ public class TestSCMContainerManagerMetrics {
"NumSuccessfulDeleteContainers", metrics);
containerManager.deleteContainer(
- new ContainerID(containerInfo.getContainerID()));
+ ContainerID.valueOf(containerInfo.getContainerID()));
metrics = getMetrics(SCMContainerManagerMetrics.class.getSimpleName());
Assert.assertEquals(getLongCounter("NumSuccessfulDeleteContainers",
@@ -123,7 +123,7 @@ public class TestSCMContainerManagerMetrics {
try {
// Give random container to delete.
containerManager.deleteContainer(
- new ContainerID(RandomUtils.nextLong(10000, 20000)));
+ ContainerID.valueOf(RandomUtils.nextLong(10000, 20000)));
fail("testContainerOpsMetrics failed");
} catch (IOException ex) {
// Here it should fail, so it should have the old metric value.
@@ -135,7 +135,7 @@ public class TestSCMContainerManagerMetrics {
}
containerManager.listContainer(
- new ContainerID(containerInfo.getContainerID()), 1);
+ ContainerID.valueOf(containerInfo.getContainerID()), 1);
metrics = getMetrics(SCMContainerManagerMetrics.class.getSimpleName());
Assert.assertEquals(getLongCounter("NumListContainerOps",
metrics), 1);
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/OzoneTestUtils.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/OzoneTestUtils.java
index dd543ed..69615e8 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/OzoneTestUtils.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/OzoneTestUtils.java
@@ -58,21 +58,21 @@ public final class OzoneTestUtils {
StorageContainerManager scm) throws Exception {
performOperationOnKeyContainers((blockID) -> {
if (scm.getContainerManager()
- .getContainer(ContainerID.valueof(blockID.getContainerID()))
+ .getContainer(ContainerID.valueOf(blockID.getContainerID()))
.getState() == HddsProtos.LifeCycleState.OPEN) {
scm.getContainerManager()
- .updateContainerState(ContainerID.valueof(blockID.getContainerID()),
+ .updateContainerState(ContainerID.valueOf(blockID.getContainerID()),
HddsProtos.LifeCycleEvent.FINALIZE);
}
if (scm.getContainerManager()
- .getContainer(ContainerID.valueof(blockID.getContainerID()))
+ .getContainer(ContainerID.valueOf(blockID.getContainerID()))
.getState() == HddsProtos.LifeCycleState.CLOSING) {
scm.getContainerManager()
- .updateContainerState(ContainerID.valueof(blockID.getContainerID()),
+ .updateContainerState(ContainerID.valueOf(blockID.getContainerID()),
HddsProtos.LifeCycleEvent.CLOSE);
}
Assert.assertFalse(scm.getContainerManager()
- .getContainer(ContainerID.valueof(blockID.getContainerID()))
+ .getContainer(ContainerID.valueOf(blockID.getContainerID()))
.isOpen());
}, omKeyLocationInfoGroups);
}
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerReplicationEndToEnd.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerReplicationEndToEnd.java
index d9f7578..9fc8927 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerReplicationEndToEnd.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerReplicationEndToEnd.java
@@ -160,7 +160,7 @@ public class TestContainerReplicationEndToEnd {
long containerID = omKeyLocationInfo.getContainerID();
PipelineID pipelineID =
cluster.getStorageContainerManager().getContainerManager()
- .getContainer(new ContainerID(containerID)).getPipelineID();
+ .getContainer(ContainerID.valueOf(containerID)).getPipelineID();
Pipeline pipeline =
cluster.getStorageContainerManager().getPipelineManager()
.getPipeline(pipelineID);
@@ -168,13 +168,13 @@ public class TestContainerReplicationEndToEnd {
HddsProtos.LifeCycleState containerState =
cluster.getStorageContainerManager().getContainerManager()
- .getContainer(new ContainerID(containerID)).getState();
+ .getContainer(ContainerID.valueOf(containerID)).getState();
LoggerFactory.getLogger(TestContainerReplicationEndToEnd.class).info(
"Current Container State is {}", containerState);
if ((containerState != HddsProtos.LifeCycleState.CLOSING) &&
(containerState != HddsProtos.LifeCycleState.CLOSED)) {
cluster.getStorageContainerManager().getContainerManager()
- .updateContainerState(new ContainerID(containerID),
+ .updateContainerState(ContainerID.valueOf(containerID),
HddsProtos.LifeCycleEvent.FINALIZE);
}
// wait for container to move to OPEN state in SCM
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestFailureHandlingByClient.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestFailureHandlingByClient.java
index a9c0706..2de63d5 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestFailureHandlingByClient.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestFailureHandlingByClient.java
@@ -177,7 +177,7 @@ public class TestFailureHandlingByClient {
long containerId = locationInfoList.get(0).getContainerID();
ContainerInfo container = cluster.getStorageContainerManager()
.getContainerManager()
- .getContainer(ContainerID.valueof(containerId));
+ .getContainer(ContainerID.valueOf(containerId));
Pipeline pipeline =
cluster.getStorageContainerManager().getPipelineManager()
.getPipeline(container.getPipelineID());
@@ -217,7 +217,7 @@ public class TestFailureHandlingByClient {
BlockID blockId = locationInfoList.get(0).getBlockID();
ContainerInfo container =
cluster.getStorageContainerManager().getContainerManager()
- .getContainer(ContainerID.valueof(containerId));
+ .getContainer(ContainerID.valueOf(containerId));
Pipeline pipeline =
cluster.getStorageContainerManager().getPipelineManager()
.getPipeline(container.getPipelineID());
@@ -280,7 +280,7 @@ public class TestFailureHandlingByClient {
key.flush();
Assert.assertTrue(keyOutputStream.getExcludeList().getContainerIds()
- .contains(ContainerID.valueof(containerId)));
+ .contains(ContainerID.valueOf(containerId)));
Assert.assertTrue(
keyOutputStream.getExcludeList().getDatanodes().isEmpty());
Assert.assertTrue(
@@ -328,7 +328,7 @@ public class TestFailureHandlingByClient {
BlockID blockId = streamEntryList.get(0).getBlockID();
ContainerInfo container =
cluster.getStorageContainerManager().getContainerManager()
- .getContainer(ContainerID.valueof(containerId));
+ .getContainer(ContainerID.valueOf(containerId));
Pipeline pipeline =
cluster.getStorageContainerManager().getPipelineManager()
.getPipeline(container.getPipelineID());
@@ -391,7 +391,7 @@ public class TestFailureHandlingByClient {
BlockID blockId = streamEntryList.get(0).getBlockID();
ContainerInfo container =
cluster.getStorageContainerManager().getContainerManager()
- .getContainer(ContainerID.valueof(containerId));
+ .getContainer(ContainerID.valueOf(containerId));
Pipeline pipeline =
cluster.getStorageContainerManager().getPipelineManager()
.getPipeline(container.getPipelineID());
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestFailureHandlingByClientFlushDelay.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestFailureHandlingByClientFlushDelay.java
index 76027f7..57158bb 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestFailureHandlingByClientFlushDelay.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestFailureHandlingByClientFlushDelay.java
@@ -183,7 +183,7 @@ public class TestFailureHandlingByClientFlushDelay {
BlockID blockId = streamEntryList.get(0).getBlockID();
ContainerInfo container =
cluster.getStorageContainerManager().getContainerManager()
- .getContainer(ContainerID.valueof(containerId));
+ .getContainer(ContainerID.valueOf(containerId));
Pipeline pipeline =
cluster.getStorageContainerManager().getPipelineManager()
.getPipeline(container.getPipelineID());
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestMultiBlockWritesWithDnFailures.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestMultiBlockWritesWithDnFailures.java
index b435ce9..2a97dab 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestMultiBlockWritesWithDnFailures.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestMultiBlockWritesWithDnFailures.java
@@ -159,7 +159,7 @@ public class TestMultiBlockWritesWithDnFailures {
long containerId = locationInfoList.get(1).getContainerID();
ContainerInfo container = cluster.getStorageContainerManager()
.getContainerManager()
- .getContainer(ContainerID.valueof(containerId));
+ .getContainer(ContainerID.valueOf(containerId));
Pipeline pipeline =
cluster.getStorageContainerManager().getPipelineManager()
.getPipeline(container.getPipelineID());
@@ -207,7 +207,7 @@ public class TestMultiBlockWritesWithDnFailures {
BlockID blockId = streamEntryList.get(0).getBlockID();
ContainerInfo container =
cluster.getStorageContainerManager().getContainerManager()
- .getContainer(ContainerID.valueof(containerId));
+ .getContainer(ContainerID.valueOf(containerId));
Pipeline pipeline =
cluster.getStorageContainerManager().getPipelineManager()
.getPipeline(container.getPipelineID());
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneClientRetriesOnException.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneClientRetriesOnException.java
index dd871f3..76861d4 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneClientRetriesOnException.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneClientRetriesOnException.java
@@ -155,7 +155,7 @@ public class TestOzoneClientRetriesOnException {
Assert.assertTrue(keyOutputStream.getStreamEntries().size() == 1);
ContainerInfo container =
cluster.getStorageContainerManager().getContainerManager()
- .getContainer(ContainerID.valueof(containerID));
+ .getContainer(ContainerID.valueOf(containerID));
Pipeline pipeline =
cluster.getStorageContainerManager().getPipelineManager()
.getPipeline(container.getPipelineID());
@@ -201,7 +201,7 @@ public class TestOzoneClientRetriesOnException {
containerID = entry.getBlockID().getContainerID();
ContainerInfo container =
cluster.getStorageContainerManager().getContainerManager()
- .getContainer(ContainerID.valueof(containerID));
+ .getContainer(ContainerID.valueOf(containerID));
Pipeline pipeline =
cluster.getStorageContainerManager().getPipelineManager()
.getPipeline(container.getPipelineID());
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneClientRetriesOnExceptionFlushDelay.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneClientRetriesOnExceptionFlushDelay.java
index e202ca1..a96cbe6 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneClientRetriesOnExceptionFlushDelay.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneClientRetriesOnExceptionFlushDelay.java
@@ -147,7 +147,7 @@ public class TestOzoneClientRetriesOnExceptionFlushDelay {
Assert.assertTrue(keyOutputStream.getStreamEntries().size() == 1);
ContainerInfo container =
cluster.getStorageContainerManager().getContainerManager()
- .getContainer(ContainerID.valueof(containerID));
+ .getContainer(ContainerID.valueOf(containerID));
Pipeline pipeline =
cluster.getStorageContainerManager().getPipelineManager()
.getPipeline(container.getPipelineID());
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientAbstract.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientAbstract.java
index b7b75a4..24b8620 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientAbstract.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientAbstract.java
@@ -1542,7 +1542,7 @@ public abstract class TestOzoneRpcClientAbstract {
// Second, sum the data size from chunks in Container via containerID
// and localID, make sure the size equals to the size from keyDetails.
ContainerInfo container = cluster.getStorageContainerManager()
- .getContainerManager().getContainer(ContainerID.valueof(containerID));
+ .getContainerManager().getContainer(ContainerID.valueOf(containerID));
Pipeline pipeline = cluster.getStorageContainerManager()
.getPipelineManager().getPipeline(container.getPipelineID());
List<DatanodeDetails> datanodes = pipeline.getNodes();
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestReadRetries.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestReadRetries.java
index 9148459..5e8e5cc 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestReadRetries.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestReadRetries.java
@@ -169,7 +169,7 @@ public class TestReadRetries {
.assertEquals(value.getBytes().length, keyLocations.get(0).getLength());
ContainerInfo container = cluster.getStorageContainerManager()
- .getContainerManager().getContainer(ContainerID.valueof(containerID));
+ .getContainerManager().getContainer(ContainerID.valueOf(containerID));
Pipeline pipeline = cluster.getStorageContainerManager()
.getPipelineManager().getPipeline(container.getPipelineID());
List<DatanodeDetails> datanodes = pipeline.getNodes();
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/TestHelper.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/TestHelper.java
index fab2ea3..21bbc04 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/TestHelper.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/TestHelper.java
@@ -174,7 +174,7 @@ public final class TestHelper {
for (long containerID : containerIdList) {
ContainerInfo container =
cluster.getStorageContainerManager().getContainerManager()
- .getContainer(ContainerID.valueof(containerID));
+ .getContainer(ContainerID.valueOf(containerID));
Pipeline pipeline =
cluster.getStorageContainerManager().getPipelineManager()
.getPipeline(container.getPipelineID());
@@ -250,7 +250,7 @@ public final class TestHelper {
for (long containerID : containerIdList) {
ContainerInfo container =
cluster.getStorageContainerManager().getContainerManager()
- .getContainer(ContainerID.valueof(containerID));
+ .getContainer(ContainerID.valueOf(containerID));
Pipeline pipeline =
cluster.getStorageContainerManager().getPipelineManager()
.getPipeline(container.getPipelineID());
@@ -271,7 +271,7 @@ public final class TestHelper {
// send the order to close the container
cluster.getStorageContainerManager().getEventQueue()
.fireEvent(SCMEvents.CLOSE_CONTAINER,
- ContainerID.valueof(containerID));
+ ContainerID.valueOf(containerID));
}
}
int index = 0;
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerByPipeline.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerByPipeline.java
index 6b40179..853f2cd 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerByPipeline.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerByPipeline.java
@@ -122,7 +122,7 @@ public class TestCloseContainerByPipeline {
long containerID = omKeyLocationInfo.getContainerID();
ContainerInfo container = cluster.getStorageContainerManager()
- .getContainerManager().getContainer(ContainerID.valueof(containerID));
+ .getContainerManager().getContainer(ContainerID.valueOf(containerID));
Pipeline pipeline = cluster.getStorageContainerManager()
.getPipelineManager().getPipeline(container.getPipelineID());
List<DatanodeDetails> datanodes = pipeline.getNodes();
@@ -179,7 +179,7 @@ public class TestCloseContainerByPipeline {
long containerID = omKeyLocationInfo.getContainerID();
ContainerInfo container = cluster.getStorageContainerManager()
- .getContainerManager().getContainer(ContainerID.valueof(containerID));
+ .getContainerManager().getContainer(ContainerID.valueOf(containerID));
Pipeline pipeline = cluster.getStorageContainerManager()
.getPipelineManager().getPipeline(container.getPipelineID());
List<DatanodeDetails> datanodes = pipeline.getNodes();
@@ -232,7 +232,7 @@ public class TestCloseContainerByPipeline {
long containerID = omKeyLocationInfo.getContainerID();
ContainerInfo container = cluster.getStorageContainerManager()
- .getContainerManager().getContainer(ContainerID.valueof(containerID));
+ .getContainerManager().getContainer(ContainerID.valueOf(containerID));
Pipeline pipeline = cluster.getStorageContainerManager()
.getPipelineManager().getPipeline(container.getPipelineID());
List<DatanodeDetails> datanodes = pipeline.getNodes();
@@ -295,7 +295,7 @@ public class TestCloseContainerByPipeline {
long containerID = omKeyLocationInfo.getContainerID();
ContainerInfo container = cluster.getStorageContainerManager()
- .getContainerManager().getContainer(ContainerID.valueof(containerID));
+ .getContainerManager().getContainer(ContainerID.valueOf(containerID));
Pipeline pipeline = cluster.getStorageContainerManager()
.getPipelineManager().getPipeline(container.getPipelineID());
List<DatanodeDetails> datanodes = pipeline.getNodes();
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerHandler.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerHandler.java
index 831c729..8bd054b 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerHandler.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerHandler.java
@@ -107,7 +107,7 @@ public class TestCloseContainerHandler {
cluster.getOzoneManager().lookupKey(keyArgs).getKeyLocationVersions()
.get(0).getBlocksLatestVersionOnly().get(0);
- ContainerID containerId = ContainerID.valueof(
+ ContainerID containerId = ContainerID.valueOf(
omKeyLocationInfo.getContainerID());
ContainerInfo container = cluster.getStorageContainerManager()
.getContainerManager().getContainer(containerId);
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestDeleteContainerHandler.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestDeleteContainerHandler.java
index 28b58d9..61c3369 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestDeleteContainerHandler.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestDeleteContainerHandler.java
@@ -252,7 +252,7 @@ public class TestDeleteContainerHandler {
cluster.getOzoneManager().lookupKey(keyArgs).getKeyLocationVersions()
.get(0).getBlocksLatestVersionOnly().get(0);
- return ContainerID.valueof(
+ return ContainerID.valueOf(
omKeyLocationInfo.getContainerID());
}
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/dn/scrubber/TestDataScrubber.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/dn/scrubber/TestDataScrubber.java
index 631d944..fbdee7e 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/dn/scrubber/TestDataScrubber.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/dn/scrubber/TestDataScrubber.java
@@ -182,7 +182,7 @@ public class TestDataScrubber {
ContainerManager cm = cluster.getStorageContainerManager()
.getContainerManager();
Set<ContainerReplica> replicas = cm.getContainerReplicas(
- ContainerID.valueof(c.getContainerData().getContainerID()));
+ ContainerID.valueOf(c.getContainerData().getContainerID()));
Assert.assertEquals(1, replicas.size());
ContainerReplica r = replicas.iterator().next();
Assert.assertEquals(StorageContainerDatanodeProtocolProtos.
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestContainerReportWithKeys.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestContainerReportWithKeys.java
index 7f049a3..1a4dddc 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestContainerReportWithKeys.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestContainerReportWithKeys.java
@@ -128,7 +128,7 @@ public class TestContainerReportWithKeys {
ContainerInfo cinfo = scm.getContainerInfo(keyInfo.getContainerID());
Set<ContainerReplica> replicas =
scm.getContainerManager().getContainerReplicas(
- new ContainerID(keyInfo.getContainerID()));
+ ContainerID.valueOf(keyInfo.getContainerID()));
Assert.assertTrue(replicas.size() == 1);
replicas.stream().forEach(rp ->
Assert.assertTrue(rp.getDatanodeDetails().getParent() != null));
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestReconAsPassiveScm.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestReconAsPassiveScm.java
index 9092cc5..ecb2a46 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestReconAsPassiveScm.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/recon/TestReconAsPassiveScm.java
@@ -202,6 +202,6 @@ public class TestReconAsPassiveScm {
LambdaTestUtils.await(90000, 5000,
() -> (newReconScm.getContainerManager()
- .exists(ContainerID.valueof(containerID))));
+ .exists(ContainerID.valueOf(containerID))));
}
}
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestSCMMXBean.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestSCMMXBean.java
index 394c102..3afe483 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestSCMMXBean.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestSCMMXBean.java
@@ -159,16 +159,18 @@ public class TestSCMMXBean {
if (i % 2 == 0) {
containerID = containerInfoList.get(i).getContainerID();
scmContainerManager.updateContainerState(
- new ContainerID(containerID), HddsProtos.LifeCycleEvent.FINALIZE);
- assertEquals(scmContainerManager.getContainer(new ContainerID(
+ ContainerID.valueOf(containerID),
+ HddsProtos.LifeCycleEvent.FINALIZE);
+ assertEquals(scmContainerManager.getContainer(ContainerID.valueOf(
containerID)).getState(), HddsProtos.LifeCycleState.CLOSING);
} else {
containerID = containerInfoList.get(i).getContainerID();
scmContainerManager.updateContainerState(
- new ContainerID(containerID), HddsProtos.LifeCycleEvent.FINALIZE);
+ ContainerID.valueOf(containerID),
+ HddsProtos.LifeCycleEvent.FINALIZE);
scmContainerManager.updateContainerState(
- new ContainerID(containerID), HddsProtos.LifeCycleEvent.CLOSE);
- assertEquals(scmContainerManager.getContainer(new ContainerID(
+ ContainerID.valueOf(containerID), HddsProtos.LifeCycleEvent.CLOSE);
+ assertEquals(scmContainerManager.getContainer(ContainerID.valueOf(
containerID)).getState(), HddsProtos.LifeCycleState.CLOSED);
}
diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/ContainerEndpoint.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/ContainerEndpoint.java
index 1778b84..10522cb 100644
--- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/ContainerEndpoint.java
+++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/ContainerEndpoint.java
@@ -246,7 +246,7 @@ public class ContainerEndpoint {
long containerID = container.getContainerId();
try {
ContainerInfo containerInfo =
- containerManager.getContainer(new ContainerID(containerID));
+ containerManager.getContainer(ContainerID.valueOf(containerID));
long keyCount = containerInfo.getNumberOfKeys();
UUID pipelineID = containerInfo.getPipelineID().getId();
@@ -307,7 +307,7 @@ public class ContainerEndpoint {
for (UnhealthyContainers c : containers) {
long containerID = c.getContainerId();
ContainerInfo containerInfo =
- containerManager.getContainer(new ContainerID(containerID));
+ containerManager.getContainer(ContainerID.valueOf(containerID));
long keyCount = containerInfo.getNumberOfKeys();
UUID pipelineID = containerInfo.getPipelineID().getId();
List<ContainerHistory> datanodes =
diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/fsck/ContainerHealthTask.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/fsck/ContainerHealthTask.java
index 315dd5c..f005509 100644
--- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/fsck/ContainerHealthTask.java
+++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/fsck/ContainerHealthTask.java
@@ -97,7 +97,7 @@ public class ContainerHealthTask extends ReconScmTask {
private ContainerHealthStatus setCurrentContainer(long recordId)
throws ContainerNotFoundException {
ContainerInfo container =
- containerManager.getContainer(new ContainerID(recordId));
+ containerManager.getContainer(ContainerID.valueOf(recordId));
Set<ContainerReplica> replicas =
containerManager.getContainerReplicas(container.containerID());
return new ContainerHealthStatus(container, replicas, placementPolicy);
diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconContainerManager.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconContainerManager.java
index dff4709..c32ce05 100644
--- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconContainerManager.java
+++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconContainerManager.java
@@ -162,7 +162,7 @@ public class ReconContainerManager extends SCMContainerManager {
containerInfo.containerID(), ex);
getPipelineManager().removeContainerFromPipeline(
containerInfo.getPipelineID(),
- new ContainerID(containerInfo.getContainerID()));
+ ContainerID.valueOf(containerInfo.getContainerID()));
throw ex;
} finally {
getLock().unlock();
diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconContainerReportHandler.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconContainerReportHandler.java
index 228a657..391d2c5 100644
--- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconContainerReportHandler.java
+++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconContainerReportHandler.java
@@ -56,7 +56,7 @@ public class ReconContainerReportHandler extends ContainerReportHandler {
List<ContainerReplicaProto> reportsList = containerReport.getReportsList();
for (ContainerReplicaProto containerReplicaProto : reportsList) {
- final ContainerID id = ContainerID.valueof(
+ final ContainerID id = ContainerID.valueOf(
containerReplicaProto.getContainerID());
try {
containerManager.checkAndAddNewContainer(id,
diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconIncrementalContainerReportHandler.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconIncrementalContainerReportHandler.java
index 0262c8b..863ef46 100644
--- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconIncrementalContainerReportHandler.java
+++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconIncrementalContainerReportHandler.java
@@ -70,7 +70,7 @@ public class ReconIncrementalContainerReportHandler
for (ContainerReplicaProto replicaProto :
report.getReport().getReportList()) {
try {
- final ContainerID id = ContainerID.valueof(
+ final ContainerID id = ContainerID.valueOf(
replicaProto.getContainerID());
try {
containerManager.checkAndAddNewContainer(id, replicaProto.getState(),
diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestContainerEndpoint.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestContainerEndpoint.java
index 6ba6f56..514f919 100644
--- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestContainerEndpoint.java
+++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestContainerEndpoint.java
@@ -98,7 +98,7 @@ public class TestContainerEndpoint {
private boolean isSetupDone = false;
private ContainerSchemaManager containerSchemaManager;
private ReconOMMetadataManager reconOMMetadataManager;
- private ContainerID containerID = new ContainerID(1L);
+ private ContainerID containerID = ContainerID.valueOf(1L);
private PipelineID pipelineID;
private long keyCount = 5L;
diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/fsck/TestContainerHealthStatus.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/fsck/TestContainerHealthStatus.java
index 0a3546a..0bfa179 100644
--- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/fsck/TestContainerHealthStatus.java
+++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/fsck/TestContainerHealthStatus.java
@@ -52,7 +52,7 @@ public class TestContainerHealthStatus {
container = mock(ContainerInfo.class);
when(container.getReplicationFactor())
.thenReturn(HddsProtos.ReplicationFactor.THREE);
- when(container.containerID()).thenReturn(new ContainerID(123456));
+ when(container.containerID()).thenReturn(ContainerID.valueOf(123456));
when(container.getContainerID()).thenReturn((long)123456);
when(placementPolicy.validateContainerPlacement(
Mockito.anyList(), Mockito.anyInt()))
diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/fsck/TestContainerHealthTask.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/fsck/TestContainerHealthTask.java
index d97b143..890c242 100644
--- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/fsck/TestContainerHealthTask.java
+++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/fsck/TestContainerHealthTask.java
@@ -89,19 +89,19 @@ public class TestContainerHealthTask extends AbstractReconSqlDBTest {
when(containerManagerMock.getContainer(c.containerID())).thenReturn(c);
}
// Under replicated
- when(containerManagerMock.getContainerReplicas(new ContainerID(1L)))
+ when(containerManagerMock.getContainerReplicas(ContainerID.valueOf(1L)))
.thenReturn(getMockReplicas(1L, State.CLOSED, State.UNHEALTHY));
// return one UNHEALTHY replica for container ID 2 -> Missing
- when(containerManagerMock.getContainerReplicas(new ContainerID(2L)))
+ when(containerManagerMock.getContainerReplicas(ContainerID.valueOf(2L)))
.thenReturn(getMockReplicas(2L, State.UNHEALTHY));
// return 0 replicas for container ID 3 -> Missing
- when(containerManagerMock.getContainerReplicas(new ContainerID(3L)))
+ when(containerManagerMock.getContainerReplicas(ContainerID.valueOf(3L)))
.thenReturn(Collections.emptySet());
// Return 5 Healthy -> Over replicated
- when(containerManagerMock.getContainerReplicas(new ContainerID(4L)))
+ when(containerManagerMock.getContainerReplicas(ContainerID.valueOf(4L)))
.thenReturn(getMockReplicas(4L, State.CLOSED, State.CLOSED,
State.CLOSED, State.CLOSED, State.CLOSED));
@@ -110,11 +110,11 @@ public class TestContainerHealthTask extends AbstractReconSqlDBTest {
State.CLOSED, State.CLOSED, State.CLOSED);
placementMock.setMisRepWhenDnPresent(
misReplicas.iterator().next().getDatanodeDetails().getUuid());
- when(containerManagerMock.getContainerReplicas(new ContainerID(5L)))
+ when(containerManagerMock.getContainerReplicas(ContainerID.valueOf(5L)))
.thenReturn(misReplicas);
// Return 3 Healthy -> Healthy container
- when(containerManagerMock.getContainerReplicas(new ContainerID(6L)))
+ when(containerManagerMock.getContainerReplicas(ContainerID.valueOf(6L)))
.thenReturn(getMockReplicas(6L,
State.CLOSED, State.CLOSED, State.CLOSED));
@@ -164,20 +164,20 @@ public class TestContainerHealthTask extends AbstractReconSqlDBTest {
// Now run the job again, to check that relevant records are updated or
// removed as appropriate. Need to adjust the return value for all the mocks
// Under replicated -> Delta goes from 2 to 1
- when(containerManagerMock.getContainerReplicas(new ContainerID(1L)))
+ when(containerManagerMock.getContainerReplicas(ContainerID.valueOf(1L)))
.thenReturn(getMockReplicas(1L, State.CLOSED, State.CLOSED));
// ID 2 was missing - make it healthy now
- when(containerManagerMock.getContainerReplicas(new ContainerID(2L)))
+ when(containerManagerMock.getContainerReplicas(ContainerID.valueOf(2L)))
.thenReturn(getMockReplicas(2L,
State.CLOSED, State.CLOSED, State.CLOSED));
// return 0 replicas for container ID 3 -> Still Missing
- when(containerManagerMock.getContainerReplicas(new ContainerID(3L)))
+ when(containerManagerMock.getContainerReplicas(ContainerID.valueOf(3L)))
.thenReturn(Collections.emptySet());
// Return 4 Healthy -> Delta changes from -2 to -1
- when(containerManagerMock.getContainerReplicas(new ContainerID(4L)))
+ when(containerManagerMock.getContainerReplicas(ContainerID.valueOf(4L)))
.thenReturn(getMockReplicas(4L, State.CLOSED, State.CLOSED,
State.CLOSED, State.CLOSED));
@@ -215,7 +215,7 @@ public class TestContainerHealthTask extends AbstractReconSqlDBTest {
replicas.add(ContainerReplica.newBuilder()
.setDatanodeDetails(MockDatanodeDetails.randomDatanodeDetails())
.setContainerState(s)
- .setContainerID(new ContainerID(containerId))
+ .setContainerID(ContainerID.valueOf(containerId))
.setSequenceId(1)
.build());
}
@@ -229,7 +229,7 @@ public class TestContainerHealthTask extends AbstractReconSqlDBTest {
when(c.getContainerID()).thenReturn((long)i);
when(c.getReplicationFactor())
.thenReturn(HddsProtos.ReplicationFactor.THREE);
- when(c.containerID()).thenReturn(new ContainerID(i));
+ when(c.containerID()).thenReturn(ContainerID.valueOf(i));
containers.add(c);
}
return containers;
diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/fsck/TestContainerHealthTaskRecordGenerator.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/fsck/TestContainerHealthTaskRecordGenerator.java
index 62baf12..ccc9de3 100644
--- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/fsck/TestContainerHealthTaskRecordGenerator.java
+++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/fsck/TestContainerHealthTaskRecordGenerator.java
@@ -59,7 +59,7 @@ public class TestContainerHealthTaskRecordGenerator {
container = mock(ContainerInfo.class);
when(container.getReplicationFactor())
.thenReturn(HddsProtos.ReplicationFactor.THREE);
- when(container.containerID()).thenReturn(new ContainerID(123456));
+ when(container.containerID()).thenReturn(ContainerID.valueOf(123456));
when(container.getContainerID()).thenReturn((long)123456);
when(placementPolicy.validateContainerPlacement(
Mockito.anyList(), Mockito.anyInt()))
diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/scm/AbstractReconContainerManagerTest.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/scm/AbstractReconContainerManagerTest.java
index 783f42c..a5ee0a2 100644
--- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/scm/AbstractReconContainerManagerTest.java
+++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/scm/AbstractReconContainerManagerTest.java
@@ -113,7 +113,7 @@ public class AbstractReconContainerManagerTest {
Pipeline pipeline = getRandomPipeline();
getPipelineManager().addPipeline(pipeline);
- ContainerID containerID = new ContainerID(100L);
+ ContainerID containerID = ContainerID.valueOf(100L);
ContainerInfo containerInfo =
new ContainerInfo.Builder()
.setContainerID(containerID.getId())
@@ -140,7 +140,7 @@ public class AbstractReconContainerManagerTest {
protected ContainerWithPipeline getTestContainer(LifeCycleState state)
throws IOException {
- ContainerID containerID = new ContainerID(100L);
+ ContainerID containerID = ContainerID.valueOf(100L);
Pipeline pipeline = getRandomPipeline();
pipelineManager.addPipeline(pipeline);
ContainerInfo containerInfo =
@@ -159,7 +159,7 @@ public class AbstractReconContainerManagerTest {
protected ContainerWithPipeline getTestContainer(long id,
LifeCycleState state)
throws IOException {
- ContainerID containerID = new ContainerID(id);
+ ContainerID containerID = ContainerID.valueOf(id);
Pipeline pipeline = getRandomPipeline();
pipelineManager.addPipeline(pipeline);
ContainerInfo containerInfo =
diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/scm/TestReconContainerManager.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/scm/TestReconContainerManager.java
index 9f47779..49a5f39 100644
--- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/scm/TestReconContainerManager.java
+++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/scm/TestReconContainerManager.java
@@ -100,7 +100,7 @@ public class TestReconContainerManager
@Test
public void testCheckAndAddNewContainer() throws IOException {
- ContainerID containerID = new ContainerID(100L);
+ ContainerID containerID = ContainerID.valueOf(100L);
ReconContainerManager containerManager = getContainerManager();
assertFalse(containerManager.exists(containerID));
DatanodeDetails datanodeDetails = randomDatanodeDetails();
diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/scm/TestReconIncrementalContainerReportHandler.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/scm/TestReconIncrementalContainerReportHandler.java
index 1b42f21..97eaf96 100644
--- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/scm/TestReconIncrementalContainerReportHandler.java
+++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/scm/TestReconIncrementalContainerReportHandler.java
@@ -62,7 +62,7 @@ public class TestReconIncrementalContainerReportHandler
@Test
public void testProcessICR() throws IOException, NodeNotFoundException {
- ContainerID containerID = new ContainerID(100L);
+ ContainerID containerID = ContainerID.valueOf(100L);
DatanodeDetails datanodeDetails = randomDatanodeDetails();
IncrementalContainerReportFromDatanode reportMock =
mock(IncrementalContainerReportFromDatanode.class);
---------------------------------------------------------------------
To unsubscribe, e-mail: ozone-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: ozone-commits-help@hadoop.apache.org
[hadoop-ozone] 04/11: HDDS-4125. Pipeline is not removed when a
datanode goes stale.
Posted by na...@apache.org.
This is an automated email from the ASF dual-hosted git repository.
nanda pushed a commit to branch HDDS-2823
in repository https://gitbox.apache.org/repos/asf/hadoop-ozone.git
commit d482abf62fa1ec72da8de0007535b82466a1be57
Author: Glen Geng <gl...@tencent.com>
AuthorDate: Sat Oct 24 20:59:18 2020 +0530
HDDS-4125. Pipeline is not removed when a datanode goes stale.
---
.../org/apache/hadoop/hdds/scm/pipeline/PipelineManagerV2Impl.java | 5 +----
1 file changed, 1 insertion(+), 4 deletions(-)
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineManagerV2Impl.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineManagerV2Impl.java
index 069540c..4690f29 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineManagerV2Impl.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineManagerV2Impl.java
@@ -406,10 +406,7 @@ public final class PipelineManagerV2Impl implements PipelineManager {
public void scrubPipeline(ReplicationType type, ReplicationFactor factor)
throws IOException {
checkLeader();
- if (type != ReplicationType.RATIS || factor != ReplicationFactor.THREE) {
- // Only srub pipeline for RATIS THREE pipeline
- return;
- }
+
Instant currentTime = Instant.now();
Long pipelineScrubTimeoutInMills = conf.getTimeDuration(
ScmConfigKeys.OZONE_SCM_PIPELINE_ALLOCATED_TIMEOUT,
---------------------------------------------------------------------
To unsubscribe, e-mail: ozone-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: ozone-commits-help@hadoop.apache.org
[hadoop-ozone] 10/11: HDDS-4365.
SCMBlockLocationFailoverProxyProvider should use
ScmBlockLocationProtocolPB.class in RPC.setProtocolEngine.
Posted by na...@apache.org.
This is an automated email from the ASF dual-hosted git repository.
nanda pushed a commit to branch HDDS-2823
in repository https://gitbox.apache.org/repos/asf/hadoop-ozone.git
commit 43b87fe01625d56343b86e9baa1cc20f3ab73236
Author: Glen Geng <gl...@tencent.com>
AuthorDate: Sat Oct 24 21:33:16 2020 +0530
HDDS-4365. SCMBlockLocationFailoverProxyProvider should use ScmBlockLocationProtocolPB.class in RPC.setProtocolEngine.
---
.../hadoop/hdds/scm/proxy/SCMBlockLocationFailoverProxyProvider.java | 5 ++---
.../src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java | 3 ---
2 files changed, 2 insertions(+), 6 deletions(-)
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/proxy/SCMBlockLocationFailoverProxyProvider.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/proxy/SCMBlockLocationFailoverProxyProvider.java
index 1beb69e..a9ff4c1 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/proxy/SCMBlockLocationFailoverProxyProvider.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/proxy/SCMBlockLocationFailoverProxyProvider.java
@@ -22,7 +22,6 @@ import com.google.common.annotations.VisibleForTesting;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdds.conf.ConfigurationSource;
import org.apache.hadoop.hdds.scm.ScmConfigKeys;
-import org.apache.hadoop.hdds.scm.protocol.ScmBlockLocationProtocol;
import org.apache.hadoop.hdds.scm.protocolPB.ScmBlockLocationProtocolPB;
import org.apache.hadoop.hdds.utils.LegacyHadoopConfigurationSource;
import org.apache.hadoop.io.retry.FailoverProxyProvider;
@@ -80,7 +79,7 @@ public class SCMBlockLocationFailoverProxyProvider implements
public SCMBlockLocationFailoverProxyProvider(ConfigurationSource conf) {
this.conf = conf;
- this.scmVersion = RPC.getProtocolVersion(ScmBlockLocationProtocol.class);
+ this.scmVersion = RPC.getProtocolVersion(ScmBlockLocationProtocolPB.class);
this.scmServiceId = conf.getTrimmed(OZONE_SCM_SERVICE_IDS_KEY);
this.scmProxies = new HashMap<>();
this.scmProxyInfoMap = new HashMap<>();
@@ -257,7 +256,7 @@ public class SCMBlockLocationFailoverProxyProvider implements
InetSocketAddress scmAddress) throws IOException {
Configuration hadoopConf =
LegacyHadoopConfigurationSource.asHadoopConfiguration(conf);
- RPC.setProtocolEngine(hadoopConf, ScmBlockLocationProtocol.class,
+ RPC.setProtocolEngine(hadoopConf, ScmBlockLocationProtocolPB.class,
ProtobufRpcEngine.class);
return RPC.getProxy(ScmBlockLocationProtocolPB.class, scmVersion,
scmAddress, UserGroupInformation.getCurrentUser(), hadoopConf,
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java
index 898cd7c..307ec30 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java
@@ -66,7 +66,6 @@ import org.apache.hadoop.hdds.scm.container.common.helpers.ExcludeList;
import org.apache.hadoop.hdds.scm.protocol.ScmBlockLocationProtocol;
import org.apache.hadoop.hdds.scm.protocol.StorageContainerLocationProtocol;
import org.apache.hadoop.hdds.scm.protocolPB.ScmBlockLocationProtocolClientSideTranslatorPB;
-import org.apache.hadoop.hdds.scm.protocolPB.ScmBlockLocationProtocolPB;
import org.apache.hadoop.hdds.scm.protocolPB.StorageContainerLocationProtocolClientSideTranslatorPB;
import org.apache.hadoop.hdds.scm.protocolPB.StorageContainerLocationProtocolPB;
import org.apache.hadoop.hdds.scm.proxy.SCMBlockLocationFailoverProxyProvider;
@@ -822,8 +821,6 @@ public final class OzoneManager extends ServiceRuntimeInfoImpl
*/
private static ScmBlockLocationProtocol getScmBlockClient(
OzoneConfiguration conf) throws IOException {
- RPC.setProtocolEngine(conf, ScmBlockLocationProtocolPB.class,
- ProtobufRpcEngine.class);
ScmBlockLocationProtocolClientSideTranslatorPB scmBlockLocationClient =
new ScmBlockLocationProtocolClientSideTranslatorPB(
new SCMBlockLocationFailoverProxyProvider(conf));
---------------------------------------------------------------------
To unsubscribe, e-mail: ozone-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: ozone-commits-help@hadoop.apache.org
[hadoop-ozone] 05/11: HDDS-4130. remove the 1st edition of
RatisServer of SCM HA which is copied from OM HA.
Posted by na...@apache.org.
This is an automated email from the ASF dual-hosted git repository.
nanda pushed a commit to branch HDDS-2823
in repository https://gitbox.apache.org/repos/asf/hadoop-ozone.git
commit a70964ec84ac04ca3f0dbad9dd7b43f0f0807707
Author: Glen Geng <gl...@tencent.com>
AuthorDate: Sat Oct 24 21:04:32 2020 +0530
HDDS-4130. remove the 1st edition of RatisServer of SCM HA which is copied from OM HA.
---
.../org/apache/hadoop/hdds/scm/ha/SCMHAUtils.java | 14 --
.../hdds/scm/server/StorageContainerManager.java | 78 +----------
.../hdds/scm/server/ratis/SCMStateMachine.java | 6 +-
.../hdds/scm/server/ratis/TestSCMRatisServer.java | 147 ---------------------
.../hdds/scm/server/ratis/TestSCMStateMachine.java | 120 -----------------
5 files changed, 5 insertions(+), 360 deletions(-)
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMHAUtils.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMHAUtils.java
index 0f71744..48946b4 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMHAUtils.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/ha/SCMHAUtils.java
@@ -19,13 +19,9 @@
package org.apache.hadoop.hdds.scm.ha;
import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdds.HddsConfigKeys;
import org.apache.hadoop.hdds.conf.ConfigurationSource;
import org.apache.hadoop.hdds.scm.ScmConfigKeys;
-import org.apache.hadoop.hdds.scm.ScmUtils;
-import org.apache.hadoop.hdds.scm.server.ratis.SCMRatisServer;
-import java.io.File;
import java.util.Collection;
/**
@@ -42,16 +38,6 @@ public final class SCMHAUtils {
ScmConfigKeys.OZONE_SCM_HA_ENABLE_DEFAULT);
}
- public static File createSCMRatisDir(ConfigurationSource conf)
- throws IllegalArgumentException {
- String scmRatisDir = SCMRatisServer.getSCMRatisDirectory(conf);
- if (scmRatisDir == null || scmRatisDir.isEmpty()) {
- throw new IllegalArgumentException(HddsConfigKeys.OZONE_METADATA_DIRS +
- " must be defined.");
- }
- return ScmUtils.createSCMDir(scmRatisDir);
- }
-
/**
* Get a collection of all scmNodeIds for the given scmServiceId.
*/
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java
index cbd713c..b17729b 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java
@@ -31,13 +31,11 @@ import com.google.common.cache.CacheBuilder;
import com.google.common.cache.RemovalListener;
import com.google.protobuf.BlockingService;
-import java.io.File;
import java.security.cert.CertificateException;
import java.security.cert.X509Certificate;
import java.util.Collection;
import java.util.HashMap;
import java.util.Map;
-import java.util.Collections;
import java.util.Objects;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.TimeUnit;
@@ -54,10 +52,6 @@ import org.apache.hadoop.hdds.scm.PipelineChoosePolicy;
import org.apache.hadoop.hdds.scm.PlacementPolicy;
import org.apache.hadoop.hdds.scm.ha.SCMHAManager;
import org.apache.hadoop.hdds.scm.ha.SCMHAManagerImpl;
-import org.apache.hadoop.hdds.scm.ha.SCMHAUtils;
-import org.apache.hadoop.hdds.scm.ha.SCMNodeDetails;
-import org.apache.hadoop.hdds.scm.server.ratis.SCMRatisServer;
-import org.apache.hadoop.hdds.scm.server.ratis.SCMRatisSnapshotInfo;
import org.apache.hadoop.hdds.utils.HddsServerUtil;
import org.apache.hadoop.hdds.scm.ScmConfig;
import org.apache.hadoop.hdds.scm.ScmConfigKeys;
@@ -128,7 +122,6 @@ import org.apache.hadoop.util.JvmPauseMonitor;
import static org.apache.hadoop.hdds.scm.ScmConfigKeys.HDDS_SCM_WATCHER_TIMEOUT_DEFAULT;
import org.apache.ratis.grpc.GrpcTlsConfig;
-import org.apache.ratis.server.protocol.TermIndex;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@@ -202,11 +195,6 @@ public final class StorageContainerManager extends ServiceRuntimeInfoImpl
private CertificateServer certificateServer;
private GrpcTlsConfig grpcTlsConfig;
- // SCM HA related
- private SCMRatisServer scmRatisServer;
- private SCMRatisSnapshotInfo scmRatisSnapshotInfo;
- private File scmRatisSnapshotDir;
-
private JvmPauseMonitor jvmPauseMonitor;
private final OzoneConfiguration configuration;
private SCMContainerMetrics scmContainerMetrics;
@@ -276,10 +264,6 @@ public final class StorageContainerManager extends ServiceRuntimeInfoImpl
loginAsSCMUser(conf);
}
- this.scmRatisSnapshotInfo = new SCMRatisSnapshotInfo(
- scmStorageConfig.getCurrentDir());
- this.scmRatisSnapshotDir = SCMHAUtils.createSCMRatisDir(conf);
-
// Creates the SCM DBs or opens them if it exists.
// A valid pointer to the store is required by all the other services below.
initalizeMetadataStore(conf, configurator);
@@ -400,13 +384,7 @@ public final class StorageContainerManager extends ServiceRuntimeInfoImpl
public static StorageContainerManager createSCM(
OzoneConfiguration conf, SCMConfigurator configurator)
throws IOException, AuthenticationException {
- StorageContainerManager scm = new StorageContainerManager(
- conf, configurator);
- if (SCMHAUtils.isSCMHAEnabled(conf) && scm.getScmRatisServer() == null) {
- SCMRatisServer scmRatisServer = initializeRatisServer(conf, scm);
- scm.setScmRatisServer(scmRatisServer);
- }
- return scm;
+ return new StorageContainerManager(conf, configurator);
}
/**
@@ -840,10 +818,6 @@ public final class StorageContainerManager extends ServiceRuntimeInfoImpl
getClientRpcAddress()));
}
- if (scmRatisServer != null) {
- scmRatisServer.start();
- }
-
scmHAManager.start();
ms = HddsServerUtil
@@ -1180,56 +1154,6 @@ public final class StorageContainerManager extends ServiceRuntimeInfoImpl
return this.clusterMap;
}
- private static SCMRatisServer initializeRatisServer(
- OzoneConfiguration conf, StorageContainerManager scm) throws IOException {
- SCMNodeDetails scmNodeDetails = SCMNodeDetails
- .initStandAlone(conf);
- //TODO enable Ratis group
- SCMRatisServer scmRatisServer = SCMRatisServer.newSCMRatisServer(
- conf.getObject(SCMRatisServer.SCMRatisServerConfiguration.class),
- scm, scmNodeDetails, Collections.EMPTY_LIST,
- SCMRatisServer.getSCMRatisDirectory(conf));
- if (scmRatisServer != null) {
- LOG.info("SCM Ratis server initialized at port {}",
- scmRatisServer.getServerPort());
- } // TODO error handling for scmRatisServer creation failure
- return scmRatisServer;
- }
-
- @VisibleForTesting
- public SCMRatisServer getScmRatisServer() {
- return scmRatisServer;
- }
-
- public void setScmRatisServer(SCMRatisServer scmRatisServer) {
- this.scmRatisServer = scmRatisServer;
- }
-
- @VisibleForTesting
- public SCMRatisSnapshotInfo getSnapshotInfo() {
- return scmRatisSnapshotInfo;
- }
-
- @VisibleForTesting
- public long getRatisSnapshotIndex() {
- return scmRatisSnapshotInfo.getIndex();
- }
-
- /**
- * Save ratis snapshot to SCM meta store and local disk.
- */
- public TermIndex saveRatisSnapshot() throws IOException {
- TermIndex snapshotIndex = scmRatisServer.getLastAppliedTermIndex();
- if (scmMetadataStore != null) {
- // Flush the SCM state to disk
- scmMetadataStore.getStore().flushDB();
- }
-
- scmRatisSnapshotInfo.saveRatisSnapshotToDisk(snapshotIndex);
-
- return snapshotIndex;
- }
-
/**
* Get the safe mode status of all rules.
*
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/ratis/SCMStateMachine.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/ratis/SCMStateMachine.java
index 144380a..9a725a6 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/ratis/SCMStateMachine.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/ratis/SCMStateMachine.java
@@ -73,7 +73,8 @@ public class SCMStateMachine extends BaseStateMachine {
this.scmRatisServer = ratisServer;
this.scm = ratisServer.getSCM();
- this.snapshotInfo = scm.getSnapshotInfo();
+ // TODO: remove the whole file later
+ this.snapshotInfo = null;
updateLastAppliedIndexWithSnaphsotIndex();
ThreadFactory build = new ThreadFactoryBuilder().setDaemon(true)
@@ -158,7 +159,8 @@ public class SCMStateMachine extends BaseStateMachine {
public long takeSnapshot() throws IOException {
LOG.info("Saving Ratis snapshot on the SCM.");
if (scm != null) {
- return scm.saveRatisSnapshot().getIndex();
+ // TODO: remove the whole file later
+ return 0;
}
return 0;
}
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/ratis/TestSCMRatisServer.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/ratis/TestSCMRatisServer.java
deleted file mode 100644
index 8a233ae..0000000
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/ratis/TestSCMRatisServer.java
+++ /dev/null
@@ -1,147 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hdds.scm.server.ratis;
-
-import org.apache.hadoop.hdds.HddsConfigKeys;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdds.scm.ScmConfigKeys;
-import org.apache.hadoop.hdds.scm.TestUtils;
-import org.apache.hadoop.hdds.scm.ha.SCMNodeDetails;
-import org.apache.hadoop.hdds.scm.server.SCMStorageConfig;
-import org.apache.hadoop.hdds.scm.server.StorageContainerManager;
-import org.apache.hadoop.test.GenericTestUtils;
-import org.apache.ratis.protocol.RaftGroupId;
-import org.apache.ratis.util.LifeCycle;
-import org.junit.After;
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.rules.TemporaryFolder;
-
-import java.io.IOException;
-import java.net.InetAddress;
-import java.net.InetSocketAddress;
-import java.nio.file.Files;
-import java.nio.file.Path;
-import java.nio.file.Paths;
-import java.util.Collections;
-import java.util.UUID;
-import java.util.concurrent.TimeUnit;
-
-import static org.apache.hadoop.hdds.HddsConfigKeys.OZONE_METADATA_DIRS;
-
-/**
- * Test class for SCM Ratis Server.
- */
-public class TestSCMRatisServer {
- @Rule
- public TemporaryFolder folder = new TemporaryFolder();
-
- private OzoneConfiguration conf;
- private SCMRatisServer scmRatisServer;
- private StorageContainerManager scm;
- private String scmId;
- private static final long LEADER_ELECTION_TIMEOUT = 500L;
-
- @Before
- public void init() throws Exception {
- conf = new OzoneConfiguration();
- scmId = UUID.randomUUID().toString();
- conf.setTimeDuration(
- ScmConfigKeys.OZONE_SCM_LEADER_ELECTION_MINIMUM_TIMEOUT_DURATION_KEY,
- LEADER_ELECTION_TIMEOUT, TimeUnit.MILLISECONDS);
- conf.setBoolean(ScmConfigKeys.OZONE_SCM_HA_ENABLE_KEY, true);
- conf.set(ScmConfigKeys.OZONE_SCM_INTERNAL_SERVICE_ID, "scm-ha-test");
-
- // Standalone SCM Ratis server
- initSCM();
- scm = TestUtils.getScm(conf);
- scm.start();
- scmRatisServer = scm.getScmRatisServer();
- }
-
- @After
- public void shutdown() {
- if (scmRatisServer != null) {
- scmRatisServer.stop();
- }
- if (scm != null) {
- scm.stop();
- }
- }
-
- @Test
- public void testStartSCMRatisServer() {
- Assert.assertEquals("Ratis Server should be in running state",
- LifeCycle.State.RUNNING, scmRatisServer.getServerState());
- }
-
- @Test
- public void verifyRaftGroupIdGenerationWithCustomOmServiceId() throws
- Exception {
- String customScmServiceId = "scmIdCustom123";
- OzoneConfiguration newConf = new OzoneConfiguration();
- String newOmId = UUID.randomUUID().toString();
- String path = GenericTestUtils.getTempPath(newOmId);
- Path metaDirPath = Paths.get(path, "scm-meta");
- newConf.set(HddsConfigKeys.OZONE_METADATA_DIRS, metaDirPath.toString());
- newConf.setTimeDuration(
- ScmConfigKeys.OZONE_SCM_LEADER_ELECTION_MINIMUM_TIMEOUT_DURATION_KEY,
- LEADER_ELECTION_TIMEOUT, TimeUnit.MILLISECONDS);
- int ratisPort = 9873;
- InetSocketAddress rpcAddress = new InetSocketAddress(
- InetAddress.getLocalHost(), 0);
- SCMNodeDetails nodeDetails = new SCMNodeDetails.Builder()
- .setRpcAddress(rpcAddress)
- .setRatisPort(ratisPort)
- .setSCMNodeId(newOmId)
- .setSCMServiceId(customScmServiceId)
- .build();
- // Starts a single node Ratis server
- scmRatisServer.stop();
- SCMRatisServer newScmRatisServer = SCMRatisServer
- .newSCMRatisServer(newConf.getObject(SCMRatisServer
- .SCMRatisServerConfiguration.class), scm, nodeDetails,
- Collections.emptyList(),
- SCMRatisServer.getSCMRatisDirectory(newConf));
- newScmRatisServer.start();
-
- UUID uuid = UUID.nameUUIDFromBytes(customScmServiceId.getBytes());
- RaftGroupId raftGroupId = newScmRatisServer.getRaftGroup().getGroupId();
- Assert.assertEquals(uuid, raftGroupId.getUuid());
- Assert.assertEquals(raftGroupId.toByteString().size(), 16);
- newScmRatisServer.stop();
- }
-
- private void initSCM() throws IOException {
- String clusterId = UUID.randomUUID().toString();
- scmId = UUID.randomUUID().toString();
-
- final String path = folder.newFolder().toString();
- Path scmPath = Paths.get(path, "scm-meta");
- Files.createDirectories(scmPath);
- conf.set(OZONE_METADATA_DIRS, scmPath.toString());
- SCMStorageConfig scmStore = new SCMStorageConfig(conf);
- scmStore.setClusterId(clusterId);
- scmStore.setScmId(scmId);
- // writes the version file properties
- scmStore.initialize();
- }
-}
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/ratis/TestSCMStateMachine.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/ratis/TestSCMStateMachine.java
deleted file mode 100644
index 0eddbde..0000000
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/ratis/TestSCMStateMachine.java
+++ /dev/null
@@ -1,120 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership. The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-package org.apache.hadoop.hdds.scm.server.ratis;
-
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdds.scm.ScmConfigKeys;
-import org.apache.hadoop.hdds.scm.TestUtils;
-import org.apache.hadoop.hdds.scm.server.SCMStorageConfig;
-import org.apache.hadoop.hdds.scm.server.StorageContainerManager;
-import org.junit.After;
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.Rule;
-import org.junit.Test;
-import org.junit.rules.TemporaryFolder;
-
-import java.io.IOException;
-import java.nio.file.Files;
-import java.nio.file.Path;
-import java.nio.file.Paths;
-import java.util.UUID;
-
-import static org.apache.hadoop.hdds.HddsConfigKeys.OZONE_METADATA_DIRS;
-
-/**
- * Test class for SCMStateMachine.
- */
-public class TestSCMStateMachine {
- @Rule
- public TemporaryFolder folder = new TemporaryFolder();
-
- private SCMStateMachine scmStateMachine;
- private StorageContainerManager scm;
- private SCMRatisServer scmRatisServer;
- private OzoneConfiguration conf;
- private String scmId;
- @Before
- public void init() throws Exception {
- conf = new OzoneConfiguration();
- conf.setBoolean(ScmConfigKeys.OZONE_SCM_HA_ENABLE_KEY, true);
- conf.set(ScmConfigKeys.OZONE_SCM_INTERNAL_SERVICE_ID, "scm-ha-test");
- scmId = UUID.randomUUID().toString();
-
- initSCM();
- scm = TestUtils.getScm(conf);
- scm.start();
- scmRatisServer = scm.getScmRatisServer();
- scmStateMachine = scm.getScmRatisServer().getScmStateMachine();
- }
-
- @Test
- public void testSCMUpdatedAppliedIndex(){
- // State machine should start with 0 term and 0 index.
- scmStateMachine.notifyIndexUpdate(0, 0);
- Assert.assertEquals(0,
- scmStateMachine.getLastAppliedTermIndex().getTerm());
- Assert.assertEquals(0,
- scmStateMachine.getLastAppliedTermIndex().getIndex());
-
- // If only the transactionMap is updated, index should stay 0.
- scmStateMachine.addApplyTransactionTermIndex(0, 1);
- Assert.assertEquals(0L,
- scmStateMachine.getLastAppliedTermIndex().getTerm());
- Assert.assertEquals(0L,
- scmStateMachine.getLastAppliedTermIndex().getIndex());
-
- // After the index update is notified, the index should increase.
- scmStateMachine.notifyIndexUpdate(0, 1);
- Assert.assertEquals(0L,
- scmStateMachine.getLastAppliedTermIndex().getTerm());
- Assert.assertEquals(1L,
- scmStateMachine.getLastAppliedTermIndex().getIndex());
-
- // Only do a notifyIndexUpdate can also increase the index.
- scmStateMachine.notifyIndexUpdate(0, 2);
- Assert.assertEquals(0L,
- scmStateMachine.getLastAppliedTermIndex().getTerm());
- Assert.assertEquals(2L,
- scmStateMachine.getLastAppliedTermIndex().getIndex());
-
- // If an index that skips intermediate entries (leaving a gap) is
- // notified, the index should not be updated.
- scmStateMachine.notifyIndexUpdate(0, 5);
- Assert.assertEquals(0L,
- scmStateMachine.getLastAppliedTermIndex().getTerm());
- Assert.assertEquals(2L,
- scmStateMachine.getLastAppliedTermIndex().getIndex());
- }
-
- private void initSCM() throws IOException {
- String clusterId = UUID.randomUUID().toString();
- final String path = folder.newFolder().toString();
- Path scmPath = Paths.get(path, "scm-meta");
- Files.createDirectories(scmPath);
- conf.set(OZONE_METADATA_DIRS, scmPath.toString());
- SCMStorageConfig scmStore = new SCMStorageConfig(conf);
- scmStore.setClusterId(clusterId);
- scmStore.setScmId(scmId);
- // writes the version file properties
- scmStore.initialize();
- }
-
- @After
- public void cleanup() {
- scm.stop();
- }
-}
---------------------------------------------------------------------
To unsubscribe, e-mail: ozone-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: ozone-commits-help@hadoop.apache.org
[hadoop-ozone] 08/11: HDDS-3188. Add failover proxy for SCM block
location.
Posted by na...@apache.org.
This is an automated email from the ASF dual-hosted git repository.
nanda pushed a commit to branch HDDS-2823
in repository https://gitbox.apache.org/repos/asf/hadoop-ozone.git
commit 9f7ab4680117d0f0b5251027c282ae819eaf921c
Author: Li Cheng <ti...@tencent.com>
AuthorDate: Sat Oct 24 21:27:49 2020 +0530
HDDS-3188. Add failover proxy for SCM block location.
---
.../hadoop/hdds/scm/exceptions/SCMException.java | 3 +-
...lockLocationProtocolClientSideTranslatorPB.java | 25 +-
.../SCMBlockLocationFailoverProxyProvider.java | 280 +++++++++++++++++++++
.../hadoop/hdds/scm/proxy/SCMClientConfig.java | 103 ++++++++
.../apache/hadoop/hdds/scm/proxy/SCMProxyInfo.java | 73 ++++++
.../apache/hadoop/hdds/scm/proxy/package-info.java | 22 ++
.../src/main/proto/ScmServerProtocol.proto | 3 +
...lockLocationProtocolServerSideTranslatorPB.java | 18 ++
.../hdds/scm/server/SCMBlockProtocolServer.java | 4 +
.../hdds/scm/server/StorageContainerManager.java | 19 ++
.../org/apache/hadoop/ozone/om/OzoneManager.java | 11 +-
11 files changed, 545 insertions(+), 16 deletions(-)
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/exceptions/SCMException.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/exceptions/SCMException.java
index 48a8e05..82e3034 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/exceptions/SCMException.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/exceptions/SCMException.java
@@ -124,6 +124,7 @@ public class SCMException extends IOException {
FAILED_TO_ALLOCATE_ENOUGH_BLOCKS,
INTERNAL_ERROR,
FAILED_TO_INIT_PIPELINE_CHOOSE_POLICY,
- FAILED_TO_INIT_LEADER_CHOOSE_POLICY
+ FAILED_TO_INIT_LEADER_CHOOSE_POLICY,
+ SCM_NOT_LEADER
}
}
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/ScmBlockLocationProtocolClientSideTranslatorPB.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/ScmBlockLocationProtocolClientSideTranslatorPB.java
index e86ee81..12c51f6 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/ScmBlockLocationProtocolClientSideTranslatorPB.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/ScmBlockLocationProtocolClientSideTranslatorPB.java
@@ -26,6 +26,7 @@ import org.apache.hadoop.hdds.annotation.InterfaceAudience;
import org.apache.hadoop.hdds.client.ContainerBlockID;
import org.apache.hadoop.hdds.protocol.DatanodeDetails;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
+import org.apache.hadoop.hdds.protocol.proto.ScmBlockLocationProtocolProtos;
import org.apache.hadoop.hdds.protocol.proto.ScmBlockLocationProtocolProtos.SCMBlockLocationRequest;
import org.apache.hadoop.hdds.protocol.proto.ScmBlockLocationProtocolProtos.SCMBlockLocationResponse;
import org.apache.hadoop.hdds.protocol.proto.ScmBlockLocationProtocolProtos.Type;
@@ -45,10 +46,11 @@ import org.apache.hadoop.hdds.scm.container.common.helpers.ExcludeList;
import org.apache.hadoop.hdds.scm.exceptions.SCMException;
import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
import org.apache.hadoop.hdds.scm.protocol.ScmBlockLocationProtocol;
+import org.apache.hadoop.hdds.scm.proxy.SCMBlockLocationFailoverProxyProvider;
import org.apache.hadoop.hdds.tracing.TracingUtil;
+import org.apache.hadoop.io.retry.RetryProxy;
import org.apache.hadoop.ipc.ProtobufHelper;
import org.apache.hadoop.ipc.ProtocolTranslator;
-import org.apache.hadoop.ipc.RPC;
import org.apache.hadoop.ozone.common.BlockGroup;
import org.apache.hadoop.ozone.common.DeleteBlockGroupResult;
@@ -73,15 +75,21 @@ public final class ScmBlockLocationProtocolClientSideTranslatorPB
private static final RpcController NULL_RPC_CONTROLLER = null;
private final ScmBlockLocationProtocolPB rpcProxy;
+ private SCMBlockLocationFailoverProxyProvider failoverProxyProvider;
/**
* Creates a new StorageContainerLocationProtocolClientSideTranslatorPB.
*
- * @param rpcProxy {@link StorageContainerLocationProtocolPB} RPC proxy
+ * @param proxyProvider {@link SCMBlockLocationFailoverProxyProvider}
+ * failover proxy provider.
*/
public ScmBlockLocationProtocolClientSideTranslatorPB(
- ScmBlockLocationProtocolPB rpcProxy) {
- this.rpcProxy = rpcProxy;
+ SCMBlockLocationFailoverProxyProvider proxyProvider) {
+ Preconditions.checkState(proxyProvider != null);
+ this.failoverProxyProvider = proxyProvider;
+ this.rpcProxy = (ScmBlockLocationProtocolPB) RetryProxy.create(
+ ScmBlockLocationProtocolPB.class, failoverProxyProvider,
+ failoverProxyProvider.getSCMBlockLocationRetryPolicy(null));
}
/**
@@ -105,6 +113,11 @@ public final class ScmBlockLocationProtocolClientSideTranslatorPB
try {
SCMBlockLocationResponse response =
rpcProxy.send(NULL_RPC_CONTROLLER, req);
+ if (response.getStatus() ==
+ ScmBlockLocationProtocolProtos.Status.SCM_NOT_LEADER) {
+ failoverProxyProvider
+ .performFailoverToAssignedLeader(response.getLeaderSCMNodeId());
+ }
return response;
} catch (ServiceException e) {
throw ProtobufHelper.getRemoteException(e);
@@ -267,7 +280,7 @@ public final class ScmBlockLocationProtocolClientSideTranslatorPB
}
@Override
- public void close() {
- RPC.stopProxy(rpcProxy);
+ public void close() throws IOException {
+ failoverProxyProvider.close();
}
}
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/proxy/SCMBlockLocationFailoverProxyProvider.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/proxy/SCMBlockLocationFailoverProxyProvider.java
new file mode 100644
index 0000000..1beb69e
--- /dev/null
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/proxy/SCMBlockLocationFailoverProxyProvider.java
@@ -0,0 +1,280 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hdds.scm.proxy;
+
+import com.google.common.annotations.VisibleForTesting;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdds.conf.ConfigurationSource;
+import org.apache.hadoop.hdds.scm.ScmConfigKeys;
+import org.apache.hadoop.hdds.scm.protocol.ScmBlockLocationProtocol;
+import org.apache.hadoop.hdds.scm.protocolPB.ScmBlockLocationProtocolPB;
+import org.apache.hadoop.hdds.utils.LegacyHadoopConfigurationSource;
+import org.apache.hadoop.io.retry.FailoverProxyProvider;
+import org.apache.hadoop.io.retry.RetryPolicy;
+import org.apache.hadoop.io.retry.RetryPolicy.RetryAction;
+import org.apache.hadoop.ipc.ProtobufRpcEngine;
+import org.apache.hadoop.ipc.RPC;
+import org.apache.hadoop.net.NetUtils;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.Closeable;
+import java.io.IOException;
+import java.net.InetSocketAddress;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Optional;
+
+import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_NAMES;
+import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_SERVICE_IDS_KEY;
+import static org.apache.hadoop.hdds.HddsUtils.getScmAddressForBlockClients;
+import static org.apache.hadoop.hdds.HddsUtils.getPortNumberFromConfigKeys;
+import static org.apache.hadoop.hdds.HddsUtils.getHostName;
+
+/**
+ * Failover proxy provider for SCM.
+ */
+public class SCMBlockLocationFailoverProxyProvider implements
+ FailoverProxyProvider<ScmBlockLocationProtocolPB>, Closeable {
+ public static final Logger LOG =
+ LoggerFactory.getLogger(SCMBlockLocationFailoverProxyProvider.class);
+
+ private Map<String, ProxyInfo<ScmBlockLocationProtocolPB>> scmProxies;
+ private Map<String, SCMProxyInfo> scmProxyInfoMap;
+ private List<String> scmNodeIDList;
+
+ private String currentProxySCMNodeId;
+ private int currentProxyIndex;
+
+ private final ConfigurationSource conf;
+ private final long scmVersion;
+
+ private final String scmServiceId;
+
+ private String lastAttemptedLeader;
+
+ private final int maxRetryCount;
+ private final long retryInterval;
+
+ public static final String SCM_DUMMY_NODEID_PREFIX = "scm";
+
+ public SCMBlockLocationFailoverProxyProvider(ConfigurationSource conf) {
+ this.conf = conf;
+ this.scmVersion = RPC.getProtocolVersion(ScmBlockLocationProtocol.class);
+ this.scmServiceId = conf.getTrimmed(OZONE_SCM_SERVICE_IDS_KEY);
+ this.scmProxies = new HashMap<>();
+ this.scmProxyInfoMap = new HashMap<>();
+ this.scmNodeIDList = new ArrayList<>();
+ loadConfigs();
+
+
+ this.currentProxyIndex = 0;
+ currentProxySCMNodeId = scmNodeIDList.get(currentProxyIndex);
+
+ SCMClientConfig config = conf.getObject(SCMClientConfig.class);
+ this.maxRetryCount = config.getRetryCount();
+ this.retryInterval = config.getRetryInterval();
+ }
+
+ @VisibleForTesting
+ protected Collection<InetSocketAddress> getSCMAddressList() {
+ Collection<String> scmAddressList =
+ conf.getTrimmedStringCollection(OZONE_SCM_NAMES);
+ Collection<InetSocketAddress> resultList = new ArrayList<>();
+ if (!scmAddressList.isEmpty()) {
+ final int port = getPortNumberFromConfigKeys(conf,
+ ScmConfigKeys.OZONE_SCM_BLOCK_CLIENT_ADDRESS_KEY)
+ .orElse(ScmConfigKeys.OZONE_SCM_BLOCK_CLIENT_PORT_DEFAULT);
+ for (String scmAddress : scmAddressList) {
+ LOG.info("SCM Address for proxy is {}", scmAddress);
+
+ Optional<String> hostname = getHostName(scmAddress);
+ if (hostname.isPresent()) {
+ resultList.add(NetUtils.createSocketAddr(
+ hostname.get() + ":" + port));
+ }
+ }
+ }
+ if (resultList.isEmpty()) {
+ // fall back
+ resultList.add(getScmAddressForBlockClients(conf));
+ }
+ return resultList;
+ }
+
+ private void loadConfigs() {
+ Collection<InetSocketAddress> scmAddressList = getSCMAddressList();
+ int scmNodeIndex = 1;
+ for (InetSocketAddress scmAddress : scmAddressList) {
+ String nodeId = SCM_DUMMY_NODEID_PREFIX + scmNodeIndex;
+ if (scmAddress == null) {
+ LOG.error("Failed to create SCM proxy for {}.", nodeId);
+ continue;
+ }
+ scmNodeIndex++;
+ SCMProxyInfo scmProxyInfo = new SCMProxyInfo(
+ scmServiceId, nodeId, scmAddress);
+ ProxyInfo<ScmBlockLocationProtocolPB> proxy = new ProxyInfo<>(
+ null, scmProxyInfo.toString());
+ scmProxies.put(nodeId, proxy);
+ scmProxyInfoMap.put(nodeId, scmProxyInfo);
+ scmNodeIDList.add(nodeId);
+ }
+
+ if (scmProxies.isEmpty()) {
+ throw new IllegalArgumentException("Could not find any configured " +
+ "addresses for SCM. Please configure the system with "
+ + OZONE_SCM_NAMES);
+ }
+ }
+
+ @VisibleForTesting
+ public synchronized String getCurrentProxyOMNodeId() {
+ return currentProxySCMNodeId;
+ }
+
+ @Override
+ public synchronized ProxyInfo getProxy() {
+ ProxyInfo currentProxyInfo = scmProxies.get(currentProxySCMNodeId);
+ createSCMProxyIfNeeded(currentProxyInfo, currentProxySCMNodeId);
+ return currentProxyInfo;
+ }
+
+ @Override
+ public void performFailover(ScmBlockLocationProtocolPB newLeader) {
+ // Should do nothing here.
+ LOG.debug("Failing over to next proxy. {}", getCurrentProxyOMNodeId());
+ }
+
+ public void performFailoverToAssignedLeader(String newLeader) {
+ if (newLeader == null) {
+ // If newLeader is not assigned, it will fail over to next proxy.
+ nextProxyIndex();
+ } else {
+ if (!assignLeaderToNode(newLeader)) {
+ LOG.debug("Failing over SCM proxy to nodeId: {}", newLeader);
+ nextProxyIndex();
+ }
+ }
+ }
+
+ @Override
+ public Class<ScmBlockLocationProtocolPB> getInterface() {
+ return ScmBlockLocationProtocolPB.class;
+ }
+
+ @Override
+ public synchronized void close() throws IOException {
+ for (ProxyInfo<ScmBlockLocationProtocolPB> proxy : scmProxies.values()) {
+ ScmBlockLocationProtocolPB scmProxy = proxy.proxy;
+ if (scmProxy != null) {
+ RPC.stopProxy(scmProxy);
+ }
+ }
+ }
+
+ public RetryAction getRetryAction(int failovers) {
+ if (failovers < maxRetryCount) {
+ return new RetryAction(RetryAction.RetryDecision.FAILOVER_AND_RETRY,
+ getRetryInterval());
+ } else {
+ return RetryAction.FAIL;
+ }
+ }
+
+ private synchronized long getRetryInterval() {
+ // TODO add exponential backoff
+ return retryInterval;
+ }
+
+ private synchronized int nextProxyIndex() {
+ lastAttemptedLeader = currentProxySCMNodeId;
+
+ // round robin the next proxy
+ currentProxyIndex = (currentProxyIndex + 1) % scmProxies.size();
+ currentProxySCMNodeId = scmNodeIDList.get(currentProxyIndex);
+ return currentProxyIndex;
+ }
+
+ private synchronized boolean assignLeaderToNode(String newLeaderNodeId) {
+ if (!currentProxySCMNodeId.equals(newLeaderNodeId)) {
+ if (scmProxies.containsKey(newLeaderNodeId)) {
+ lastAttemptedLeader = currentProxySCMNodeId;
+ currentProxySCMNodeId = newLeaderNodeId;
+ currentProxyIndex = scmNodeIDList.indexOf(currentProxySCMNodeId);
+ return true;
+ }
+ } else {
+ lastAttemptedLeader = currentProxySCMNodeId;
+ }
+ return false;
+ }
+
+ /**
+ * Creates proxy object if it does not already exist.
+ */
+ private void createSCMProxyIfNeeded(ProxyInfo proxyInfo,
+ String nodeId) {
+ if (proxyInfo.proxy == null) {
+ InetSocketAddress address = scmProxyInfoMap.get(nodeId).getAddress();
+ try {
+ ScmBlockLocationProtocolPB proxy = createSCMProxy(address);
+ try {
+ proxyInfo.proxy = proxy;
+ } catch (IllegalAccessError iae) {
+ scmProxies.put(nodeId,
+ new ProxyInfo<>(proxy, proxyInfo.proxyInfo));
+ }
+ } catch (IOException ioe) {
+ LOG.error("{} Failed to create RPC proxy to SCM at {}",
+ this.getClass().getSimpleName(), address, ioe);
+ throw new RuntimeException(ioe);
+ }
+ }
+ }
+
+ private ScmBlockLocationProtocolPB createSCMProxy(
+ InetSocketAddress scmAddress) throws IOException {
+ Configuration hadoopConf =
+ LegacyHadoopConfigurationSource.asHadoopConfiguration(conf);
+ RPC.setProtocolEngine(hadoopConf, ScmBlockLocationProtocol.class,
+ ProtobufRpcEngine.class);
+ return RPC.getProxy(ScmBlockLocationProtocolPB.class, scmVersion,
+ scmAddress, UserGroupInformation.getCurrentUser(), hadoopConf,
+ NetUtils.getDefaultSocketFactory(hadoopConf),
+ (int)conf.getObject(SCMClientConfig.class).getRpcTimeOut());
+ }
+
+ public RetryPolicy getSCMBlockLocationRetryPolicy(String newLeader) {
+ RetryPolicy retryPolicy = new RetryPolicy() {
+ @Override
+ public RetryAction shouldRetry(Exception e, int retry,
+ int failover, boolean b) {
+ performFailoverToAssignedLeader(newLeader);
+ return getRetryAction(failover);
+ }
+ };
+ return retryPolicy;
+ }
+}
+
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/proxy/SCMClientConfig.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/proxy/SCMClientConfig.java
new file mode 100644
index 0000000..99dc446
--- /dev/null
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/proxy/SCMClientConfig.java
@@ -0,0 +1,103 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package org.apache.hadoop.hdds.scm.proxy;
+
+import org.apache.hadoop.hdds.conf.Config;
+import org.apache.hadoop.hdds.conf.ConfigGroup;
+import org.apache.hadoop.hdds.conf.ConfigType;
+
+import java.util.concurrent.TimeUnit;
+
+import static org.apache.hadoop.hdds.conf.ConfigTag.CLIENT;
+import static org.apache.hadoop.hdds.conf.ConfigTag.OZONE;
+import static org.apache.hadoop.hdds.conf.ConfigTag.SCM;
+
+/**
+ * Config for SCM Block Client.
+ */
+@ConfigGroup(prefix = "hdds.scmclient")
+public class SCMClientConfig {
+ public static final String SCM_CLIENT_RPC_TIME_OUT = "rpc.timeout";
+ public static final String SCM_CLIENT_FAILOVER_MAX_RETRY =
+ "failover.max.retry";
+ public static final String SCM_CLIENT_RETRY_INTERVAL =
+ "failover.retry.interval";
+
+ @Config(key = SCM_CLIENT_RPC_TIME_OUT,
+ defaultValue = "15m",
+ type = ConfigType.TIME,
+ tags = {OZONE, SCM, CLIENT},
+ timeUnit = TimeUnit.MILLISECONDS,
+ description = "RpcClient timeout on waiting for the response from " +
+ "SCM. The default value is set to 15 minutes. " +
+ "If ipc.client.ping is set to true and this rpc-timeout " +
+ "is greater than the value of ipc.ping.interval, the effective " +
+ "value of the rpc-timeout is rounded up to multiple of " +
+ "ipc.ping.interval."
+ )
+ private long rpcTimeOut = 15 * 60 * 1000;
+
+ @Config(key = SCM_CLIENT_FAILOVER_MAX_RETRY,
+ defaultValue = "15",
+ type = ConfigType.INT,
+ tags = {OZONE, SCM, CLIENT},
+ description = "Max retry count for SCM Client when failover happens."
+ )
+ private int retryCount = 15;
+
+ @Config(key = SCM_CLIENT_RETRY_INTERVAL,
+ defaultValue = "2s",
+ type = ConfigType.TIME,
+ tags = {OZONE, SCM, CLIENT},
+ timeUnit = TimeUnit.MILLISECONDS,
+ description = "SCM Client timeout on waiting for the next connection " +
+ "retry to other SCM IP. The default value is set to 2 seconds. "
+ )
+ private long retryInterval = 2 * 1000;
+
+ public long getRpcTimeOut() {
+ return rpcTimeOut;
+ }
+
+ public void setRpcTimeOut(long timeOut) {
+ // As at the end this value should not exceed MAX_VALUE, as underlying
+ // Rpc layer SocketTimeout parameter is int.
+ if (rpcTimeOut > Integer.MAX_VALUE) {
+ this.rpcTimeOut = Integer.MAX_VALUE;
+ }
+ this.rpcTimeOut = timeOut;
+ }
+
+ public int getRetryCount() {
+ return retryCount;
+ }
+
+ public void setRetryCount(int retryCount) {
+ this.retryCount = retryCount;
+ }
+
+ public long getRetryInterval() {
+ return retryInterval;
+ }
+
+ public void setRetryInterval(long retryInterval) {
+ this.retryInterval = retryInterval;
+ }
+}
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/proxy/SCMProxyInfo.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/proxy/SCMProxyInfo.java
new file mode 100644
index 0000000..ec2a5b0
--- /dev/null
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/proxy/SCMProxyInfo.java
@@ -0,0 +1,73 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hdds.scm.proxy;
+
+import com.google.common.base.Preconditions;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.net.InetSocketAddress;
+
+/**
+ * Class to store SCM proxy info.
+ */
+public class SCMProxyInfo {
+ private String serviceId;
+ private String nodeId;
+ private String rpcAddrStr;
+ private InetSocketAddress rpcAddr;
+
+ private static final Logger LOG =
+ LoggerFactory.getLogger(SCMProxyInfo.class);
+
+ public SCMProxyInfo(String serviceID, String nodeID,
+ InetSocketAddress rpcAddress) {
+ Preconditions.checkNotNull(rpcAddress);
+ this.serviceId = serviceID;
+ this.nodeId = nodeID;
+ this.rpcAddrStr = rpcAddress.toString();
+ this.rpcAddr = rpcAddress;
+ if (rpcAddr.isUnresolved()) {
+ LOG.warn("SCM address {} for serviceID {} remains unresolved " +
+ "for node ID {}. Check your ozone-site.xml file to ensure scm " +
+ "addresses are configured properly.",
+ rpcAddress, serviceId, nodeId);
+ }
+ }
+
+ public String toString() {
+ return new StringBuilder()
+ .append("nodeId=")
+ .append(nodeId)
+ .append(",nodeAddress=")
+ .append(rpcAddrStr).toString();
+ }
+
+ public InetSocketAddress getAddress() {
+ return rpcAddr;
+ }
+
+ public String getServiceId() {
+ return serviceId;
+ }
+
+ public String getNodeId() {
+ return nodeId;
+ }
+}
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/proxy/package-info.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/proxy/package-info.java
new file mode 100644
index 0000000..e3bb058
--- /dev/null
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/scm/proxy/package-info.java
@@ -0,0 +1,22 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdds.scm.proxy;
+
+/**
+ * Classes supporting client-side SCM proxying, including failover
+ * proxy providers and per-node proxy metadata.
+ */
diff --git a/hadoop-hdds/interface-server/src/main/proto/ScmServerProtocol.proto b/hadoop-hdds/interface-server/src/main/proto/ScmServerProtocol.proto
index 7d59bd7..bc5193f 100644
--- a/hadoop-hdds/interface-server/src/main/proto/ScmServerProtocol.proto
+++ b/hadoop-hdds/interface-server/src/main/proto/ScmServerProtocol.proto
@@ -70,6 +70,8 @@ message SCMBlockLocationResponse {
optional string leaderOMNodeId = 6;
+ optional string leaderSCMNodeId = 7;
+
optional AllocateScmBlockResponseProto allocateScmBlockResponse = 11;
optional DeleteScmKeyBlocksResponseProto deleteScmKeyBlocksResponse = 12;
optional hadoop.hdds.GetScmInfoResponseProto getScmInfoResponse = 13;
@@ -116,6 +118,7 @@ enum Status {
INTERNAL_ERROR = 29;
FAILED_TO_INIT_PIPELINE_CHOOSE_POLICY = 30;
FAILED_TO_INIT_LEADER_CHOOSE_POLICY = 31;
+ SCM_NOT_LEADER = 32;
}
/**
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/protocol/ScmBlockLocationProtocolServerSideTranslatorPB.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/protocol/ScmBlockLocationProtocolServerSideTranslatorPB.java
index a04e168..cbb64c1 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/protocol/ScmBlockLocationProtocolServerSideTranslatorPB.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/protocol/ScmBlockLocationProtocolServerSideTranslatorPB.java
@@ -42,6 +42,7 @@ import org.apache.hadoop.hdds.scm.container.common.helpers.ExcludeList;
import org.apache.hadoop.hdds.scm.exceptions.SCMException;
import org.apache.hadoop.hdds.scm.protocolPB.ScmBlockLocationProtocolPB;
import org.apache.hadoop.hdds.scm.protocolPB.StorageContainerLocationProtocolPB;
+import org.apache.hadoop.hdds.scm.server.SCMBlockProtocolServer;
import org.apache.hadoop.hdds.server.OzoneProtocolMessageDispatcher;
import org.apache.hadoop.ozone.common.BlockGroup;
import org.apache.hadoop.ozone.common.DeleteBlockGroupResult;
@@ -94,9 +95,26 @@ public final class ScmBlockLocationProtocolServerSideTranslatorPB
.setTraceID(traceID);
}
+  /**
+   * Reports whether the SCM backing this translator is currently the
+   * Raft leader.
+   *
+   * @throws ServiceException if the wrapped implementation is not an
+   *         {@link SCMBlockProtocolServer} (leadership cannot be queried).
+   */
+  private boolean isLeader() throws ServiceException {
+    if (impl instanceof SCMBlockProtocolServer) {
+      return ((SCMBlockProtocolServer) impl).getScm().checkLeader();
+    }
+    throw new ServiceException("Should be SCMBlockProtocolServer");
+  }
+
@Override
public SCMBlockLocationResponse send(RpcController controller,
SCMBlockLocationRequest request) throws ServiceException {
+ if (!isLeader()) {
+ SCMBlockLocationResponse.Builder response = createSCMBlockResponse(
+ request.getCmdType(),
+ request.getTraceID());
+ response.setSuccess(false);
+ response.setStatus(Status.SCM_NOT_LEADER);
+ response.setLeaderSCMNodeId(null);
+ return response.build();
+ }
return dispatcher.processRequest(
request,
this::processMessage,
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMBlockProtocolServer.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMBlockProtocolServer.java
index a5d3419..35af3cf 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMBlockProtocolServer.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMBlockProtocolServer.java
@@ -300,6 +300,10 @@ public class SCMBlockProtocolServer implements
}
}
+ /**
+  * Returns the {@link StorageContainerManager} that owns this RPC server,
+  * giving callers access to SCM-wide state such as leadership status.
+  */
+ public StorageContainerManager getScm() {
+ return scm;
+ }
+
@Override
public List<DatanodeDetails> sortDatanodes(List<String> nodes,
String clientMachine) throws IOException {
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java
index 3d1ad72..04b8bb8 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java
@@ -1031,6 +1031,25 @@ public final class StorageContainerManager extends ServiceRuntimeInfoImpl
return replicationManager;
}
+ /**
+  * Check whether this SCM instance is currently the Raft leader,
+  * as reported by the SCM HA manager.
+  *
+  * @return true if this SCM is the leader, false otherwise.
+  */
+ public boolean checkLeader() {
+ return scmHAManager.isLeader();
+ }
+
+  /**
+   * Get the suggested leader reported by the Raft layer.
+   *
+   * @return the suggested leader's address, or null if no leader is known.
+   */
+  public String getSuggestedLeader() {
+    // Read the suggested leader exactly once: the original code called the
+    // HA manager twice, and a leadership change between the null-check and
+    // the use could yield a different value or an NPE.
+    return java.util.Optional.ofNullable(scmHAManager.getSuggestedLeader())
+        .map(leader -> leader.getAddress())
+        .orElse(null);
+  }
+
public void checkAdminAccess(String remoteUser) throws IOException {
if (remoteUser != null && !scmAdminUsernames.contains(remoteUser)) {
throw new IOException(
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java
index e58af8b..898cd7c 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java
@@ -69,6 +69,7 @@ import org.apache.hadoop.hdds.scm.protocolPB.ScmBlockLocationProtocolClientSideT
import org.apache.hadoop.hdds.scm.protocolPB.ScmBlockLocationProtocolPB;
import org.apache.hadoop.hdds.scm.protocolPB.StorageContainerLocationProtocolClientSideTranslatorPB;
import org.apache.hadoop.hdds.scm.protocolPB.StorageContainerLocationProtocolPB;
+import org.apache.hadoop.hdds.scm.proxy.SCMBlockLocationFailoverProxyProvider;
import org.apache.hadoop.hdds.security.x509.SecurityConfig;
import org.apache.hadoop.hdds.security.x509.certificate.client.CertificateClient;
import org.apache.hadoop.hdds.security.x509.certificate.client.OMCertificateClient;
@@ -187,7 +188,6 @@ import org.apache.commons.lang3.tuple.Pair;
import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_BLOCK_TOKEN_ENABLED;
import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_BLOCK_TOKEN_ENABLED_DEFAULT;
-import static org.apache.hadoop.hdds.HddsUtils.getScmAddressForBlockClients;
import static org.apache.hadoop.hdds.HddsUtils.getScmAddressForClients;
import static org.apache.hadoop.hdds.security.x509.certificates.utils.CertificateSignRequest.getEncodedString;
import static org.apache.hadoop.hdds.server.ServerUtils.getRemoteUserName;
@@ -824,16 +824,9 @@ public final class OzoneManager extends ServiceRuntimeInfoImpl
OzoneConfiguration conf) throws IOException {
RPC.setProtocolEngine(conf, ScmBlockLocationProtocolPB.class,
ProtobufRpcEngine.class);
- long scmVersion =
- RPC.getProtocolVersion(ScmBlockLocationProtocolPB.class);
- InetSocketAddress scmBlockAddress =
- getScmAddressForBlockClients(conf);
ScmBlockLocationProtocolClientSideTranslatorPB scmBlockLocationClient =
new ScmBlockLocationProtocolClientSideTranslatorPB(
- RPC.getProxy(ScmBlockLocationProtocolPB.class, scmVersion,
- scmBlockAddress, UserGroupInformation.getCurrentUser(), conf,
- NetUtils.getDefaultSocketFactory(conf),
- Client.getRpcTimeout(conf)));
+ new SCMBlockLocationFailoverProxyProvider(conf));
return TracingUtil
.createProxy(scmBlockLocationClient, ScmBlockLocationProtocol.class,
conf);
---------------------------------------------------------------------
To unsubscribe, e-mail: ozone-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: ozone-commits-help@hadoop.apache.org