Posted to commits@ozone.apache.org by sz...@apache.org on 2022/10/25 01:15:09 UTC

[ozone] branch HDDS-4454 updated (c84da64fbc -> 653a120236)

This is an automated email from the ASF dual-hosted git repository.

szetszwo pushed a change to branch HDDS-4454
in repository https://gitbox.apache.org/repos/asf/ozone.git


 discard c84da64fbc HDDS-6955. [Ozone-streaming] Add explicit stream flag in ozone shell (#3559)
 discard 04ebe89843 HDDS-6867.  [Ozone-Streaming] PutKeyHandler should not use streaming to put EC key. (#3516)
 discard 4c064f42aa HDDS-6842. [Ozone-Streaming] Reduce the number of watch requests in StreamCommitWatcher. (#3492)
 discard f850883767 HDDS-6592. [Ozone-Streaming] Fix ContainerStateMachine#applyTransaction assert error (#3315)
 discard a2501c3de3 HDDS-5666. Add option to createKey via streaming api in Freon (#2574)
 discard 1dddbec252 HDDS-6500. [Ozone-Streaming] Buffer the PutBlockRequest at the end of the stream. (#3229)
 discard 0a432aa0eb HDDS-6137. [Ozone-Streaming] Refactor KeyDataStreamOutput. (#3195)
 discard 22e77a7b3d HDDS-5798. [Ozone-Streaming] Setup TlsConf parameters. (#3207)
 discard caa3b30a1e HDDS-6388. [Ozone-Streaming] Streaming write support both pipeline model and star model (#3145)
 discard c1bc998af6 HDDS-6355. [Ozone-Streaming] Fix CheckStyle problem (#3119)
 discard de690f6c25 HDDS-6229. [Ozone-Streaming] Data Channel abstraction on datanode (#3023)
 discard 9d24f58704 HDDS-6282. Fix BlockDataStreamOutput#doFlushIfNeeded NPE (#3060)
 discard 576c359114 HDDS-5487. [Ozone-Streaming] BlockDataStreamOutput support FlushDelay.  (#3002)
 discard f59d30da45 HDDS-6298. Add XceiverServerRatis stream config (#3070)
 discard 54361e5a4d HDDS-6138.[Ozone-Streaming] Define a limit on the size of the retry bufferList. (#2946)
 discard fdd7b64156 HDDS-6281. Update ratis version to 2.3.0-94db58b-SNAPSHOT version (#3059)
 discard 151ee53cd7 HDDS-6178. [Ozone-Streaming] Fix NPE in HDDS-6139. (#2984)
 discard 6bd6cc7f2d HDDS-6139. [Ozone-Streaming] Fix incorrect computation of totalAckDataLength. (#2978)
 discard 1ecb774a39 HDDS-6130. [Ozone-Streaming]  When releaseBuffers will get “Couldn 't find the required future” (#2939)
 discard 750c5f475a HDDS-6039. Define a minimum packet size during streaming writes. (#2883)
 discard 3b9141e18d HDDS-5851. [Ozone-Streaming] Define a PutBlock/maxBuffer fixed boundary for streaming writes. (#2866)
 discard 4194bd6ce0 HDDS-5743. [Ozone-Streaming] Add option to write files via streaming api in ofs and o3fs. (#2770)
 discard 9a16e41e1f HDDS-5879. [Ozone-Streaming] OzoneBucket add the createMultipartStreamKey method (#2760)
 discard 65ec5817e2 HDDS-5961. [Ozone-Streaming] update the usage space of Containers in the stream write (#2833)
 discard bb4eef2f7c HDDS-5987. [Ozone-Streaming] Add XceiverClientRatis stream config (#2841)
 discard 311b366526 HDDS-5763. Provide an Executor for each LocalStream in ContainerStateMachine (#2782)
 discard ca7a926165 HDDS-5895. [Ozone-Streaming] Make raft.server.data-stream.client.pool.size configurable (#2766)
 discard 508fda8744 HDDS-5674.[Ozone-Streaming] Handle client retries on exception (#2701)
 discard 79507fe72f HDDS-5849. [Ozone-Streaming]Write exceptions occur after checksum is enabled (#2729)
 discard 10b14300d2 HDDS-5486. [Ozone-Streaming] Streaming supports writing in Pipline mode (#2682)
 discard b7b16c6977 HDDS-5742. Avoid unnecessary Bytebuffer conversions (#2673)
 discard 74c5f0f5fc HDDS-5705. [Ozone-Streaming] Change ByteBufStreamOutput to ByteBufferStreamOutput (#2603)
 discard 6b51811d1f HDDS-5599.  [Ozone-Streaming]drop BufferPool and ChunkBuffer to avoid buffer copying (#2557)
 discard 18db09d7d4 HDDS-5488. [Ozone-Streaming] Add a new BlockOutputStream/KeyOutputStream to support streaming api (#2495)
 discard 0a4bbf5c7b HDDS-5480. [Ozone-Streaming] Client and server should support stream setup. (#2452)
 discard 444b7b3840 HDDS-5481. Fix stream() and link() method in ContainerStateMachine. (#2451)
 discard 425bc95539 HDDS-5452. Add link method to ContainerStateMachine for Ratis streaming (#2422)
 discard b3495e8d39 HDDS-5366.  [Ozone-Streaming] Implement stream method to ContainerStateMachine. (#2358).  Contributed by mingchao zhao
     add 15fcb733c7 HDDS-7209. Bump Jersey2 to 2.34 (#3745)
     add 64ec833e2b HDDS-7192. EC: ReplicationManager - create handlers to perform various container checks (#3743)
     add 9ad75b2300 HDDS-7114. CHAINED and STAND_ALONE replication-type should be removed from help message (#3672)
     add 286dffa8fa HDDS-7193. S3gateway Grpc Ranger Authorizer NPE Unhandled Exception (#3749)
     add 3130b03846 HDDS-7144. Recon: Make only hostname fixed in Datanodes page. (#3736)
     add 1dd5315704 HDDS-7225. Add Ratis tests for HealthCheck handlers of Replication Manager (#3757)
     add 1cf5678224 HDDS-7207. Fixed typo "layload" in om-echo command description as payload (#3756)
     add 1107fe711c HDDS-7221. EC: ReplicationManager - Encapsulate the under and over rep queues into a queue object (#3758)
     add 9d3bd02215 HDDS-7185. Encode asterisk for parsing signature (#3726)
     add d9b92e7a33 HDDS-7203. LookupKey Latency breakdown (#3742)
     add 1a6b4d2d6f HDDS-7239. EC: Add a Handler for CLOSING containers in Replication Manager (#3769)
     add 6c1a5ee07e HDDS-7240. List all volume operation should go through ACL check as well in order to trigger audit logging (#3770)
     add 04efea814a HDDS-7251. Replace Log4j 1.x with Reload4j (#3773)
     add f6f89a021c HDDS-7075. Implement FileSystem listStatusIterator to support file list paging (#3711)
     add c7038e08a8 HDDS-7039. EC: Handle the placement policy check in ECUnderReplicationHandler (#3645)
     add a8df5c530b HDDS-7145. Recon: Show last OM DB sync time on Overview page (#3765)
     add 69695eabb7 HDDS-6492. Add metric for failed container moves (#3751)
     add d22e42a2ac HDDS-7160. Recon: Make Container and Bucket stats clickable in Overviews page (#3775)
     add cb886035ac HDDS-7257. Bump snakeyaml to 1.32 (#3776)
     add 55efab1b3e HDDS-6863. Add Group-Id & Ratis-Roles Information for OM UI (#3520)
     add 718f7309a8 HDDS-7222. Recon Inactive pipelines not shown. (#3753)
     add efb3a80f73 HDDS-7232. Introduce container-level lock while handling container report events by SCM and Recon (#3766)
     add c96a342448 HDDS-7263. Add a handler for Quasi Closed containers to RM (#3785)
     add 1ff97328b7 HDDS-7206. Change the placement policy interface to allow existing nodes to be specified for Rack Scatter Policy. (#3744)
     add cdd0fb4ce1 HDDS-7229. Introduce container location cache in ScmClient (#3771)
     add 8dbbb4eb5f HDDS-7268. EC: Fix tests for HealthCheck handlers of RM that use Replica Indexes for Ratis Containers (#3793)
     add d029910e75 HDDS-7283. Set coreSize equal to maxSize for threadPoolExecutor (#3792)
     add 649d713817 HDDS-7235. EOFException occurs when executing TPC-DS using o3fs (#3764)
     add 3b6ae4f761 HDDS-7285. Fix concurrency issue in DeleteBlocksCommandHandler (#3796)
     add 23a0f743bc HDDS-7219. Remove not applicable info from ozone bucket list for links (#3777)
     add 1585f660c1 HDDS-7250. Add HttpServer metrics (#3772)
     add 0aabe8abb6 HDDS-7282. Add EstimatedKeyCount metrics for SCM DB (#3791)
     add 4e364001ac HDDS-7211. Remove dead code from KeyManagerImpl (#3747)
     add 0edb2ecfe4 HDDS-7217. OM logs wrong bucket layout (#3750)
     add 2737d3d065 HDDS-7208. Erasure coding and encryption are not flagged on FileStatus (#3768)
     add e0ea7df9e5 HDDS-7259. Fix uncounted blocksDeleted in BlockDeletingService (#3779)
     add 099422c5db HDDS-7293. Bump jackson2 to 2.13.4 (#3808)
     add 625e84f95e HDDS-7286. Clean up ContainerManager (#3797)
     add e7f4c05d09 HDDS-7294. Move ozone manager background services to a separate package (#3805)
     add 69d4dbd95a HDDS-7302. Fix inaccurate container block count caused by missing block (#3814)
     add 17753bc93c HDDS-7287. Send deleteBlocksRequest with correct retry count (#3799)
     add b808deb8c8 HDDS-6664. Implements getUri in TrashOzoneFileSystem (#3795)
     add 32df39e8c8 HDDS-7299. Migrate simple tests in hdds-container-service to JUnit5 (#3813)
     add 834ef98018 HDDS-7230. Implement GetKeyInfo API (#3780)
     add f0040c294b HDDS-7304. EC: EC Decode can fail when byteBuffer from elastic pool is larger than chunksize (#3817)
     add afb47176b7 HDDS-7271. Ozone Integration test shows memory leak (graceful shutdown cleanup) (#3787)
     add bb46766e23 Revert "HDDS-7271. Ozone Integration test shows memory leak (graceful shutdown cleanup) (#3787)"
     add 74deb5959a HDDS-7236. Enable Recon SCM DB bootstrap by default. (#3823)
     add 0a5fc405e5 HDDS-6440. Handle custom metadata (x-amz-meta) during put-object through S3 API. (#3728)
     add e529ed4586 HDDS-7088. OM incorrectly detects SCM Ratis Group ID when OM and SCM are colocated with same Ratis storage directory. (#3809)
     add 54af0dacb0 HDDS-7313. Update github actions for Node16 (#3825)
     add 15217fe59a HDDS-7271. Ozone Integration test shows memory leak (graceful shutdown cleanup) (#3826)
     add a905b4bbb6 HDDS-6946. Bump Hadoop to 3.3.4 (#3557)
     add 2bfb6d9ec5 HDDS-7315 SCM order of close of dbstore and other services (#3830)
     add b18fb595e4 HDDS-7308 Removed cluster2 from serviceIds (#3819)
     add 4515fe20c2 HDDS-7291. Fixing exception handling in case of non positive replica index (#3806)
     add 58f0749b52 HDDS-7270. Fix bug in checking healthy replica placement policy check in EC underReplication handler (#3828)
     add 6e2de99324 HDDS-7308. Fix config example syntax highlight in OM HA (#3833)
     add 330b2e308d HDDS-7322. Remove unused dependencies hsqldb and jettison (#3834)
     add 46e58a6350 HDDS-7300. Race condition between full data scan and block deletion (#3811)
     add 02341d77f3 HDDS-7269. Remove fair lock configuration for container state management flows. (#3815)
     add 35eedb6b36 HDDS-7303. EC: ECBlockReconstructedStripeInputStream should set initialized only at the end of init() (#3816)
     add c45b31a4f4 HDDS-7330. Cleanup of BucketManagerImpl (#3838)
     add c0de6bcdeb HDDS-7261. Add container location cache metrics. (#3829)
     add 554eb9eb54 HDDS-7337. Replace set-output in Github Actions workflow (#3839)
     add 9d05818a87 HDDS-7343. Do not log exception on file not found in getFileStatus() (#3849)
     add 2dc800dc80 HDDS-7214. Continuous start & stop can have hanging threads in stopping (#3782)
     add f7ba4ba588 HDDS-7340. Bump jackson-databind to 2.13.4.2 (#3845)
     add 02c266d3fe HDDS-6893. EC: ReplicationManager - move the empty container handling into RM from Legacy (#3831)
     add 11f4686ce8 HDDS-7254. Document that moving SCM from non-HA to HA is currently unsupported. (#3846)
     add 03e670b865 HDDS-7149. Update ratis version to 2.4.0 and thirdparty version to 1.0.2. (#3855)
     add a6316c8320 HDDS-7352. OM log flooded by AWSV4AuthValidator (#3857)
     add 237a9a1594 HDDS-7058. EC: ReplicationManager - Implement ratis container replication check handler (#3802)
     add 977ab59e11 HDDS-7341. EC: Close pipelines with unregistered nodes (#3850)
     add d5dc65eaa0 HDDS-7305. Fix Hadoop imports (#3822)
     add d32e96c835 HDDS-7351. Use jackson-bom to ensure consistent Jackson version (#3856)
     add e45f9b8333 HDDS-7199. Implement new mix workload Read/Write Freon command which meets specific test requirements (#3754)
     add ff6d15f5df HDDS-7354. SchemaV3 blockData not deleted in table (#3860)
     add 3fd7cd0896 Revert "HDDS-7199. Implement new mix workload Read/Write Freon command which meets specific test requirements (#3754)"
     add 1fa6d02e8a HDDS-6930. SCM,OM,RECON should not print ERROR and exit with code 1 on successful shutdown (#3848)
     add df48ca4fe3 HDDS-7356. Update SCM-HA.zh.md to match the English version (#3861)
     add 31560fcdfe HDDS-7355. non-primordial scm fail to get signed cert from primordial SCM when converting an unsecure cluster to secure (#3859)
     add f9b74a25cf HDDS-6210. EC: Add EC metrics (#3851)
     add 5c2a39365a HDDS-7369. Fix wrong order of command arguments in Nonrolling-Upgrade.md (#3866)
     add 13a6d0178e HDDS-7141. Recon: Improve Disk Usage Page (#3789)
     add ae59f8ae5f HDDS-7248. Recon: Expand the container status page to show all unhealthy container states (#3837)
     add ecdfc20756 HDDS-7199. Implement new mix workload Read/Write Freon command (#3872)
     add fdc57a93cb HDDS-7403. README Security Improvement (#3879)
     add df0d1e81e3 HDDS-7368. [Multi-Tenant] Add Volume Existence check in preExecute for OMTenantCreateRequest (#3869)
     new e15e987a41 HDDS-5366.  [Ozone-Streaming] Implement stream method to ContainerStateMachine. (#2358).  Contributed by mingchao zhao
     new 5626566ba7 HDDS-5452. Add link method to ContainerStateMachine for Ratis streaming (#2422)
     new 01beaf8f43 HDDS-5481. Fix stream() and link() method in ContainerStateMachine. (#2451)
     new 0568725ac9 HDDS-5480. [Ozone-Streaming] Client and server should support stream setup. (#2452)
     new 0952195d73 HDDS-5488. [Ozone-Streaming] Add a new BlockOutputStream/KeyOutputStream to support streaming api (#2495)
     new ae462a6747 HDDS-5599.  [Ozone-Streaming]drop BufferPool and ChunkBuffer to avoid buffer copying (#2557)
     new 81b9f3d7c1 HDDS-5705. [Ozone-Streaming] Change ByteBufStreamOutput to ByteBufferStreamOutput (#2603)
     new b2133a5bad HDDS-5742. Avoid unnecessary Bytebuffer conversions (#2673)
     new 279168014c HDDS-5486. [Ozone-Streaming] Streaming supports writing in Pipline mode (#2682)
     new b1f06e8480 HDDS-5849. [Ozone-Streaming]Write exceptions occur after checksum is enabled (#2729)
     new 5b73b5148f HDDS-5674.[Ozone-Streaming] Handle client retries on exception (#2701)
     new 11fb9bf708 HDDS-5895. [Ozone-Streaming] Make raft.server.data-stream.client.pool.size configurable (#2766)
     new 7cbb3fa89c HDDS-5763. Provide an Executor for each LocalStream in ContainerStateMachine (#2782)
     new 3cf08778de HDDS-5987. [Ozone-Streaming] Add XceiverClientRatis stream config (#2841)
     new 8cbadbcaf6 HDDS-5961. [Ozone-Streaming] update the usage space of Containers in the stream write (#2833)
     new 8de50af361 HDDS-5879. [Ozone-Streaming] OzoneBucket add the createMultipartStreamKey method (#2760)
     new f3a245157a HDDS-5743. [Ozone-Streaming] Add option to write files via streaming api in ofs and o3fs. (#2770)
     new eb441a8e82 HDDS-5851. [Ozone-Streaming] Define a PutBlock/maxBuffer fixed boundary for streaming writes. (#2866)
     new 7e61b1f642 HDDS-6039. Define a minimum packet size during streaming writes. (#2883)
     new ac981a7722 HDDS-6130. [Ozone-Streaming]  When releaseBuffers will get “Couldn 't find the required future” (#2939)
     new 25c3495e58 HDDS-6139. [Ozone-Streaming] Fix incorrect computation of totalAckDataLength. (#2978)
     new 33c0260ab5 HDDS-6178. [Ozone-Streaming] Fix NPE in HDDS-6139. (#2984)
     new b3ec54827f HDDS-6281. Update ratis version to 2.3.0-94db58b-SNAPSHOT version (#3059)
     new 693eea7ae6 HDDS-6138.[Ozone-Streaming] Define a limit on the size of the retry bufferList. (#2946)
     new 1a31d6beef HDDS-6298. Add XceiverServerRatis stream config (#3070)
     new 434d1fc57d HDDS-5487. [Ozone-Streaming] BlockDataStreamOutput support FlushDelay.  (#3002)
     new cca5e3b020 HDDS-6282. Fix BlockDataStreamOutput#doFlushIfNeeded NPE (#3060)
     new 3dde9e27b9 HDDS-6229. [Ozone-Streaming] Data Channel abstraction on datanode (#3023)
     new b2ec47d5d3 HDDS-6355. [Ozone-Streaming] Fix CheckStyle problem (#3119)
     new 8bbe943d23 HDDS-6388. [Ozone-Streaming] Streaming write support both pipeline model and star model (#3145)
     new 8e6be787c9 HDDS-5798. [Ozone-Streaming] Setup TlsConf parameters. (#3207)
     new 99b414df25 HDDS-6137. [Ozone-Streaming] Refactor KeyDataStreamOutput. (#3195)
     new f2d574ce3e HDDS-6500. [Ozone-Streaming] Buffer the PutBlockRequest at the end of the stream. (#3229)
     new 4a5b68370b HDDS-5666. Add option to createKey via streaming api in Freon (#2574)
     new 7805efb012 HDDS-6592. [Ozone-Streaming] Fix ContainerStateMachine#applyTransaction assert error (#3315)
     new a5980ce1a4 HDDS-6842. [Ozone-Streaming] Reduce the number of watch requests in StreamCommitWatcher. (#3492)
     new 29a8c157fd HDDS-6867.  [Ozone-Streaming] PutKeyHandler should not use streaming to put EC key. (#3516)
     new 653a120236 HDDS-6955. [Ozone-streaming] Add explicit stream flag in ozone shell (#3559)

This update added new revisions after undoing existing revisions.
That is to say, some revisions that were in the old version of the
branch are not in the new version.  This situation occurs
when a user --force pushes a change and generates a repository
containing something like this:

 * -- * -- B -- O -- O -- O   (c84da64fbc)
            \
             N -- N -- N   refs/heads/HDDS-4454 (653a120236)

You should already have received notification emails for all of the O
revisions, and so the following emails describe only the N revisions
from the common base, B.

Any revisions marked "omit" are not gone; other references still
refer to them.  Any revisions marked "discard" are gone forever.

The 38 revisions listed above as "new" are entirely new to this
repository and will be described in separate emails.  The revisions
listed as "add" were already present in the repository and have only
been added to this reference.
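
For illustration, a history like the one diagrammed above is usually
produced by rebasing the branch locally and force-pushing the result.
The commands below are a sketch, not taken from this push; the branch
name matches this repository, but the rebase target is an assumption:

    git checkout HDDS-4454
    git rebase origin/master          # recreates the streaming commits as the N revisions
    git push --force origin HDDS-4454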


Summary of changes:
 .github/workflows/close-pending.yaml               |    2 +-
 .github/workflows/comments.yaml                    |    2 +-
 .github/workflows/post-commit.yml                  |   73 +-
 README.md                                          |    2 +-
 dev-support/ci/lib/_initialization.sh              |    4 +-
 hadoop-hdds/client/pom.xml                         |    2 +-
 .../hadoop/hdds/scm/XceiverClientMetrics.java      |   10 +
 .../hadoop/hdds/scm/storage/BlockInputStream.java  |    2 +-
 .../hadoop/hdds/scm/storage/ChunkInputStream.java  |    2 +-
 .../hadoop/ozone/client/io/ECBlockInputStream.java |    2 +-
 .../ozone/client/io/ECBlockInputStreamProxy.java   |    7 +
 .../client/io/ECBlockReconstructedInputStream.java |    2 +-
 .../io/ECBlockReconstructedStripeInputStream.java  |   13 +-
 .../hdds/scm/storage/TestChunkInputStream.java     |    9 +-
 .../ozone/client/io/TestECBlockInputStream.java    |    3 +-
 .../TestECBlockReconstructedStripeInputStream.java |   20 +
 hadoop-hdds/common/pom.xml                         |    8 +-
 .../hadoop/hdds/client/RatisReplicationConfig.java |    2 +-
 .../hadoop/hdds/protocol/DatanodeDetails.java      |   11 +
 .../apache/hadoop/hdds/scm/PlacementPolicy.java    |   18 +-
 .../protocol/StorageContainerLocationProtocol.java |    2 +-
 .../java/org/apache/hadoop/ozone/OzoneConsts.java  |    3 +
 .../java/org/apache/hadoop/util/CacheMetrics.java  |  103 ++
 .../org/apache/hadoop/util/CheckedRunnable.java    |   25 +-
 .../org/apache/hadoop/util/CheckedSupplier.java    |   29 +
 .../java/org/apache/hadoop/util/MetricUtil.java    |   52 +
 .../common/src/main/resources/ozone-default.xml    |   27 +-
 hadoop-hdds/container-service/pom.xml              |    7 +-
 .../apache/hadoop/ozone/HddsDatanodeService.java   |    1 +
 .../container/common/report/ReportPublisher.java   |    6 +-
 .../common/statemachine/DatanodeStateMachine.java  |    8 +-
 .../common/statemachine/EndpointStateMachine.java  |    1 +
 .../commandhandler/DeleteBlocksCommandHandler.java |   33 +-
 .../DeleteContainerCommandHandler.java             |   11 +-
 .../ECReconstructionCoordinator.java               |   15 +-
 .../ec/reconstruction/ECReconstructionMetrics.java |   80 ++
 .../container/keyvalue/KeyValueContainer.java      |    4 +-
 .../container/keyvalue/KeyValueContainerCheck.java |  108 +-
 .../background/BlockDeletingService.java           |   70 +-
 .../protocol/commands/DeleteContainerCommand.java  |    6 +
 .../metadata/TestDatanodeCRLStoreImpl.java         |   24 +-
 .../hadoop/ozone/TestHddsDatanodeService.java      |   16 +-
 .../hadoop/ozone/TestHddsSecureDatanodeInit.java   |   96 +-
 .../container/common/TestBlockDeletingService.java |  137 ++-
 .../common/TestContainerLayoutVersion.java         |    4 +-
 .../common/TestDatanodeLayOutVersion.java          |   10 +-
 .../container/common/TestDatanodeStateMachine.java |   52 +-
 .../common/helpers/TestContainerUtils.java         |    4 +-
 .../container/common/report/TestReportManager.java |    2 +-
 .../common/report/TestReportPublisher.java         |   39 +-
 .../statemachine/TestDatanodeConfiguration.java    |    4 +-
 .../common/statemachine/TestStateContext.java      |   22 +-
 .../states/datanode/TestRunningDatanodeState.java  |    8 +-
 .../states/endpoint/TestHeartbeatEndpointTask.java |   74 +-
 .../volume/TestCapacityVolumeChoosingPolicy.java   |   31 +-
 .../volume/TestRoundRobinVolumeChoosingPolicy.java |   35 +-
 .../TestECReconstructionSupervisor.java            |    9 +-
 .../keyvalue/TestKeyValueContainerCheck.java       |    4 +-
 .../TestKeyValueHandlerWithUnhealthyContainer.java |   15 +-
 .../container/keyvalue/helpers/TestChunkUtils.java |   20 +-
 .../keyvalue/impl/AbstractTestChunkManager.java    |   20 +-
 .../keyvalue/impl/CommonChunkManagerTestCases.java |   10 +-
 .../keyvalue/impl/TestChunkManagerDummyImpl.java   |    4 +-
 .../keyvalue/impl/TestFilePerBlockStrategy.java    |    6 +-
 .../keyvalue/impl/TestFilePerChunkStrategy.java    |    8 +-
 .../TestContainerScannerConfiguration.java         |    8 +-
 .../ozoneimpl/TestContainerScannerMetrics.java     |   21 +-
 .../ReplicationSupervisorScheduling.java           |    8 +-
 .../replication/TestGrpcOutputStream.java          |   16 +-
 .../replication/TestMeasuredReplicator.java        |   36 +-
 .../replication/TestReplicationConfig.java         |    4 +-
 .../replication/TestSimpleContainerDownloader.java |   19 +-
 .../stream/TestDirstreamClientHandler.java         |   34 +-
 .../container/stream/TestStreamingServer.java      |   18 +-
 .../TestReconstructionECContainersCommands.java    |   27 +-
 .../docs/content/feature/Nonrolling-Upgrade.md     |    6 +-
 hadoop-hdds/docs/content/feature/OM-HA.md          |    4 +-
 hadoop-hdds/docs/content/feature/OM-HA.zh.md       |    4 +-
 hadoop-hdds/docs/content/feature/SCM-HA.md         |    7 +
 hadoop-hdds/docs/content/feature/SCM-HA.zh.md      |   73 +-
 hadoop-hdds/framework/pom.xml                      |   24 +-
 ...inerLocationProtocolClientSideTranslatorPB.java |    2 +-
 .../hadoop/hdds/server/http/HttpServer2.java       |    3 +
 .../hdds/server/http/HttpServer2Metrics.java       |   94 ++
 .../hdds/server/http/RatisDropwizardExports.java   |   14 +
 .../server/http/TestHttpServer2MetricsTest.java    |   94 ++
 hadoop-hdds/hadoop-dependency-client/pom.xml       |   18 +-
 hadoop-hdds/hadoop-dependency-server/pom.xml       |   28 +
 hadoop-hdds/server-scm/pom.xml                     |   10 +
 .../hadoop/hdds/scm/SCMCommonPlacementPolicy.java  |   51 +-
 .../hadoop/hdds/scm/block/DeletedBlockLogImpl.java |   14 +-
 .../hdds/scm/block/SCMBlockDeletingService.java    |    2 +-
 .../hdds/scm/container/ContainerManager.java       |   14 +-
 .../hdds/scm/container/ContainerManagerImpl.java   |   27 +-
 .../hdds/scm/container/ContainerReportHandler.java |    7 +-
 .../scm/container/ContainerStateManagerImpl.java   |  174 +--
 .../IncrementalContainerReportHandler.java         |    5 +-
 .../scm/container/balancer/ContainerBalancer.java  | 1159 ++------------------
 .../balancer/ContainerBalancerMetrics.java         |   29 +
 ...nerBalancer.java => ContainerBalancerTask.java} |  638 +++--------
 .../algorithms/SCMContainerPlacementCapacity.java  |   12 +-
 .../algorithms/SCMContainerPlacementRackAware.java |   14 +-
 .../SCMContainerPlacementRackScatter.java          |  264 +++--
 .../algorithms/SCMContainerPlacementRandom.java    |   13 +-
 .../replication/ContainerCheckRequest.java         |  120 ++
 .../replication/ContainerHealthCheck.java          |   35 -
 .../replication/ContainerHealthResult.java         |   78 +-
 .../replication/ECContainerHealthCheck.java        |   86 --
 .../replication/ECOverReplicationHandler.java      |   22 +-
 .../replication/ECUnderReplicationHandler.java     |  136 ++-
 .../replication/LegacyReplicationManager.java      |    1 -
 .../RatisContainerReplicaCount.java                |   87 +-
 .../container/replication/ReplicationManager.java  |  221 ++--
 .../container/replication/ReplicationQueue.java    |   73 ++
 .../replication/health/AbstractCheck.java          |   71 ++
 .../ClosedWithMismatchedReplicasHandler.java       |   89 ++
 .../health/ClosingContainerHandler.java            |   62 ++
 .../health/ECReplicationCheckHandler.java          |  134 +++
 .../replication/health/EmptyContainerHandler.java  |  141 +++
 .../container/replication/health/HealthCheck.java  |   56 +
 .../replication/health/OpenContainerHandler.java   |   81 ++
 .../health/QuasiClosedContainerHandler.java        |  131 +++
 .../health/RatisReplicationCheckHandler.java       |  201 ++++
 .../container/replication/health/package-info.java |   10 +-
 .../container/report/ContainerReportValidator.java |  104 ++
 .../hdds/scm/container/report/package-info.java    |    8 +-
 .../scm/container/states/ContainerAttribute.java   |   14 +-
 .../hadoop/hdds/scm/ha/BackgroundSCMService.java   |    2 +-
 .../apache/hadoop/hdds/scm/ha/SCMStateMachine.java |    7 +-
 .../hdds/scm/metadata/SCMMetadataStoreImpl.java    |   51 +-
 .../hdds/scm/metadata/SCMMetadataStoreMetrics.java |  116 ++
 .../hdds/scm/pipeline/PipelineManagerImpl.java     |   42 +-
 .../hdds/scm/pipeline/PipelinePlacementPolicy.java |   10 +-
 .../hdds/scm/server/SCMClientProtocolServer.java   |    2 +-
 .../hdds/scm/server/StorageContainerManager.java   |   31 +-
 .../org/apache/hadoop/hdds/scm/HddsTestUtils.java  |   76 +-
 .../hadoop/hdds/scm/block/TestDeletedBlockLog.java |   19 +-
 .../scm/container/TestContainerManagerImpl.java    |   12 +-
 .../scm/container/TestContainerReportHandler.java  |   59 +
 .../TestIncrementalContainerReportHandler.java     |  124 ++-
 .../container/balancer/TestContainerBalancer.java  |  953 ++--------------
 ...alancer.java => TestContainerBalancerTask.java} |  218 ++--
 .../algorithms/TestContainerPlacementFactory.java  |    4 +-
 .../TestSCMContainerPlacementRackScatter.java      |   76 +-
 .../container/replication/ReplicationTestUtil.java |   89 +-
 .../replication/TestECOverReplicationHandler.java  |    5 +-
 .../replication/TestECUnderReplicationHandler.java |   40 +-
 .../TestRatisContainerReplicaCount.java            |   58 +-
 .../replication/TestReplicationManager.java        |  102 +-
 .../TestClosedWithMismatchedReplicasHandler.java   |  217 ++++
 .../health/TestClosingContainerHandler.java        |  208 ++++
 .../TestECReplicationCheckHandler.java}            |  199 +++-
 .../health/TestEmptyContainerHandler.java          |  233 ++++
 .../health/TestOpenContainerHandler.java           |  168 +++
 .../health/TestQuasiClosedContainerHandler.java    |  238 ++++
 .../health/TestRatisReplicationCheckHandler.java   |  560 ++++++++++
 .../container/replication/health/package-info.java |   11 +-
 .../report/TestContainerReportValidator.java       |   77 ++
 .../hdds/scm/container/report/package-info.java    |   15 +-
 .../scm/metadata/TestSCMMetadataStoreImpl.java     |   77 ++
 .../hdds/scm/node/TestDatanodeAdminMonitor.java    |    2 +-
 .../hdds/scm/pipeline/TestPipelineManagerImpl.java |   41 +-
 hadoop-hdds/test-utils/pom.xml                     |    4 +-
 hadoop-hdds/tools/pom.xml                          |    6 +-
 hadoop-ozone/client/pom.xml                        |    6 +-
 .../org/apache/hadoop/ozone/client/OzoneKey.java   |   32 +-
 .../hadoop/ozone/client/OzoneKeyDetails.java       |   11 +-
 .../client/checksum/BaseFileChecksumHelper.java    |    2 +-
 .../apache/hadoop/ozone/client/rpc/RpcClient.java  |    9 +-
 hadoop-ozone/common/pom.xml                        |   12 +-
 .../main/java/org/apache/hadoop/ozone/OmUtils.java |   39 +
 .../org/apache/hadoop/ozone/om/OMConfigKeys.java   |   11 +
 .../ozone/om/helpers/KeyInfoWithVolumeContext.java |  110 ++
 .../apache/hadoop/ozone/om/helpers/OmKeyArgs.java  |   55 +-
 .../ozone/om/protocol/OzoneManagerProtocol.java    |   13 +
 ...OzoneManagerProtocolClientSideTranslatorPB.java |   32 +-
 hadoop-ozone/csi/pom.xml                           |    6 +-
 hadoop-ozone/dist/src/main/license/bin/LICENSE.txt |    5 +-
 hadoop-ozone/dist/src/main/license/bin/NOTICE.txt  |    2 +-
 hadoop-ozone/dist/src/main/license/jar-report.txt  |    6 +-
 .../dist/src/main/smoketest/ec/basic.robot         |    6 +-
 .../src/main/smoketest/freon/read-write-key.robot  |   53 +
 .../dist/src/main/smoketest/s3/commonawslib.robot  |    5 +
 .../dist/src/main/smoketest/s3/objectputget.robot  |   26 +
 hadoop-ozone/integration-test/pom.xml              |   94 +-
 .../ozone/TestDirectoryDeletingServiceWithFSO.java |    4 +-
 .../hadoop/fs/ozone/TestOzoneFSInputStream.java    |   80 +-
 .../hadoop/fs/ozone/TestOzoneFileSystem.java       |  298 ++++-
 .../hadoop/fs/ozone/TestRootedDDSWithFSO.java      |    2 +-
 .../hadoop/fs/ozone/TestRootedOzoneFileSystem.java |  218 ++++
 .../TestContainerStateManagerIntegration.java      |    2 +-
 .../hdds/scm/storage/TestContainerCommandsEC.java  |   17 +-
 .../java/org/apache/hadoop/ozone/TestDataUtil.java |   10 +
 .../ozone/TestStorageContainerManagerHelper.java   |    1 -
 .../apache/hadoop/ozone/client/rpc/TestBCSID.java  |    1 -
 .../rpc/TestCloseContainerHandlingByClient.java    |    7 -
 .../rpc/TestContainerStateMachineFailures.java     |    1 -
 .../client/rpc/TestFailureHandlingByClient.java    |    5 -
 .../rpc/TestFailureHandlingByClientFlushDelay.java |    1 -
 .../rpc/TestMultiBlockWritesWithDnFailures.java    |    2 -
 .../client/rpc/TestOzoneAtRestEncryption.java      |    1 -
 .../client/rpc/TestOzoneRpcClientAbstract.java     |    5 +-
 .../client/rpc/TestOzoneRpcClientWithRatis.java    |    2 +-
 .../hadoop/ozone/client/rpc/TestReadRetries.java   |    2 +-
 .../ozone/client/rpc/TestSecureOzoneRpcClient.java |    1 -
 .../client/rpc/TestValidateBCSIDOnRestart.java     |    2 +-
 .../commandhandler/TestBlockDeletion.java          |    8 +-
 .../TestCloseContainerByPipeline.java              |    6 +-
 .../commandhandler/TestCloseContainerHandler.java  |    1 -
 .../commandhandler/TestDeleteContainerHandler.java |    1 -
 .../ozone/dn/ratis/TestDnRatisLogParser.java       |    1 -
 .../hadoop/ozone/dn/scanner/TestDataScanner.java   |    1 -
 .../ozone/om/TestContainerReportWithKeys.java      |    1 -
 .../apache/hadoop/ozone/om/TestKeyManagerImpl.java |   11 +-
 .../org/apache/hadoop/ozone/om/TestKeyPurging.java |    1 +
 .../hadoop/ozone/om/TestOMEpochForNonRatis.java    |    2 +-
 .../hadoop/ozone/om/TestOmBlockVersioning.java     |    2 -
 .../org/apache/hadoop/ozone/om/TestOmMetrics.java  |   61 +-
 .../ozone/om/TestOzoneManagerHAKeyDeletion.java    |    1 +
 .../ozone/om/multitenant/RangerUserRequest.java    |    3 +-
 .../TestRangerBGSyncService.java                   |    7 +-
 .../hadoop/ozone/recon/TestReconAsPassiveScm.java  |    4 +-
 .../apache/hadoop/ozone/recon/TestReconTasks.java  |    4 +-
 .../ozone/scm/TestSCMInstallSnapshotWithHA.java    |    5 +-
 .../ozone/scm/TestStorageContainerManagerHA.java   |    2 +-
 .../TestSCMPipelineBytesWrittenMetrics.java        |    2 +-
 .../hadoop/ozone/shell/TestOzoneShellHA.java       |   21 +-
 .../hadoop/ozone/shell/TestOzoneTenantShell.java   |    2 +-
 .../src/test/resources/testSequenceFile            |  Bin 0 -> 96 bytes
 .../src/main/proto/OmClientProtocol.proto          |   16 +
 .../apache/hadoop/ozone/om/BucketManagerImpl.java  |  103 +-
 .../org/apache/hadoop/ozone/om/KeyManager.java     |   11 +
 .../org/apache/hadoop/ozone/om/KeyManagerImpl.java |  805 +++-----------
 .../java/org/apache/hadoop/ozone/om/OMMXBean.java  |    2 +
 .../java/org/apache/hadoop/ozone/om/OMMetrics.java |   33 +
 .../hadoop/ozone/om/OMMultiTenantManager.java      |    2 +-
 .../hadoop/ozone/om/OMMultiTenantManagerImpl.java  |    2 +-
 .../hadoop/ozone/om/OMPerformanceMetrics.java      |  143 +++
 .../org/apache/hadoop/ozone/om/OzoneManager.java   |  144 ++-
 .../hadoop/ozone/om/OzonePrefixPathImpl.java       |    2 -
 .../java/org/apache/hadoop/ozone/om/ScmClient.java |   89 +-
 .../hadoop/ozone/om/TrashOzoneFileSystem.java      |    4 +-
 .../ozone/om/ratis/OzoneManagerRatisServer.java    |   13 +
 .../ozone/om/ratis/OzoneManagerStateMachine.java   |    8 +-
 .../om/request/bucket/OMBucketCreateRequest.java   |   13 +-
 .../ozone/om/request/key/OMKeyCommitRequest.java   |    6 +
 .../ozone/om/request/key/OMKeyCreateRequest.java   |    3 +
 .../request/s3/tenant/OMTenantCreateRequest.java   |   11 +
 .../om/{ => service}/DirectoryDeletingService.java |    3 +-
 .../ozone/om/{ => service}/KeyDeletingService.java |    6 +-
 .../OMRangerBGSyncService.java                     |    8 +-
 .../om/{ => service}/OpenKeyCleanupService.java    |    5 +-
 .../hadoop/ozone/om/service/package-info.java}     |    9 +-
 .../protocolPB/OzoneManagerRequestHandler.java     |   29 +-
 .../hadoop/ozone/security/AWSV4AuthValidator.java  |    4 +-
 .../webapps/ozoneManager/om-overview.html          |   20 +
 .../resources/webapps/ozoneManager/ozoneManager.js |   30 +
 .../org/apache/hadoop/ozone/om/OmTestManagers.java |    2 +-
 .../apache/hadoop/ozone/om/TestKeyManagerUnit.java |  179 ++-
 .../org/apache/hadoop/ozone/om/TestScmClient.java  |  191 ++++
 .../ozone/om/request/key/TestOMKeyRequest.java     |    5 +-
 .../om/{ => service}/TestKeyDeletingService.java   |    6 +-
 .../{ => service}/TestOpenKeyCleanupService.java   |    6 +-
 hadoop-ozone/ozonefs-common/pom.xml                |    5 +-
 .../fs/ozone/BasicOzoneClientAdapterImpl.java      |    6 +-
 .../hadoop/fs/ozone/BasicOzoneFileSystem.java      |  169 ++-
 .../ozone/BasicRootedOzoneClientAdapterImpl.java   |   20 +-
 .../fs/ozone/BasicRootedOzoneFileSystem.java       |  128 ++-
 .../apache/hadoop/fs/ozone/FileStatusAdapter.java  |   17 +-
 .../apache/hadoop/fs/ozone/OzoneClientUtils.java   |   13 +-
 hadoop-ozone/ozonefs-hadoop2/pom.xml               |    6 +-
 .../apache/hadoop/fs/ozone/OzoneFileSystem.java    |   20 +-
 hadoop-ozone/ozonefs-hadoop3/pom.xml               |    6 +-
 hadoop-ozone/ozonefs-shaded/pom.xml                |    4 +
 hadoop-ozone/ozonefs/pom.xml                       |    5 +-
 .../hadoop/ozone/recon/ReconServerConfigKeys.java  |    2 +-
 .../hadoop/ozone/recon/api/ContainerEndpoint.java  |    1 +
 .../ozone/recon/scm/ReconContainerManager.java     |   27 +-
 .../scm/ReconStorageContainerManagerFacade.java    |    2 +-
 .../webapps/recon/ozone-recon-web/api/db.json      |  372 ++++++-
 .../webapps/recon/ozone-recon-web/api/routes.json  |    9 +-
 .../components/autoReloadPanel/autoReloadPanel.tsx |   44 +-
 .../src/components/navBar/navBar.tsx               |    5 +
 .../webapps/recon/ozone-recon-web/src/routes.tsx   |    8 +
 .../src/views/datanodes/datanodes.tsx              |   29 +-
 .../src/views/diskUsage/diskUsage.less             |   36 +-
 .../src/views/diskUsage/diskUsage.tsx              |   68 +-
 .../views/missingContainers/missingContainers.tsx  |  227 ++--
 .../src/views/overview/overview.tsx                |   35 +-
 .../src/views/pipelines/pipelines.less             |    2 +-
 .../src/views/pipelines/pipelines.tsx              |    4 +-
 .../ozone/recon/fsck/TestContainerHealthTask.java  |    3 +-
 hadoop-ozone/s3gateway/pom.xml                     |   42 -
 .../hadoop/ozone/s3/endpoint/EndpointBase.java     |   72 +-
 .../hadoop/ozone/s3/endpoint/ObjectEndpoint.java   |   21 +-
 .../ozone/s3/signature/StringToSignProducer.java   |    1 +
 .../org/apache/hadoop/ozone/s3/util/S3Consts.java  |    2 +
 .../hadoop/ozone/s3/endpoint/TestEndpointBase.java |  106 ++
 .../apache/hadoop/ozone/debug/ChunkKeyHandler.java |    1 -
 .../java/org/apache/hadoop/ozone/freon/Freon.java  |    2 +
 .../ozone/freon/FreonReplicationOptions.java       |    2 +-
 .../hadoop/ozone/freon/KeyGeneratorUtil.java       |   50 +
 .../hadoop/ozone/freon/OmRPCLoadGenerator.java     |    2 +-
 .../ozone/freon/OzoneClientKeyReadWriteOps.java    |  245 +++++
 .../hadoop/ozone/freon/RangeKeysGenerator.java     |  164 +++
 .../hadoop/ozone/shell/ReplicationOptions.java     |   18 +-
 .../ozone/shell/ShellReplicationOptions.java       |    3 +-
 .../ozone/shell/bucket/InfoBucketHandler.java      |    2 +-
 .../ozone/shell/bucket/ListBucketHandler.java      |   17 +-
 pom.xml                                            |   96 +-
 310 files changed, 10711 insertions(+), 5212 deletions(-)
 create mode 100644 hadoop-hdds/common/src/main/java/org/apache/hadoop/util/CacheMetrics.java
 copy hadoop-ozone/ozonefs-hadoop2/src/main/java/org/apache/hadoop/fs/ozone/OzoneFileSystem.java => hadoop-hdds/common/src/main/java/org/apache/hadoop/util/CheckedRunnable.java (52%)
 create mode 100644 hadoop-hdds/common/src/main/java/org/apache/hadoop/util/CheckedSupplier.java
 create mode 100644 hadoop-hdds/common/src/main/java/org/apache/hadoop/util/MetricUtil.java
 create mode 100644 hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ec/reconstruction/ECReconstructionMetrics.java
 create mode 100644 hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/server/http/HttpServer2Metrics.java
 create mode 100644 hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/server/http/TestHttpServer2MetricsTest.java
 copy hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/balancer/{ContainerBalancer.java => ContainerBalancerTask.java} (66%)
 create mode 100644 hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ContainerCheckRequest.java
 delete mode 100644 hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ContainerHealthCheck.java
 delete mode 100644 hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ECContainerHealthCheck.java
 rename hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/{ => replication}/RatisContainerReplicaCount.java (76%)
 create mode 100644 hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ReplicationQueue.java
 create mode 100644 hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/health/AbstractCheck.java
 create mode 100644 hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/health/ClosedWithMismatchedReplicasHandler.java
 create mode 100644 hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/health/ClosingContainerHandler.java
 create mode 100644 hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/health/ECReplicationCheckHandler.java
 create mode 100644 hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/health/EmptyContainerHandler.java
 create mode 100644 hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/health/HealthCheck.java
 create mode 100644 hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/health/OpenContainerHandler.java
 create mode 100644 hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/health/QuasiClosedContainerHandler.java
 create mode 100644 hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/health/RatisReplicationCheckHandler.java
 copy hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/views/pipelines/pipelines.less => hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/health/package-info.java (86%)
 create mode 100644 hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/report/ContainerReportValidator.java
 copy hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/views/pipelines/pipelines.less => hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/report/package-info.java (93%)
 create mode 100644 hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/metadata/SCMMetadataStoreMetrics.java
 copy hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/balancer/{TestContainerBalancer.java => TestContainerBalancerTask.java} (87%)
 create mode 100644 hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/health/TestClosedWithMismatchedReplicasHandler.java
 create mode 100644 hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/health/TestClosingContainerHandler.java
 rename hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/{TestECContainerHealthCheck.java => health/TestECReplicationCheckHandler.java} (53%)
 create mode 100644 hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/health/TestEmptyContainerHandler.java
 create mode 100644 hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/health/TestOpenContainerHandler.java
 create mode 100644 hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/health/TestQuasiClosedContainerHandler.java
 create mode 100644 hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/health/TestRatisReplicationCheckHandler.java
 copy hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/views/pipelines/pipelines.less => hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/health/package-info.java (85%)
 create mode 100644 hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/report/TestContainerReportValidator.java
 copy hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/views/pipelines/pipelines.less => hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/report/package-info.java (77%)
 create mode 100644 hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/metadata/TestSCMMetadataStoreImpl.java
 create mode 100644 hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/KeyInfoWithVolumeContext.java
 create mode 100644 hadoop-ozone/dist/src/main/smoketest/freon/read-write-key.robot
 rename hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/{multitenant => service}/TestRangerBGSyncService.java (98%)
 create mode 100644 hadoop-ozone/integration-test/src/test/resources/testSequenceFile
 create mode 100644 hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMPerformanceMetrics.java
 rename hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/{ => service}/DirectoryDeletingService.java (99%)
 rename hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/{ => service}/KeyDeletingService.java (98%)
 rename hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/{multitenant => service}/OMRangerBGSyncService.java (98%)
 rename hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/{ => service}/OpenKeyCleanupService.java (97%)
 copy hadoop-ozone/{ozonefs-hadoop2/src/main/java/org/apache/hadoop/fs/ozone/OzoneFileSystem.java => ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/package-info.java} (82%)
 create mode 100644 hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestScmClient.java
 rename hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/{ => service}/TestKeyDeletingService.java (98%)
 rename hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/{ => service}/TestOpenKeyCleanupService.java (97%)
 create mode 100644 hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestEndpointBase.java
 create mode 100644 hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/KeyGeneratorUtil.java
 create mode 100644 hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/OzoneClientKeyReadWriteOps.java
 create mode 100644 hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/RangeKeysGenerator.java


---------------------------------------------------------------------
To unsubscribe, e-mail: commits-unsubscribe@ozone.apache.org
For additional commands, e-mail: commits-help@ozone.apache.org


[ozone] 24/38: HDDS-6138.[Ozone-Streaming] Define a limit on the size of the retry bufferList. (#2946)

Posted by sz...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

szetszwo pushed a commit to branch HDDS-4454
in repository https://gitbox.apache.org/repos/asf/ozone.git

commit 693eea7ae6deb1004aba02cbb5ff377739a93d3b
Author: Sadanand Shenoy <sa...@gmail.com>
AuthorDate: Thu Feb 10 14:36:03 2022 +0530

    HDDS-6138.[Ozone-Streaming] Define a limit on the size of the retry bufferList. (#2946)
    
    (cherry picked from commit 30f8311b0e2c92e2c3b665ac98993fc23a0b6709)
    (cherry picked from commit 54361e5a4ded763d83c5b3116203deece0303960)
---
 .../apache/hadoop/hdds/scm/OzoneClientConfig.java  | 33 +++++++++---------
 .../apache/hadoop/hdds/scm/XceiverClientRatis.java |  2 +-
 .../hdds/scm/storage/BlockDataStreamOutput.java    | 40 +++++++++++++++++-----
 .../org/apache/hadoop/ozone/MiniOzoneCluster.java  | 12 +++----
 .../apache/hadoop/ozone/MiniOzoneClusterImpl.java  | 11 +++---
 .../client/rpc/TestBlockDataStreamOutput.java      | 14 ++++----
 6 files changed, 67 insertions(+), 45 deletions(-)

diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/OzoneClientConfig.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/OzoneClientConfig.java
index 86a725220a..80cb14f03e 100644
--- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/OzoneClientConfig.java
+++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/OzoneClientConfig.java
@@ -66,14 +66,6 @@ public class OzoneClientConfig {
       tags = ConfigTag.CLIENT)
   private int streamBufferSize = 4 * 1024 * 1024;
 
-  @Config(key = "datastream.max.buffer.size",
-      defaultValue = "4MB",
-      type = ConfigType.SIZE,
-      description = "The maximum size of the ByteBuffer "
-          + "(used via ratis streaming)",
-      tags = ConfigTag.CLIENT)
-  private int dataStreamMaxBufferSize = 4 * 1024 * 1024;
-
   @Config(key = "datastream.buffer.flush.size",
       defaultValue = "16MB",
       type = ConfigType.SIZE,
@@ -89,6 +81,15 @@ public class OzoneClientConfig {
       tags = ConfigTag.CLIENT)
   private int dataStreamMinPacketSize = 1024 * 1024;
 
+  @Config(key = "datastream.window.size",
+      defaultValue = "64MB",
+      type = ConfigType.SIZE,
+      description = "Maximum size of BufferList(used for retry) size per " +
+          "BlockDataStreamOutput instance",
+      tags = ConfigTag.CLIENT)
+  private long streamWindowSize = 64 * 1024 * 1024;
+
+
   @Config(key = "stream.buffer.increment",
       defaultValue = "0B",
       type = ConfigType.SIZE,
@@ -251,14 +252,6 @@ public class OzoneClientConfig {
     this.streamBufferSize = streamBufferSize;
   }
 
-  public int getDataStreamMaxBufferSize() {
-    return dataStreamMaxBufferSize;
-  }
-
-  public void setDataStreamMaxBufferSize(int dataStreamMaxBufferSize) {
-    this.dataStreamMaxBufferSize = dataStreamMaxBufferSize;
-  }
-
   public boolean isStreamBufferFlushDelay() {
     return streamBufferFlushDelay;
   }
@@ -283,6 +276,14 @@ public class OzoneClientConfig {
     this.dataStreamMinPacketSize = dataStreamMinPacketSize;
   }
 
+  public long getStreamWindowSize() {
+    return streamWindowSize;
+  }
+
+  public void setStreamWindowSize(long streamWindowSize) {
+    this.streamWindowSize = streamWindowSize;
+  }
+
   public int getMaxRetryCount() {
     return maxRetryCount;
   }
diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientRatis.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientRatis.java
index 8e4ab16ee8..3ea269b08b 100644
--- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientRatis.java
+++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientRatis.java
@@ -136,7 +136,7 @@ public final class XceiverClientRatis extends XceiverClientSpi {
         .orElse(0L);
   }
 
-  private long updateCommitInfosMap(
+  public long updateCommitInfosMap(
       Collection<RaftProtos.CommitInfoProto> commitInfoProtos) {
     // if the commitInfo map is empty, just update the commit indexes for each
     // of the servers
diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockDataStreamOutput.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockDataStreamOutput.java
index ec925d1e6a..a3fe1c2479 100644
--- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockDataStreamOutput.java
+++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockDataStreamOutput.java
@@ -52,7 +52,9 @@ import org.slf4j.LoggerFactory;
 import java.io.IOException;
 import java.nio.ByteBuffer;
 import java.util.ArrayList;
+import java.util.LinkedList;
 import java.util.List;
+import java.util.Queue;
 import java.util.concurrent.CompletableFuture;
 import java.util.concurrent.CompletionException;
 import java.util.concurrent.ExecutionException;
@@ -116,9 +118,9 @@ public class BlockDataStreamOutput implements ByteBufferStreamOutput {
   // Also, corresponding to the logIndex, the corresponding list of buffers will
   // be released from the buffer pool.
   private final StreamCommitWatcher commitWatcher;
-  private final AtomicReference<CompletableFuture<
-      ContainerCommandResponseProto>> putBlockFuture
-      = new AtomicReference<>(CompletableFuture.completedFuture(null));
+
+  private Queue<CompletableFuture<ContainerCommandResponseProto>>
+      putBlockFutures = new LinkedList<>();
 
   private final List<DatanodeDetails> failedServers;
   private final Checksum checksum;
@@ -307,14 +309,33 @@ public class BlockDataStreamOutput implements ByteBufferStreamOutput {
   }
 
   private void doFlushIfNeeded() throws IOException {
-    Preconditions.checkArgument(config.getDataStreamBufferFlushSize() > config
-        .getDataStreamMaxBufferSize());
     long boundary = config.getDataStreamBufferFlushSize() / config
-        .getDataStreamMaxBufferSize();
+        .getDataStreamMinPacketSize();
+    // streamWindow is the maximum number of buffers that
+    // are allowed to exist in the bufferList. If buffers in
+    // the list exceed this limit, the client will wait till it gets
+    // one putBlockResponse (first index). This is similar to
+    // the bufferFull condition in async write path.
+    long streamWindow = config.getStreamWindowSize() / config
+        .getDataStreamMinPacketSize();
     if (!bufferList.isEmpty() && bufferList.size() % boundary == 0) {
       updateFlushLength();
       executePutBlock(false, false);
     }
+    if (bufferList.size()==streamWindow){
+      try {
+        checkOpen();
+        if (!putBlockFutures.isEmpty()) {
+          putBlockFutures.remove().get();
+        }
+      } catch (ExecutionException e) {
+        handleExecutionException(e);
+      } catch (InterruptedException ex) {
+        Thread.currentThread().interrupt();
+        handleInterruptedException(ex, true);
+      }
+      watchForCommit(true);
+    }
   }
 
   private void updateFlushLength() {
@@ -453,8 +474,7 @@ public class BlockDataStreamOutput implements ByteBufferStreamOutput {
             setIoException(ce);
             throw ce;
           });
-      putBlockFuture.updateAndGet(f -> f.thenCombine(flushFuture,
-          (previous, current) -> current));
+      putBlockFutures.add(flushFuture);
     } catch (IOException | ExecutionException e) {
       throw new IOException(EXCEPTION_MSG + e.toString(), e);
     } catch (InterruptedException ex) {
@@ -496,7 +516,7 @@ public class BlockDataStreamOutput implements ByteBufferStreamOutput {
       // data since latest flush - we need to send the "EOF" flag
       executePutBlock(true, true);
     }
-    putBlockFuture.get().get();
+    CompletableFuture.allOf(putBlockFutures.toArray(EMPTY_FUTURE_ARRAY)).get();
     watchForCommit(false);
     // just check again if the exception is hit while waiting for the
     // futures to ensure flush has indeed succeeded
@@ -638,6 +658,8 @@ public class BlockDataStreamOutput implements ByteBufferStreamOutput {
                 CompletionException ce = new CompletionException(msg, e);
                 setIoException(ce);
                 throw ce;
+              } else if (r.isSuccess()) {
+                xceiverClient.updateCommitInfosMap(r.getCommitInfos());
               }
             }, responseExecutor);
 
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneCluster.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneCluster.java
index a1783a6cb5..4809f0c357 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneCluster.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneCluster.java
@@ -326,7 +326,7 @@ public interface MiniOzoneCluster {
     protected OptionalInt streamBufferSize = OptionalInt.empty();
     protected Optional<Long> streamBufferFlushSize = Optional.empty();
     protected Optional<Long> dataStreamBufferFlushSize= Optional.empty();
-    protected OptionalInt dataStreamMaxBufferSize  = OptionalInt.empty();
+    protected Optional<Long> datastreamWindowSize= Optional.empty();
     protected Optional<Long> streamBufferMaxSize = Optional.empty();
     protected OptionalInt dataStreamMinPacketSize = OptionalInt.empty();
     protected Optional<Long> blockSize = Optional.empty();
@@ -569,11 +569,6 @@ public interface MiniOzoneCluster {
       return this;
     }
 
-    public Builder setDataStreamBufferMaxSize(int size) {
-      dataStreamMaxBufferSize = OptionalInt.of(size);
-      return this;
-    }
-
     public Builder setDataStreamBufferFlushize(long size) {
       dataStreamBufferFlushSize = Optional.of(size);
       return this;
@@ -584,6 +579,11 @@ public interface MiniOzoneCluster {
       return this;
     }
 
+    public Builder setDataStreamStreamWindowSize(long size) {
+      datastreamWindowSize = Optional.of(size);
+      return this;
+    }
+
     /**
      * Sets the block size for stream buffer.
      *
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneClusterImpl.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneClusterImpl.java
index 872db06aa5..c955f5e6bd 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneClusterImpl.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneClusterImpl.java
@@ -657,12 +657,12 @@ public class MiniOzoneClusterImpl implements MiniOzoneCluster {
       if (!dataStreamBufferFlushSize.isPresent()) {
         dataStreamBufferFlushSize = Optional.of((long) 4 * chunkSize.get());
       }
-      if (!dataStreamMaxBufferSize.isPresent()) {
-        dataStreamMaxBufferSize = OptionalInt.of(chunkSize.get());
-      }
       if (!dataStreamMinPacketSize.isPresent()) {
         dataStreamMinPacketSize = OptionalInt.of(chunkSize.get()/4);
       }
+      if (!datastreamWindowSize.isPresent()) {
+        datastreamWindowSize = Optional.of((long) 8 * chunkSize.get());
+      }
       if (!blockSize.isPresent()) {
         blockSize = Optional.of(2 * streamBufferMaxSize.get());
       }
@@ -681,12 +681,11 @@ public class MiniOzoneClusterImpl implements MiniOzoneCluster {
           streamBufferSizeUnit.get().toBytes(streamBufferFlushSize.get())));
       clientConfig.setDataStreamBufferFlushSize(Math.round(
           streamBufferSizeUnit.get().toBytes(dataStreamBufferFlushSize.get())));
-      clientConfig.setDataStreamMaxBufferSize((int) Math.round(
-          streamBufferSizeUnit.get()
-              .toBytes(dataStreamMaxBufferSize.getAsInt())));
       clientConfig.setDataStreamMinPacketSize((int) Math.round(
           streamBufferSizeUnit.get()
               .toBytes(dataStreamMinPacketSize.getAsInt())));
+      clientConfig.setStreamWindowSize(Math.round(
+          streamBufferSizeUnit.get().toBytes(datastreamWindowSize.get())));
       conf.setFromObject(clientConfig);
 
       conf.setStorageSize(ScmConfigKeys.OZONE_SCM_CHUNK_SIZE_KEY,
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBlockDataStreamOutput.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBlockDataStreamOutput.java
index c6a3c32d2b..696ab92ab7 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBlockDataStreamOutput.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBlockDataStreamOutput.java
@@ -106,9 +106,9 @@ public class TestBlockDataStreamOutput {
         .setStreamBufferFlushSize(flushSize)
         .setStreamBufferMaxSize(maxFlushSize)
         .setDataStreamBufferFlushize(maxFlushSize)
-        .setDataStreamBufferMaxSize(chunkSize)
         .setStreamBufferSizeUnit(StorageUnit.BYTES)
-        .setDataStreamMinPacketSize(2*chunkSize/5)
+        .setDataStreamMinPacketSize(chunkSize)
+        .setDataStreamStreamWindowSize(5*chunkSize)
         .build();
     cluster.waitForClusterToBeReady();
     //the easiest way to create an open container is creating a key
@@ -195,7 +195,7 @@ public class TestBlockDataStreamOutput {
 
   @Test
   public void testPutBlockAtBoundary() throws Exception {
-    int dataLength = 200;
+    int dataLength = 500;
     XceiverClientMetrics metrics =
         XceiverClientManager.getXceiverClientMetrics();
     long putBlockCount = metrics.getContainerOpCountMetrics(
@@ -213,8 +213,8 @@ public class TestBlockDataStreamOutput {
         metrics.getPendingContainerOpCountMetrics(ContainerProtos.Type.PutBlock)
             <= pendingPutBlockCount + 1);
     key.close();
-    // Since data length is 200 , first putBlock will be at 160(flush boundary)
-    // and the other at 200
+    // Since data length is 500, the first putBlock will be at 400 (the
+    // flush boundary) and the other at 500.
     Assert.assertTrue(
         metrics.getContainerOpCountMetrics(ContainerProtos.Type.PutBlock)
             == putBlockCount + 2);
@@ -242,10 +242,10 @@ public class TestBlockDataStreamOutput {
     long writeChunkCount =
         metrics.getContainerOpCountMetrics(ContainerProtos.Type.WriteChunk);
     byte[] data =
-        ContainerTestHelper.getFixedLengthString(keyString, chunkSize / 5)
+        ContainerTestHelper.getFixedLengthString(keyString, chunkSize / 2)
             .getBytes(UTF_8);
     key.write(ByteBuffer.wrap(data));
-    // minPacketSize= 40, so first write of 20 wont trigger a writeChunk
+    // minPacketSize = 100, so the first write of 50 won't trigger a writeChunk
     Assert.assertEquals(writeChunkCount,
         metrics.getContainerOpCountMetrics(ContainerProtos.Type.WriteChunk));
     key.write(ByteBuffer.wrap(data));
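
For context on the renamed option: when a test leaves these values unset, the builder now derives them from the chunk size, as the MiniOzoneClusterImpl hunk above shows. A minimal sketch of the resulting values, assuming an illustrative 1 MB chunk size:

    // Sketch of the defaults derived in MiniOzoneClusterImpl above
    // (chunkSize = 1 MB is an illustrative assumption).
    long chunkSize = 1024 * 1024;
    long dataStreamBufferFlushSize = 4 * chunkSize;       // 4 MB
    int dataStreamMinPacketSize = (int) (chunkSize / 4);  // 256 KB
    long datastreamWindowSize = 8 * chunkSize;            // 8 MB; replaces the old max buffer size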




[ozone] 37/38: HDDS-6867. [Ozone-Streaming] PutKeyHandler should not use streaming to put EC key. (#3516)

Posted by sz...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

szetszwo pushed a commit to branch HDDS-4454
in repository https://gitbox.apache.org/repos/asf/ozone.git

commit 29a8c157fd5e487c023e1cdd9e4349af6c09b4f2
Author: mingchao zhao <ca...@apache.org>
AuthorDate: Thu Jun 16 00:18:40 2022 +0800

    HDDS-6867.  [Ozone-Streaming] PutKeyHandler should not use streaming to put EC key. (#3516)
    
    (cherry picked from commit 2227d339d091d714dc24dfc06f6e24a8e5f7b74f)
    (cherry picked from commit 04ebe898435bbef40228751590cd26f1d1e2f051)
---
 .../container/upgrade/TestDatanodeUpgradeToSchemaV3.java      |  2 ++
 .../org/apache/hadoop/ozone/shell/keys/PutKeyHandler.java     | 11 ++++++++++-
 2 files changed, 12 insertions(+), 1 deletion(-)

diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/upgrade/TestDatanodeUpgradeToSchemaV3.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/upgrade/TestDatanodeUpgradeToSchemaV3.java
index 517842f2d3..c004b38843 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/upgrade/TestDatanodeUpgradeToSchemaV3.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/upgrade/TestDatanodeUpgradeToSchemaV3.java
@@ -105,6 +105,8 @@ public class TestDatanodeUpgradeToSchemaV3 {
     conf = new OzoneConfiguration();
     conf.setBoolean(DatanodeConfiguration.CONTAINER_SCHEMA_V3_ENABLED,
         this.schemaV3Enabled);
+    conf.setBoolean(
+        OzoneConfigKeys.DFS_CONTAINER_RATIS_DATASTREAM_RANDOM_PORT, true);
   }
 
   @Before
diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/keys/PutKeyHandler.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/keys/PutKeyHandler.java
index c34783dbdd..1f7c1ef7f4 100644
--- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/keys/PutKeyHandler.java
+++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/keys/PutKeyHandler.java
@@ -30,6 +30,7 @@ import java.util.HashMap;
 import java.util.Map;
 
 import org.apache.hadoop.conf.StorageUnit;
+import org.apache.hadoop.hdds.client.ECReplicationConfig;
 import org.apache.hadoop.hdds.client.ReplicationConfig;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.ozone.OzoneConsts;
@@ -41,6 +42,7 @@ import org.apache.hadoop.ozone.client.io.OzoneDataStreamOutput;
 import org.apache.hadoop.ozone.shell.OzoneAddress;
 
 import org.apache.commons.codec.digest.DigestUtils;
+import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType.EC;
 import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_CHUNK_SIZE_DEFAULT;
 import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_CHUNK_SIZE_KEY;
 
@@ -94,7 +96,14 @@ public class PutKeyHandler extends KeyHandler {
     int chunkSize = (int) getConf().getStorageSize(OZONE_SCM_CHUNK_SIZE_KEY,
         OZONE_SCM_CHUNK_SIZE_DEFAULT, StorageUnit.BYTES);
 
-    if (dataFile.length() <= chunkSize) {
+    boolean useAsync = false;
+    if (dataFile.length() <= chunkSize ||
+        (replicationConfig != null &&
+        replicationConfig.getReplicationType() == EC) ||
+        bucket.getReplicationConfig() instanceof ECReplicationConfig) {
+      useAsync = true;
+    }
+    if (useAsync) {
       if (isVerbose()) {
         out().println("API: async");
       }
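
The new guard folds two conditions together. A compact, behavior-equivalent restatement (using the EC import and ECReplicationConfig shown at the top of this patch):

    // Equivalent form of the check above: use the async API for small
    // files and for EC keys, since streaming does not handle EC here.
    boolean isECKey =
        (replicationConfig != null
            && replicationConfig.getReplicationType() == EC)
        || bucket.getReplicationConfig() instanceof ECReplicationConfig;
    boolean useAsync = dataFile.length() <= chunkSize || isECKey;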




[ozone] 02/38: HDDS-5452. Add link method to ContainerStateMachine for Ratis streaming (#2422)

Posted by sz...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

szetszwo pushed a commit to branch HDDS-4454
in repository https://gitbox.apache.org/repos/asf/ozone.git

commit 5626566ba7e144ccd8d6426b7aaf048a67706d42
Author: Kaijie Chen <ch...@kaijie.org>
AuthorDate: Sun Jul 18 12:18:10 2021 +0800

    HDDS-5452. Add link method to ContainerStateMachine for Ratis streaming (#2422)
    
    (cherry picked from commit b6e214398150bfc19653142cc69b4fb4675fa4de)
    (cherry picked from commit 425bc95539a8de334125f4bfe38aff93f1730f5d)
---
 .../transport/server/ratis/ContainerStateMachine.java   | 17 +++++++++++++++++
 1 file changed, 17 insertions(+)

diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/ContainerStateMachine.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/ContainerStateMachine.java
index 47e042643a..f8d066757a 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/ContainerStateMachine.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/ContainerStateMachine.java
@@ -92,6 +92,7 @@ import org.apache.ratis.thirdparty.com.google.protobuf.InvalidProtocolBufferExce
 import org.apache.ratis.thirdparty.com.google.protobuf.TextFormat;
 import org.apache.ratis.util.TaskQueue;
 import org.apache.ratis.util.function.CheckedSupplier;
+import org.apache.ratis.util.JavaUtils;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -535,6 +536,22 @@ public class ContainerStateMachine extends BaseStateMachine {
     }, executor);
   }
 
+  public CompletableFuture<?> link(DataStream stream, LogEntryProto entry) {
+    return CompletableFuture.supplyAsync(() -> {
+      if (stream == null) {
+        return JavaUtils.completeExceptionally(
+            new IllegalStateException("DataStream is null"));
+      }
+      if (stream.getDataChannel().isOpen()) {
+        return JavaUtils.completeExceptionally(
+            new IllegalStateException(
+                "DataStream: " + stream + " is not closed properly"));
+      } else {
+        return CompletableFuture.completedFuture(null);
+      }
+    }, executor);
+  }
+
   private ExecutorService getChunkExecutor(WriteChunkRequestProto req) {
     int i = (int)(req.getBlockID().getLocalID() % chunkExecutors.size());
     return chunkExecutors.get(i);
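
One subtlety in the new link method: the null/open checks run inside supplyAsync and themselves return futures, so a failed check surfaces as an exceptionally completed inner future while the outer future completes normally. A hypothetical caller-side sketch (not part of this commit) that unwraps both layers:

    // Hypothetical caller of link(...); both branches above return a
    // CompletableFuture, so the outer result must be unwrapped.
    Object inner = stateMachine.link(stream, entry).join();
    if (inner instanceof CompletableFuture) {
      // Throws CompletionException if the stream was null or its
      // DataChannel was still open; completes normally otherwise.
      ((CompletableFuture<?>) inner).join();
    }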




[ozone] 15/38: HDDS-5961. [Ozone-Streaming] update the usage space of Containers in the stream write (#2833)

Posted by sz...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

szetszwo pushed a commit to branch HDDS-4454
in repository https://gitbox.apache.org/repos/asf/ozone.git

commit 8cbadbcaf6c464f9234dc68bb27ed767925216f1
Author: hao guo <gu...@360.cn>
AuthorDate: Wed Nov 17 15:19:47 2021 +0800

    HDDS-5961. [Ozone-Streaming] update the usage space of Containers in the stream write (#2833)
    
    (cherry picked from commit ddc8a2c3d603e37be5d1f57611f4b691f56ac170)
    (cherry picked from commit 65ec5817e22a666a4fa4fe76ac5c0c1c3263865a)
---
 .../container/common/impl/HddsDispatcher.java      |  18 ++
 .../common/interfaces/ContainerDispatcher.java     |  10 ++
 .../ozone/container/common/interfaces/Handler.java |   5 +
 .../server/ratis/ContainerStateMachine.java        |  21 ++-
 .../transport/server/ratis/StreamDataChannel.java  |  57 -------
 .../ozone/container/keyvalue/KeyValueHandler.java  |  12 ++
 .../keyvalue/impl/ChunkManagerDispatcher.java      |  10 ++
 .../keyvalue/impl/FilePerBlockStrategy.java        |  12 ++
 .../keyvalue/impl/KeyValueStreamDataChannel.java   |  90 ++++++++++
 .../keyvalue/interfaces/ChunkManager.java          |   8 +
 .../src/main/proto/DatanodeClientProtocol.proto    |   1 +
 .../rpc/TestContainerStateMachineStream.java       | 183 +++++++++++++++++++++
 12 files changed, 364 insertions(+), 63 deletions(-)

diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/HddsDispatcher.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/HddsDispatcher.java
index a4693b227e..27bd0c55ad 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/HddsDispatcher.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/HddsDispatcher.java
@@ -67,6 +67,7 @@ import static org.apache.hadoop.hdds.scm.protocolPB.ContainerCommandResponseBuil
 import static org.apache.hadoop.hdds.scm.protocolPB.ContainerCommandResponseBuilders.unsupportedRequest;
 
 import org.apache.hadoop.util.Time;
+import org.apache.ratis.statemachine.StateMachine;
 import org.apache.ratis.thirdparty.com.google.protobuf.ProtocolMessageEnum;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -695,4 +696,21 @@ public class HddsDispatcher implements ContainerDispatcher, Auditor {
     default: return false;
     }
   }
+
+  @Override
+  public StateMachine.DataChannel getStreamDataChannel(
+          ContainerCommandRequestProto msg)
+          throws StorageContainerException {
+    long containerID = msg.getContainerID();
+    Container container = getContainer(containerID);
+    if (container != null) {
+      Handler handler = getHandler(getContainerType(container));
+      return handler.getStreamDataChannel(container, msg);
+    } else {
+      throw new StorageContainerException(
+              "ContainerID " + containerID + " does not exist",
+              ContainerProtos.Result.CONTAINER_NOT_FOUND);
+    }
+  }
+
 }
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/ContainerDispatcher.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/ContainerDispatcher.java
index a2e397d546..d02bae0a35 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/ContainerDispatcher.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/ContainerDispatcher.java
@@ -25,6 +25,7 @@ import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
     .ContainerCommandResponseProto;
 import org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException;
 import org.apache.hadoop.ozone.container.common.transport.server.ratis.DispatcherContext;
+import org.apache.ratis.statemachine.StateMachine;
 
 import java.util.Map;
 
@@ -84,4 +85,13 @@ public interface ContainerDispatcher {
    * @param clusterId
    */
   void setClusterId(String clusterId);
+
+  /**
+   * When uploading using stream, get StreamDataChannel.
+   */
+  default StateMachine.DataChannel getStreamDataChannel(
+      ContainerCommandRequestProto msg) throws StorageContainerException {
+    throw new UnsupportedOperationException(
+        "getStreamDataChannel not supported.");
+  }
 }
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/Handler.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/Handler.java
index 67f977ca38..a62de490b4 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/Handler.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/Handler.java
@@ -37,6 +37,7 @@ import org.apache.hadoop.ozone.container.common.transport.server.ratis.Dispatche
 import org.apache.hadoop.ozone.container.common.volume.VolumeSet;
 import org.apache.hadoop.ozone.container.keyvalue.KeyValueHandler;
 import org.apache.hadoop.ozone.container.keyvalue.TarContainerPacker;
+import org.apache.ratis.statemachine.StateMachine;
 
 /**
  * Dispatcher sends ContainerCommandRequests to Handler. Each Container Type
@@ -81,6 +82,10 @@ public abstract class Handler {
     }
   }
 
+  public abstract StateMachine.DataChannel getStreamDataChannel(
+          Container container, ContainerCommandRequestProto msg)
+          throws StorageContainerException;
+
   /**
    * Returns the Id of this datanode.
    *
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/ContainerStateMachine.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/ContainerStateMachine.java
index 83255e0450..3ef9477d97 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/ContainerStateMachine.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/ContainerStateMachine.java
@@ -25,7 +25,6 @@ import java.io.IOException;
 import java.io.OutputStream;
 import java.util.Arrays;
 import java.util.Collection;
-import java.nio.file.Paths;
 import java.util.List;
 import java.util.Map;
 import java.util.concurrent.CompletableFuture;
@@ -82,6 +81,7 @@ import org.apache.ratis.server.RaftServer;
 import org.apache.ratis.server.protocol.TermIndex;
 import org.apache.ratis.server.raftlog.RaftLog;
 import org.apache.ratis.server.storage.RaftStorage;
+import org.apache.ratis.statemachine.StateMachine;
 import org.apache.ratis.statemachine.StateMachineStorage;
 import org.apache.ratis.statemachine.TransactionContext;
 import org.apache.ratis.statemachine.impl.BaseStateMachine;
@@ -513,6 +513,19 @@ public class ContainerStateMachine extends BaseStateMachine {
     return raftFuture;
   }
 
+  private StateMachine.DataChannel getStreamDataChannel(
+          ContainerCommandRequestProto requestProto,
+          DispatcherContext context) throws StorageContainerException {
+    if (LOG.isDebugEnabled()) {
+      LOG.debug("{}: getStreamDataChannel {} containerID={} pipelineID={} " +
+                      "traceID={}", gid, requestProto.getCmdType(),
+              requestProto.getContainerID(), requestProto.getPipelineID(),
+              requestProto.getTraceID());
+    }
+    runCommand(requestProto, context);  // stream init
+    return dispatcher.getStreamDataChannel(requestProto);
+  }
+
   @Override
   public CompletableFuture<DataStream> stream(RaftClientRequest request) {
     return CompletableFuture.supplyAsync(() -> {
@@ -524,11 +537,7 @@ public class ContainerStateMachine extends BaseStateMachine {
                 .setStage(DispatcherContext.WriteChunkStage.WRITE_DATA)
                 .setContainer2BCSIDMap(container2BCSIDMap)
                 .build();
-
-        ContainerCommandResponseProto response = runCommand(
-            requestProto, context);
-        final StreamDataChannel channel = new StreamDataChannel(
-            Paths.get(response.getMessage()));
+        DataChannel channel = getStreamDataChannel(requestProto, context);
         final ExecutorService chunkExecutor = requestProto.hasWriteChunk() ?
             getChunkExecutor(requestProto.getWriteChunk()) : null;
         return new LocalStream(channel, chunkExecutor);
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/StreamDataChannel.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/StreamDataChannel.java
deleted file mode 100644
index 3df66e26dc..0000000000
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/StreamDataChannel.java
+++ /dev/null
@@ -1,57 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
- */
-
-package org.apache.hadoop.ozone.container.common.transport.server.ratis;
-
-import org.apache.ratis.statemachine.StateMachine;
-
-import java.io.FileNotFoundException;
-import java.io.IOException;
-import java.io.RandomAccessFile;
-import java.nio.ByteBuffer;
-import java.nio.file.Path;
-
-class StreamDataChannel implements StateMachine.DataChannel {
-  private final Path path;
-  private final RandomAccessFile randomAccessFile;
-
-  StreamDataChannel(Path path) throws FileNotFoundException {
-    this.path = path;
-    this.randomAccessFile = new RandomAccessFile(path.toFile(), "rw");
-  }
-
-  @Override
-  public void force(boolean metadata) throws IOException {
-    randomAccessFile.getChannel().force(metadata);
-  }
-
-  @Override
-  public int write(ByteBuffer src) throws IOException {
-    return randomAccessFile.getChannel().write(src);
-  }
-
-  @Override
-  public boolean isOpen() {
-    return randomAccessFile.getChannel().isOpen();
-  }
-
-  @Override
-  public void close() throws IOException {
-    randomAccessFile.close();
-  }
-}
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java
index 9be52d6403..eafea02216 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java
@@ -109,6 +109,7 @@ import static org.apache.hadoop.hdds.scm.utils.ClientCommandsUtils.getReadChunkV
 import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
     .ContainerDataProto.State.RECOVERING;
 
+import org.apache.ratis.statemachine.StateMachine;
 import org.apache.ratis.thirdparty.com.google.protobuf.ByteString;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -182,6 +183,17 @@ public class KeyValueHandler extends Handler {
     return volumeChoosingPolicy;
   }
 
+  @Override
+  public StateMachine.DataChannel getStreamDataChannel(
+          Container container, ContainerCommandRequestProto msg)
+          throws StorageContainerException {
+    KeyValueContainer kvContainer = (KeyValueContainer) container;
+    checkContainerOpen(kvContainer);
+    BlockID blockID = BlockID.getFromProtobuf(msg.getWriteChunk().getBlockID());
+    return chunkManager.getStreamDataChannel(kvContainer,
+            blockID, metrics);
+  }
+
   @Override
   public void stop() {
     chunkManager.shutdown();
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/ChunkManagerDispatcher.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/ChunkManagerDispatcher.java
index 3e2ab46470..92f6327447 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/ChunkManagerDispatcher.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/ChunkManagerDispatcher.java
@@ -25,6 +25,7 @@ import org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerExcep
 import org.apache.hadoop.ozone.common.ChunkBuffer;
 import org.apache.hadoop.ozone.container.common.helpers.BlockData;
 import org.apache.hadoop.ozone.container.common.helpers.ChunkInfo;
+import org.apache.hadoop.ozone.container.common.helpers.ContainerMetrics;
 import org.apache.hadoop.ozone.container.common.transport.server.ratis.DispatcherContext;
 import org.apache.hadoop.ozone.container.common.impl.ContainerLayoutVersion;
 import org.apache.hadoop.ozone.container.common.volume.VolumeSet;
@@ -33,6 +34,7 @@ import org.apache.hadoop.ozone.container.keyvalue.interfaces.BlockManager;
 import org.apache.hadoop.ozone.container.keyvalue.interfaces.ChunkManager;
 import org.apache.hadoop.ozone.container.common.interfaces.Container;
 
+import org.apache.ratis.statemachine.StateMachine;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -79,6 +81,14 @@ public class ChunkManagerDispatcher implements ChunkManager {
         .streamInit(container, blockID);
   }
 
+  @Override
+  public StateMachine.DataChannel getStreamDataChannel(
+          Container container, BlockID blockID, ContainerMetrics metrics)
+          throws StorageContainerException {
+    return selectHandler(container)
+            .getStreamDataChannel(container, blockID, metrics);
+  }
+
   @Override
   public void finishWriteChunks(KeyValueContainer kvContainer,
       BlockData blockData) throws IOException {
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/FilePerBlockStrategy.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/FilePerBlockStrategy.java
index 9efc6bc351..23db342da0 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/FilePerBlockStrategy.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/FilePerBlockStrategy.java
@@ -32,6 +32,7 @@ import org.apache.hadoop.ozone.common.ChunkBuffer;
 import org.apache.hadoop.ozone.common.utils.BufferUtils;
 import org.apache.hadoop.ozone.container.common.helpers.BlockData;
 import org.apache.hadoop.ozone.container.common.helpers.ChunkInfo;
+import org.apache.hadoop.ozone.container.common.helpers.ContainerMetrics;
 import org.apache.hadoop.ozone.container.common.transport.server.ratis.DispatcherContext;
 import org.apache.hadoop.ozone.container.common.volume.VolumeSet;
 import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainer;
@@ -42,6 +43,7 @@ import org.apache.hadoop.ozone.container.keyvalue.interfaces.BlockManager;
 import org.apache.hadoop.ozone.container.keyvalue.interfaces.ChunkManager;
 import org.apache.hadoop.ozone.container.common.interfaces.Container;
 
+import org.apache.ratis.statemachine.StateMachine;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -97,6 +99,16 @@ public class FilePerBlockStrategy implements ChunkManager {
     return chunkFile.getAbsolutePath();
   }
 
+  @Override
+  public StateMachine.DataChannel getStreamDataChannel(
+          Container container, BlockID blockID, ContainerMetrics metrics)
+          throws StorageContainerException {
+    checkLayoutVersion(container);
+    File chunkFile = getChunkFile(container, blockID, null);
+    return new KeyValueStreamDataChannel(chunkFile,
+        container.getContainerData(), metrics);
+  }
+
   @Override
   public void writeChunk(Container container, BlockID blockID, ChunkInfo info,
       ChunkBuffer data, DispatcherContext dispatcherContext)
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/KeyValueStreamDataChannel.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/KeyValueStreamDataChannel.java
new file mode 100644
index 0000000000..c0570f5d4d
--- /dev/null
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/KeyValueStreamDataChannel.java
@@ -0,0 +1,90 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ *  with the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ */
+
+package org.apache.hadoop.ozone.container.keyvalue.impl;
+
+import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
+import org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException;
+import org.apache.hadoop.ozone.container.common.helpers.ContainerMetrics;
+import org.apache.hadoop.ozone.container.common.impl.ContainerData;
+import org.apache.ratis.statemachine.StateMachine;
+
+import java.io.File;
+import java.io.FileNotFoundException;
+import java.io.IOException;
+import java.io.RandomAccessFile;
+import java.nio.ByteBuffer;
+
+/**
+ * This class is used to get the DataChannel for streaming.
+ */
+class KeyValueStreamDataChannel implements StateMachine.DataChannel {
+  private final RandomAccessFile randomAccessFile;
+  private final File file;
+
+  private final ContainerData containerData;
+  private final ContainerMetrics metrics;
+
+  KeyValueStreamDataChannel(File file, ContainerData containerData,
+                            ContainerMetrics metrics)
+      throws StorageContainerException {
+    try {
+      this.file = file;
+      this.randomAccessFile = new RandomAccessFile(file, "rw");
+    } catch (FileNotFoundException e) {
+      throw new StorageContainerException("BlockFile not exists with " +
+          "container Id " + containerData.getContainerID() +
+          " file " + file.getAbsolutePath(),
+          ContainerProtos.Result.IO_EXCEPTION);
+    }
+    this.containerData = containerData;
+    this.metrics = metrics;
+  }
+
+  @Override
+  public void force(boolean metadata) throws IOException {
+    randomAccessFile.getChannel().force(metadata);
+  }
+
+  @Override
+  public int write(ByteBuffer src) throws IOException {
+    int writeBytes = randomAccessFile.getChannel().write(src);
+    metrics
+        .incContainerBytesStats(ContainerProtos.Type.StreamWrite, writeBytes);
+    containerData.updateWriteStats(writeBytes, false);
+    return writeBytes;
+  }
+
+  @Override
+  public boolean isOpen() {
+    return randomAccessFile.getChannel().isOpen();
+  }
+
+  @Override
+  public void close() throws IOException {
+    randomAccessFile.close();
+  }
+
+  @Override
+  public String toString() {
+    return "KeyValueStreamDataChannel{" +
+        "File=" + file.getAbsolutePath() +
+        ", containerID=" + containerData.getContainerID() +
+        '}';
+  }
+}
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/interfaces/ChunkManager.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/interfaces/ChunkManager.java
index ba06eebd69..7a64f07628 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/interfaces/ChunkManager.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/interfaces/ChunkManager.java
@@ -25,9 +25,11 @@ import org.apache.hadoop.ozone.common.ChecksumData;
 import org.apache.hadoop.ozone.common.ChunkBuffer;
 import org.apache.hadoop.ozone.container.common.helpers.BlockData;
 import org.apache.hadoop.ozone.container.common.helpers.ChunkInfo;
+import org.apache.hadoop.ozone.container.common.helpers.ContainerMetrics;
 import org.apache.hadoop.ozone.container.common.interfaces.Container;
 import org.apache.hadoop.ozone.container.common.transport.server.ratis.DispatcherContext;
 import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainer;
+import org.apache.ratis.statemachine.StateMachine;
 
 import java.io.IOException;
 import java.nio.ByteBuffer;
@@ -109,6 +111,12 @@ public interface ChunkManager {
     return null;
   }
 
+  default StateMachine.DataChannel getStreamDataChannel(
+          Container container, BlockID blockID, ContainerMetrics metrics)
+          throws StorageContainerException {
+    return null;
+  }
+
   static long getBufferCapacityForChunkRead(ChunkInfo chunkInfo,
       long defaultReadBufferCapacity) {
     long bufferCapacity = 0;
diff --git a/hadoop-hdds/interface-client/src/main/proto/DatanodeClientProtocol.proto b/hadoop-hdds/interface-client/src/main/proto/DatanodeClientProtocol.proto
index 423e63babd..d19f466a97 100644
--- a/hadoop-hdds/interface-client/src/main/proto/DatanodeClientProtocol.proto
+++ b/hadoop-hdds/interface-client/src/main/proto/DatanodeClientProtocol.proto
@@ -102,6 +102,7 @@ enum Type {
   GetCommittedBlockLength = 18;
 
   StreamInit = 19;
+  StreamWrite = 20;
 }
 
 
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachineStream.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachineStream.java
new file mode 100644
index 0000000000..3b17450376
--- /dev/null
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachineStream.java
@@ -0,0 +1,183 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership.  The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+
+package org.apache.hadoop.ozone.client.rpc;
+
+
+import org.apache.hadoop.hdds.client.ReplicationType;
+import org.apache.hadoop.hdds.conf.DatanodeRatisServerConfig;
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
+import org.apache.hadoop.hdds.ratis.conf.RatisClientConfig;
+import org.apache.hadoop.hdds.scm.OzoneClientConfig;
+import org.apache.hadoop.ozone.HddsDatanodeService;
+import org.apache.hadoop.ozone.MiniOzoneCluster;
+import org.apache.hadoop.ozone.OzoneConfigKeys;
+import org.apache.hadoop.ozone.client.ObjectStore;
+import org.apache.hadoop.ozone.client.OzoneClient;
+import org.apache.hadoop.ozone.client.OzoneClientFactory;
+import org.apache.hadoop.ozone.client.io.KeyDataStreamOutput;
+import org.apache.hadoop.ozone.client.io.OzoneDataStreamOutput;
+import org.apache.hadoop.ozone.container.ContainerTestHelper;
+import org.apache.hadoop.ozone.container.TestHelper;
+import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo;
+import org.junit.After;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.Timeout;
+
+import java.io.IOException;
+import java.nio.ByteBuffer;
+import java.time.Duration;
+import java.util.List;
+import java.util.UUID;
+import java.util.concurrent.TimeUnit;
+
+import static java.nio.charset.StandardCharsets.UTF_8;
+import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_COMMAND_STATUS_REPORT_INTERVAL;
+import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_CONTAINER_REPORT_INTERVAL;
+import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_PIPELINE_REPORT_INTERVAL;
+import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_PIPELINE_DESTROY_TIMEOUT;
+import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_STALENODE_INTERVAL;
+
+/**
+ * Tests the containerStateMachine stream handling.
+ */
+public class TestContainerStateMachineStream {
+
+  /**
+   * Set a timeout for each test.
+   */
+  @Rule
+  public Timeout timeout = Timeout.seconds(300);
+
+  private MiniOzoneCluster cluster;
+  private OzoneConfiguration conf = new OzoneConfiguration();
+  private OzoneClient client;
+  private ObjectStore objectStore;
+  private String volumeName;
+  private String bucketName;
+
+  /**
+   * Create a MiniOzoneCluster for testing.
+   *
+   * @throws IOException
+   */
+  @Before
+  public void setup() throws Exception {
+    conf = new OzoneConfiguration();
+
+    OzoneClientConfig clientConfig = conf.getObject(OzoneClientConfig.class);
+    clientConfig.setStreamBufferFlushDelay(false);
+    conf.setFromObject(clientConfig);
+
+    conf.setTimeDuration(HDDS_CONTAINER_REPORT_INTERVAL, 200,
+        TimeUnit.MILLISECONDS);
+    conf.setTimeDuration(HDDS_COMMAND_STATUS_REPORT_INTERVAL, 200,
+        TimeUnit.MILLISECONDS);
+    conf.setTimeDuration(HDDS_PIPELINE_REPORT_INTERVAL, 200,
+        TimeUnit.MILLISECONDS);
+    conf.setTimeDuration(OZONE_SCM_STALENODE_INTERVAL, 30, TimeUnit.SECONDS);
+    conf.setTimeDuration(OZONE_SCM_PIPELINE_DESTROY_TIMEOUT, 1,
+        TimeUnit.SECONDS);
+
+    RatisClientConfig ratisClientConfig =
+        conf.getObject(RatisClientConfig.class);
+    ratisClientConfig.setWriteRequestTimeout(Duration.ofSeconds(10));
+    ratisClientConfig.setWatchRequestTimeout(Duration.ofSeconds(10));
+    conf.setFromObject(ratisClientConfig);
+
+    DatanodeRatisServerConfig ratisServerConfig =
+        conf.getObject(DatanodeRatisServerConfig.class);
+    ratisServerConfig.setRequestTimeOut(Duration.ofSeconds(3));
+    ratisServerConfig.setWatchTimeOut(Duration.ofSeconds(10));
+    conf.setFromObject(ratisServerConfig);
+
+    RatisClientConfig.RaftConfig raftClientConfig =
+        conf.getObject(RatisClientConfig.RaftConfig.class);
+    raftClientConfig.setRpcRequestTimeout(Duration.ofSeconds(3));
+    raftClientConfig.setRpcWatchRequestTimeout(Duration.ofSeconds(10));
+    conf.setFromObject(raftClientConfig);
+
+    conf.setLong(OzoneConfigKeys.DFS_RATIS_SNAPSHOT_THRESHOLD_KEY, 1);
+    conf.setQuietMode(false);
+    cluster =
+        MiniOzoneCluster.newBuilder(conf).setNumDatanodes(3).setHbInterval(200)
+            .build();
+    cluster.waitForClusterToBeReady();
+    cluster.waitForPipelineTobeReady(HddsProtos.ReplicationFactor.ONE, 60000);
+    //the easiest way to create an open container is creating a key
+    client = OzoneClientFactory.getRpcClient(conf);
+    objectStore = client.getObjectStore();
+
+    volumeName = "testcontainerstatemachinestream";
+    bucketName = "teststreambucket";
+    objectStore.createVolume(volumeName);
+    objectStore.getVolume(volumeName).createBucket(bucketName);
+
+  }
+
+  /**
+   * Shutdown the MiniOzoneCluster.
+   */
+  @After
+  public void shutdown() {
+    if (cluster != null) {
+      cluster.shutdown();
+    }
+  }
+
+  @Test
+  public void testContainerStateMachineForStreaming() throws Exception {
+    long size = 1024 * 8;
+
+    OzoneDataStreamOutput key = TestHelper.createStreamKey(
+        "ozone-stream-test.txt", ReplicationType.RATIS, size, objectStore,
+        volumeName, bucketName);
+
+    byte[] data =
+        ContainerTestHelper
+            .getFixedLengthString(UUID.randomUUID().toString(),
+                (int) (size / 2))
+            .getBytes(UTF_8);
+    key.write(ByteBuffer.wrap(data));
+    key.write(ByteBuffer.wrap(data));
+
+    key.flush();
+
+    KeyDataStreamOutput streamOutput =
+        (KeyDataStreamOutput) key.getByteBufStreamOutput();
+    List<OmKeyLocationInfo> locationInfoList =
+        streamOutput.getLocationInfoList();
+
+    key.close();
+
+    OmKeyLocationInfo omKeyLocationInfo = locationInfoList.get(0);
+    HddsDatanodeService dn = TestHelper.getDatanodeService(omKeyLocationInfo,
+        cluster);
+
+    long bytesUsed = dn.getDatanodeStateMachine()
+        .getContainer().getContainerSet()
+        .getContainer(omKeyLocationInfo.getContainerID()).
+            getContainerData().getBytesUsed();
+
+    Assert.assertTrue(bytesUsed == size);
+  }
+
+}
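
Putting this commit's pieces together: a stream init request now resolves to a channel on the block file, and every write through that channel updates both the StreamWrite byte metrics and the container's bytesUsed, which is what the new test asserts. A condensed sketch of the resolution chain (method names are from the hunks above; error handling elided):

    // Condensed resolution chain (sketch; see the hunks above for the
    // real implementations with checks and metrics):
    //   ContainerStateMachine.stream(request)
    //     -> runCommand(requestProto, context)          // stream init
    //     -> HddsDispatcher.getStreamDataChannel(msg)   // container lookup
    //     -> KeyValueHandler.getStreamDataChannel(...)  // checkContainerOpen
    //     -> ChunkManagerDispatcher / FilePerBlockStrategy
    //     -> new KeyValueStreamDataChannel(chunkFile, containerData, metrics)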




[ozone] 29/38: HDDS-6355. [Ozone-Streaming] Fix CheckStyle problem (#3119)

Posted by sz...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

szetszwo pushed a commit to branch HDDS-4454
in repository https://gitbox.apache.org/repos/asf/ozone.git

commit b2ec47d5d3e371795beb63ef4d6e7c00ecb4a4d6
Author: hao guo <gu...@360.cn>
AuthorDate: Mon Feb 21 21:02:50 2022 +0800

    HDDS-6355. [Ozone-Streaming] Fix CheckStyle problem (#3119)
    
    (cherry picked from commit e63539309fd026c5052e9648844aafad8170264a)
    (cherry picked from commit c1bc998af667eae6a404c127f017ce724f5415a3)
---
 .../apache/hadoop/hdds/scm/storage/BlockDataStreamOutput.java  | 10 +++++-----
 .../java/org/apache/hadoop/hdds/scm/storage/StreamBuffer.java  |  4 ++--
 .../hadoop/ozone/client/io/BlockDataStreamOutputEntryPool.java |  4 ++--
 .../java/org/apache/hadoop/ozone/client/rpc/RpcClient.java     |  2 +-
 .../test/java/org/apache/hadoop/ozone/MiniOzoneCluster.java    |  4 ++--
 .../java/org/apache/hadoop/ozone/MiniOzoneClusterImpl.java     |  2 +-
 .../hadoop/ozone/client/rpc/TestBlockDataStreamOutput.java     |  4 ++--
 .../java/org/apache/hadoop/fs/ozone/BasicOzoneFileSystem.java  |  2 +-
 .../org/apache/hadoop/fs/ozone/BasicRootedOzoneFileSystem.java |  2 +-
 9 files changed, 17 insertions(+), 17 deletions(-)

diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockDataStreamOutput.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockDataStreamOutput.java
index 8dd9e6b50e..8b3e32cf41 100644
--- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockDataStreamOutput.java
+++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockDataStreamOutput.java
@@ -283,7 +283,7 @@ public class BlockDataStreamOutput implements ByteBufferStreamOutput {
   }
 
   private void writeChunkIfNeeded() throws IOException {
-    if (currentBuffer.length()==0) {
+    if (currentBuffer.length() == 0) {
       writeChunk(currentBuffer);
       currentBuffer = null;
     }
@@ -302,7 +302,7 @@ public class BlockDataStreamOutput implements ByteBufferStreamOutput {
   }
 
   private void allocateNewBufferIfNeeded() {
-    if (currentBuffer==null) {
+    if (currentBuffer == null) {
       currentBuffer =
           StreamBuffer.allocate(config.getDataStreamMinPacketSize());
     }
@@ -323,7 +323,7 @@ public class BlockDataStreamOutput implements ByteBufferStreamOutput {
       updateFlushLength();
       executePutBlock(false, false);
     }
-    if (bufferList.size()==streamWindow){
+    if (bufferList.size() == streamWindow) {
       try {
         checkOpen();
         if (!putBlockFutures.isEmpty()) {
@@ -514,7 +514,7 @@ public class BlockDataStreamOutput implements ByteBufferStreamOutput {
       // here, we just limit this buffer to the current position. So that next
       // write will happen in new buffer
 
-      if (currentBuffer!=null) {
+      if (currentBuffer != null) {
         writeChunk(currentBuffer);
         currentBuffer = null;
       }
@@ -693,7 +693,7 @@ public class BlockDataStreamOutput implements ByteBufferStreamOutput {
       boolean processExecutionException)
       throws IOException {
     LOG.error("Command execution was interrupted.");
-    if(processExecutionException) {
+    if (processExecutionException) {
       handleExecutionException(ex);
     } else {
       throw new IOException(EXCEPTION_MSG + ex.toString(), ex);
diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/StreamBuffer.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/StreamBuffer.java
index 5118ea5ead..d34e4dca94 100644
--- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/StreamBuffer.java
+++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/StreamBuffer.java
@@ -48,11 +48,11 @@ public class StreamBuffer {
   }
 
 
-  public void put(StreamBuffer sb){
+  public void put(StreamBuffer sb) {
     buffer.put(sb.buffer);
   }
 
-  public static StreamBuffer allocate(int size){
+  public static StreamBuffer allocate(int size) {
     return new StreamBuffer(ByteBuffer.allocate(size));
   }
 
diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/BlockDataStreamOutputEntryPool.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/BlockDataStreamOutputEntryPool.java
index 24a046f623..00cda7844a 100644
--- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/BlockDataStreamOutputEntryPool.java
+++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/BlockDataStreamOutputEntryPool.java
@@ -307,8 +307,8 @@ public class BlockDataStreamOutputEntryPool {
   }
 
   long computeBufferData() {
-    long totalDataLen =0;
-    for (StreamBuffer b : bufferList){
+    long totalDataLen = 0;
+    for (StreamBuffer b : bufferList) {
       totalDataLen += b.position();
     }
     return totalDataLen;
diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java
index b37b83f67e..643b2e186c 100644
--- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java
+++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java
@@ -1222,7 +1222,7 @@ public class RpcClient implements ClientProtocol {
         .setAcls(getAclList());
 
     if (Boolean.parseBoolean(metadata.get(OzoneConsts.GDPR_FLAG))) {
-      try{
+      try {
         GDPRSymmetricKey gKey = new GDPRSymmetricKey(new SecureRandom());
         builder.addAllMetadata(gKey.getKeyDetails());
       } catch (Exception e) {
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneCluster.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneCluster.java
index 4809f0c357..4c485d805c 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneCluster.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneCluster.java
@@ -325,8 +325,8 @@ public interface MiniOzoneCluster {
     protected Optional<Integer> chunkSize = Optional.empty();
     protected OptionalInt streamBufferSize = OptionalInt.empty();
     protected Optional<Long> streamBufferFlushSize = Optional.empty();
-    protected Optional<Long> dataStreamBufferFlushSize= Optional.empty();
-    protected Optional<Long> datastreamWindowSize= Optional.empty();
+    protected Optional<Long> dataStreamBufferFlushSize = Optional.empty();
+    protected Optional<Long> datastreamWindowSize = Optional.empty();
     protected Optional<Long> streamBufferMaxSize = Optional.empty();
     protected OptionalInt dataStreamMinPacketSize = OptionalInt.empty();
     protected Optional<Long> blockSize = Optional.empty();
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneClusterImpl.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneClusterImpl.java
index c955f5e6bd..39f3552b36 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneClusterImpl.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneClusterImpl.java
@@ -658,7 +658,7 @@ public class MiniOzoneClusterImpl implements MiniOzoneCluster {
         dataStreamBufferFlushSize = Optional.of((long) 4 * chunkSize.get());
       }
       if (!dataStreamMinPacketSize.isPresent()) {
-        dataStreamMinPacketSize = OptionalInt.of(chunkSize.get()/4);
+        dataStreamMinPacketSize = OptionalInt.of(chunkSize.get() / 4);
       }
       if (!datastreamWindowSize.isPresent()) {
         datastreamWindowSize = Optional.of((long) 8 * chunkSize.get());
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBlockDataStreamOutput.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBlockDataStreamOutput.java
index 21003374d7..6225e25268 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBlockDataStreamOutput.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBlockDataStreamOutput.java
@@ -107,7 +107,7 @@ public class TestBlockDataStreamOutput {
         .setDataStreamBufferFlushize(maxFlushSize)
         .setStreamBufferSizeUnit(StorageUnit.BYTES)
         .setDataStreamMinPacketSize(chunkSize)
-        .setDataStreamStreamWindowSize(5*chunkSize)
+        .setDataStreamStreamWindowSize(5 * chunkSize)
         .build();
     cluster.waitForClusterToBeReady();
     //the easiest way to create an open container is creating a key
@@ -137,7 +137,7 @@ public class TestBlockDataStreamOutput {
   @Test
   public void testHalfChunkWrite() throws Exception {
     testWrite(chunkSize / 2);
-    testWriteWithFailure(chunkSize/2);
+    testWriteWithFailure(chunkSize / 2);
   }
 
   @Test
diff --git a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneFileSystem.java b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneFileSystem.java
index 6ca32c4f3c..8eb92255b6 100644
--- a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneFileSystem.java
+++ b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneFileSystem.java
@@ -263,7 +263,7 @@ public class BasicOzoneFileSystem extends FileSystem {
     boolean isRatisStreamingEnabled = getConf().getBoolean(
         OzoneConfigKeys.OZONE_FS_DATASTREAM_ENABLE,
         OzoneConfigKeys.OZONE_FS_DATASTREAM_ENABLE_DEFAULT);
-    if (isRatisStreamingEnabled){
+    if (isRatisStreamingEnabled) {
       return new FSDataOutputStream(adapter.createStreamFile(key,
           replication, overwrite, recursive), statistics);
     }
diff --git a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicRootedOzoneFileSystem.java b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicRootedOzoneFileSystem.java
index b248abce60..75c1da0ec8 100644
--- a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicRootedOzoneFileSystem.java
+++ b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicRootedOzoneFileSystem.java
@@ -243,7 +243,7 @@ public class BasicRootedOzoneFileSystem extends FileSystem {
     boolean isRatisStreamingEnabled = getConf().getBoolean(
         OzoneConfigKeys.OZONE_FS_DATASTREAM_ENABLE,
         OzoneConfigKeys.OZONE_FS_DATASTREAM_ENABLE_DEFAULT);
-    if (isRatisStreamingEnabled){
+    if (isRatisStreamingEnabled) {
       return new FSDataOutputStream(adapter.createStreamFile(key,
           replication, overwrite, recursive), statistics);
     }




[ozone] 14/38: HDDS-5987. [Ozone-Streaming] Add XceiverClientRatis stream config (#2841)

Posted by sz...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

szetszwo pushed a commit to branch HDDS-4454
in repository https://gitbox.apache.org/repos/asf/ozone.git

commit 3cf08778de15d9f94661840a855b6f0dadddca3f
Author: hao guo <gu...@360.cn>
AuthorDate: Mon Nov 15 23:54:36 2021 +0800

    HDDS-5987. [Ozone-Streaming] Add XceiverClientRatis stream config (#2841)
    
    (cherry picked from commit 9204b35f2b8d5edc55809559c77ecd311d20d996)
    (cherry picked from commit bb4eef2f7c90f1f919574307af05dc70a07f57dd)
---
 .../src/main/java/org/apache/hadoop/hdds/ratis/RatisHelper.java  | 9 ++++++++-
 1 file changed, 8 insertions(+), 1 deletion(-)

diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/ratis/RatisHelper.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/ratis/RatisHelper.java
index 164044ced0..43b34a1563 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/ratis/RatisHelper.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/ratis/RatisHelper.java
@@ -49,6 +49,7 @@ import org.apache.ratis.conf.RaftProperties;
 import org.apache.ratis.datastream.SupportedDataStreamType;
 import org.apache.ratis.grpc.GrpcConfigKeys;
 import org.apache.ratis.grpc.GrpcTlsConfig;
+import org.apache.ratis.netty.NettyConfigKeys;
 import org.apache.ratis.proto.RaftProtos;
 import org.apache.ratis.protocol.RaftGroup;
 import org.apache.ratis.protocol.RaftGroupId;
@@ -317,7 +318,8 @@ public final class RatisHelper {
     Map<String, String> ratisClientConf =
         getDatanodeRatisPrefixProps(ozoneConf);
     ratisClientConf.forEach((key, val) -> {
-      if (isClientConfig(key) || isGrpcClientConfig(key)) {
+      if (isClientConfig(key) || isGrpcClientConfig(key)
+              || isNettyStreamConfig(key)) {
         raftProperties.set(key, val);
       }
     });
@@ -333,6 +335,11 @@ public final class RatisHelper {
         !key.startsWith(GrpcConfigKeys.Admin.PREFIX) &&
         !key.startsWith(GrpcConfigKeys.Server.PREFIX);
   }
+
+  private static boolean isNettyStreamConfig(String key) {
+    return key.startsWith(NettyConfigKeys.DataStream.PREFIX);
+  }
+
   /**
    * Set all server properties matching with prefix
    * {@link RatisHelper#HDDS_DATANODE_RATIS_PREFIX_KEY} in
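
The practical effect is that Ratis Netty datastream properties set under the HDDS datanode Ratis prefix now reach the client-side RaftProperties. A hedged sketch (the concrete key name below is hypothetical; real keys live under NettyConfigKeys.DataStream.PREFIX, and the hdds.ratis prefix is assumed from RatisHelper#HDDS_DATANODE_RATIS_PREFIX_KEY):

    // Sketch: a "raft.netty.dataStream.*" property set with the hdds.ratis
    // prefix is now copied into the client RaftProperties by the patched
    // loop above. The key below is hypothetical.
    OzoneConfiguration conf = new OzoneConfiguration();
    conf.set("hdds.ratis.raft.netty.dataStream.some-knob", "value");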




[ozone] 12/38: HDDS-5895. [Ozone-Streaming] Make raft.server.data-stream.client.pool.size configurable (#2766)

Posted by sz...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

szetszwo pushed a commit to branch HDDS-4454
in repository https://gitbox.apache.org/repos/asf/ozone.git

commit 11fb9bf708e6758c98eecb911a600e3d48dfda05
Author: micah zhao <mi...@tencent.com>
AuthorDate: Tue Oct 26 15:10:58 2021 +0800

    HDDS-5895. [Ozone-Streaming] Make raft.server.data-stream.client.pool.size configurable (#2766)
    
    (cherry picked from commit 47ae948f8f28e3640790fc37159c1a9e47389cef)
    (cherry picked from commit ca7a926165aa585195aa1f277cbb52f659ddbb29)
---
 .../transport/server/ratis/XceiverServerRatis.java      |  5 +++++
 .../hadoop/hdds/conf/DatanodeRatisServerConfig.java     | 17 +++++++++++++++++
 2 files changed, 22 insertions(+)

diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/XceiverServerRatis.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/XceiverServerRatis.java
index d69b64cce1..2fcc07fc23 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/XceiverServerRatis.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/XceiverServerRatis.java
@@ -242,6 +242,11 @@ public final class XceiverServerRatis implements XceiverServerSpi {
             .getStreamWriteThreads();
     RaftServerConfigKeys.DataStream.setAsyncWriteThreadPoolSize(properties,
         dataStreamWriteRequestThreadPoolSize);
+    int dataStreamClientPoolSize =
+        conf.getObject(DatanodeRatisServerConfig.class)
+            .getClientPoolSize();
+    RaftServerConfigKeys.DataStream.setClientPoolSize(properties,
+        dataStreamClientPoolSize);
   }
 
   @SuppressWarnings("checkstyle:methodlength")
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/conf/DatanodeRatisServerConfig.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/conf/DatanodeRatisServerConfig.java
index 205d92e955..3132928abe 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/conf/DatanodeRatisServerConfig.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/conf/DatanodeRatisServerConfig.java
@@ -158,6 +158,23 @@ public class DatanodeRatisServerConfig {
     this.streamWriteThreads = streamWriteThreads;
   }
 
+  @Config(key = "datastream.client.pool.size",
+      defaultValue = "10",
+      type = ConfigType.INT,
+      tags = {OZONE, DATANODE, RATIS, DATASTREAM},
+      description = "Maximum number of client proxy in NettyServerStreamRpc " +
+          "for datastream write."
+  )
+  private int clientPoolSize;
+
+  public int getClientPoolSize() {
+    return clientPoolSize;
+  }
+
+  public void setClientPoolSize(int clientPoolSize) {
+    this.clientPoolSize = clientPoolSize;
+  }
+
   @Config(key = "delete.ratis.log.directory",
           defaultValue = "true",
           type = ConfigType.BOOLEAN,
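
A quick sketch of exercising the new knob through the typed-config idiom used elsewhere on this branch (the value 20 is illustrative):

    // Sketch: override the new datastream client pool size (default 10).
    DatanodeRatisServerConfig serverConf =
        conf.getObject(DatanodeRatisServerConfig.class);
    serverConf.setClientPoolSize(20);  // illustrative value
    conf.setFromObject(serverConf);
    // The server setup code patched above then applies it via
    // RaftServerConfigKeys.DataStream.setClientPoolSize(properties, 20).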




[ozone] 35/38: HDDS-6592. [Ozone-Streaming] Fix ContainerStateMachine#applyTransaction assert error (#3315)

Posted by sz...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

szetszwo pushed a commit to branch HDDS-4454
in repository https://gitbox.apache.org/repos/asf/ozone.git

commit 7805efb0122688a901076fa1ee6f9e8849c04a22
Author: hao guo <gu...@360.cn>
AuthorDate: Mon Apr 18 11:28:27 2022 +0800

    HDDS-6592. [Ozone-Streaming] Fix ContainerStateMachine#applyTransaction assert error (#3315)
    
    (cherry picked from commit 9d0b07e8c25c4bd1e3542dcf9be9b063db90fe5c)
    (cherry picked from commit f850883767b306b7065ad690acec87bb7b8a24cb)
---
 .../container/common/transport/server/ratis/ContainerStateMachine.java | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/ContainerStateMachine.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/ContainerStateMachine.java
index b9707e5af2..f6f5a99927 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/ContainerStateMachine.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/ContainerStateMachine.java
@@ -879,7 +879,8 @@ public class ContainerStateMachine extends BaseStateMachine {
         builder.setStage(DispatcherContext.WriteChunkStage.COMMIT_DATA);
       }
       if (cmdType == Type.WriteChunk || cmdType == Type.PutSmallFile
-          || cmdType == Type.PutBlock || cmdType == Type.CreateContainer) {
+          || cmdType == Type.PutBlock || cmdType == Type.CreateContainer
+          || cmdType == Type.StreamInit) {
         builder.setContainer2BCSIDMap(container2BCSIDMap);
       }
       CompletableFuture<Message> applyTransactionFuture =

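The one-line fix widens the set of command types whose applyTransaction call must carry the container-to-BCSID map, so that StreamInit transactions no longer trip the assertion. A hedged restatement of the same guard using an EnumSet (the wrapper class is illustrative, not part of the patch):

    import java.util.EnumSet;
    import java.util.Set;
    import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Type;

    final class Container2BcsidGuard {
      // Command types that must see container2BCSIDMap in applyTransaction;
      // the commit adds Type.StreamInit to this set.
      private static final Set<Type> NEEDS_BCSID_MAP = EnumSet.of(
          Type.WriteChunk, Type.PutSmallFile, Type.PutBlock,
          Type.CreateContainer, Type.StreamInit);

      static boolean needsBcsidMap(Type cmdType) {
        return NEEDS_BCSID_MAP.contains(cmdType);
      }
    }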

---------------------------------------------------------------------
To unsubscribe, e-mail: commits-unsubscribe@ozone.apache.org
For additional commands, e-mail: commits-help@ozone.apache.org


[ozone] 36/38: HDDS-6842. [Ozone-Streaming] Reduce the number of watch requests in StreamCommitWatcher. (#3492)

Posted by sz...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

szetszwo pushed a commit to branch HDDS-4454
in repository https://gitbox.apache.org/repos/asf/ozone.git

commit a5980ce1a4afe19b680454c445423a9200d1f39f
Author: Tsz-Wo Nicholas Sze <sz...@apache.org>
AuthorDate: Thu Jun 9 23:40:02 2022 -0700

    HDDS-6842. [Ozone-Streaming] Reduce the number of watch requests in StreamCommitWatcher. (#3492)
    
    (cherry picked from commit 8315f08791463eb6a2dc3d5fcd409156ff3e9a13)
    (cherry picked from commit 4c064f42aaa027baa0ca0c7ed373a143a26a6cec)
---
 .../hdds/scm/storage/AbstractDataStreamOutput.java |  3 +-
 .../hdds/scm/storage/StreamCommitWatcher.java      | 39 +++++++++++++---------
 .../ozone/freon/OzoneClientKeyGenerator.java       | 12 +++----
 3 files changed, 32 insertions(+), 22 deletions(-)

diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/AbstractDataStreamOutput.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/AbstractDataStreamOutput.java
index e29670d781..cad1d04792 100644
--- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/AbstractDataStreamOutput.java
+++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/AbstractDataStreamOutput.java
@@ -29,6 +29,7 @@ import org.apache.ratis.protocol.exceptions.RaftRetryFailureException;
 import java.io.IOException;
 import java.io.InterruptedIOException;
 import java.util.Map;
+import java.util.Objects;
 
 /**
  * This class is used for error handling methods.
@@ -111,7 +112,7 @@ public abstract class AbstractDataStreamOutput
     if (Thread.currentThread().isInterrupted()) {
       setExceptionAndThrow(exception);
     }
-    Preconditions.checkNotNull(action);
+    Objects.requireNonNull(action);
     Preconditions.checkArgument(
         action.action == RetryPolicy.RetryAction.RetryDecision.RETRY);
     if (action.delayMillis > 0) {
diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/StreamCommitWatcher.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/StreamCommitWatcher.java
index 1820416d32..8ca70de816 100644
--- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/StreamCommitWatcher.java
+++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/StreamCommitWatcher.java
@@ -16,17 +16,13 @@
  *  limitations under the License.
  */
 
-/**
- * This class maintains the map of the commitIndexes to be watched for
- * successful replication in the datanodes in a given pipeline. It also releases
- * the buffers associated with the user data back to {@Link BufferPool} once
- * minimum replication criteria is achieved during an ozone key write.
- */
 package org.apache.hadoop.hdds.scm.storage;
 
 import com.google.common.base.Preconditions;
 import org.apache.hadoop.hdds.scm.XceiverClientReply;
 import org.apache.hadoop.hdds.scm.XceiverClientSpi;
+import org.apache.ratis.util.JavaUtils;
+import org.apache.ratis.util.MemoizedSupplier;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -34,6 +30,9 @@ import java.io.IOException;
 import java.util.LinkedList;
 import java.util.List;
 import java.util.Map;
+import java.util.concurrent.CompletableFuture;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.ConcurrentMap;
 import java.util.concurrent.ConcurrentSkipListMap;
 import java.util.concurrent.ExecutionException;
 import java.util.concurrent.TimeoutException;
@@ -49,13 +48,15 @@ public class StreamCommitWatcher {
       LoggerFactory.getLogger(StreamCommitWatcher.class);
 
   private Map<Long, List<StreamBuffer>> commitIndexMap;
-  private List<StreamBuffer> bufferList;
+  private final List<StreamBuffer> bufferList;
 
   // total data which has been successfully flushed and acknowledged
   // by all servers
   private long totalAckDataLength;
+  private final ConcurrentMap<Long, CompletableFuture<XceiverClientReply>>
+      replies = new ConcurrentHashMap<>();
 
-  private XceiverClientSpi xceiverClient;
+  private final XceiverClientSpi xceiverClient;
 
   public StreamCommitWatcher(XceiverClientSpi xceiverClient,
       List<StreamBuffer> bufferList) {
@@ -130,16 +131,24 @@ public class StreamCommitWatcher {
    */
   public XceiverClientReply streamWatchForCommit(long commitIndex)
       throws IOException {
-    final long index;
+    final MemoizedSupplier<CompletableFuture<XceiverClientReply>> supplier
+        = JavaUtils.memoize(CompletableFuture::new);
+    final CompletableFuture<XceiverClientReply> f = replies.compute(commitIndex,
+        (key, value) -> value != null ? value : supplier.get());
+    if (!supplier.isInitialized()) {
+      // future already exists
+      return f.join();
+    }
+
     try {
       XceiverClientReply reply =
           xceiverClient.watchForCommit(commitIndex);
-      if (reply == null) {
-        index = 0;
-      } else {
-        index = reply.getLogIndex();
-      }
-      adjustBuffers(index);
+      f.complete(reply);
+      final CompletableFuture<XceiverClientReply> removed
+          = replies.remove(commitIndex);
+      Preconditions.checkState(removed == f);
+
+      adjustBuffers(reply.getLogIndex());
       return reply;
     } catch (InterruptedException e) {
       // Re-interrupt the thread while catching InterruptedException
diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/OzoneClientKeyGenerator.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/OzoneClientKeyGenerator.java
index b0184bff67..b119e27ea6 100644
--- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/OzoneClientKeyGenerator.java
+++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/OzoneClientKeyGenerator.java
@@ -24,7 +24,8 @@ import java.util.concurrent.Callable;
 import org.apache.hadoop.hdds.cli.HddsVersionProvider;
 import org.apache.hadoop.hdds.client.ReplicationConfig;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType;
 import org.apache.hadoop.ozone.client.OzoneBucket;
 import org.apache.hadoop.ozone.client.OzoneClient;
 
@@ -132,14 +133,13 @@ public class OzoneClientKeyGenerator extends BaseFreonGenerator
   }
 
   private void createStreamKey(long counter) throws Exception {
-    final ReplicationConfig replicationConfig = ReplicationConfig
-        .fromProtoTypeAndFactor(HddsProtos.ReplicationType.RATIS,
-            HddsProtos.ReplicationFactor.THREE);
+    final ReplicationConfig conf = ReplicationConfig.fromProtoTypeAndFactor(
+        ReplicationType.RATIS, ReplicationFactor.THREE);
     final String key = generateObjectName(counter);
 
     timer.time(() -> {
-      try (OzoneDataStreamOutput stream = bucket
-          .createStreamKey(key, keySize, replicationConfig, metadata)) {
+      try (OzoneDataStreamOutput stream = bucket.createStreamKey(
+          key, keySize, conf, metadata)) {
         contentGenerator.write(stream);
       }
       return null;

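The core idea of the change: concurrent callers watching the same commit index now share a single in-flight future instead of each issuing its own watch request. A stripped-down sketch of that dedup pattern, with a hypothetical doWatch standing in for the actual watchForCommit RPC:

    import java.util.concurrent.CompletableFuture;
    import java.util.concurrent.ConcurrentHashMap;
    import java.util.concurrent.ConcurrentMap;
    import org.apache.ratis.util.JavaUtils;
    import org.apache.ratis.util.MemoizedSupplier;

    final class WatchDedup {
      private final ConcurrentMap<Long, CompletableFuture<Long>> inFlight =
          new ConcurrentHashMap<>();

      long watch(long index) {
        final MemoizedSupplier<CompletableFuture<Long>> supplier =
            JavaUtils.memoize(CompletableFuture::new);
        final CompletableFuture<Long> f = inFlight.compute(index,
            (k, v) -> v != null ? v : supplier.get());
        if (!supplier.isInitialized()) {
          return f.join();  // another caller is already watching this index
        }
        final long replied = doWatch(index);  // hypothetical stand-in RPC
        f.complete(replied);
        inFlight.remove(index, f);
        return replied;
      }

      private long doWatch(long index) {
        return index;  // placeholder for xceiverClient.watchForCommit(index)
      }
    }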

---------------------------------------------------------------------
To unsubscribe, e-mail: commits-unsubscribe@ozone.apache.org
For additional commands, e-mail: commits-help@ozone.apache.org


[ozone] 38/38: HDDS-6955. [Ozone-streaming] Add explicit stream flag in ozone shell (#3559)

Posted by sz...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

szetszwo pushed a commit to branch HDDS-4454
in repository https://gitbox.apache.org/repos/asf/ozone.git

commit 653a120236f70829f592da88e70b87d94c36dc02
Author: Kaijie Chen <ch...@kaijie.org>
AuthorDate: Tue Jul 12 18:49:09 2022 +0800

    HDDS-6955. [Ozone-streaming] Add explicit stream flag in ozone shell (#3559)
    
    * Add stream option in ozone sh key
    
    (cherry picked from commit 4c7151b5e861c3f3c8bfd02db73e8731f10eafd6)
    (cherry picked from commit c84da64fbcd34e62b17f1bd2a52238b0497952e8)
---
 .../hadoop/hdds/client/ReplicationConfig.java      | 11 +++
 .../hadoop/ozone/shell/keys/PutKeyHandler.java     | 90 ++++++++++++++--------
 2 files changed, 67 insertions(+), 34 deletions(-)

diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/ReplicationConfig.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/ReplicationConfig.java
index 610419527a..7542409679 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/ReplicationConfig.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/ReplicationConfig.java
@@ -76,6 +76,17 @@ public interface ReplicationConfig {
     return parse(null, replication, config);
   }
 
+  static ReplicationConfig resolve(ReplicationConfig replicationConfig,
+      ReplicationConfig bucketReplicationConfig, ConfigurationSource conf) {
+    if (replicationConfig == null) {
+      replicationConfig = bucketReplicationConfig;
+    }
+    if (replicationConfig == null) {
+      replicationConfig = getDefault(conf);
+    }
+    return replicationConfig;
+  }
+
   /**
    * Helper method to serialize from proto.
    * <p>
diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/keys/PutKeyHandler.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/keys/PutKeyHandler.java
index 1f7c1ef7f4..16af2c5fd1 100644
--- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/keys/PutKeyHandler.java
+++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/keys/PutKeyHandler.java
@@ -29,6 +29,7 @@ import java.nio.channels.FileChannel;
 import java.util.HashMap;
 import java.util.Map;
 
+import com.google.common.base.Preconditions;
 import org.apache.hadoop.conf.StorageUnit;
 import org.apache.hadoop.hdds.client.ECReplicationConfig;
 import org.apache.hadoop.hdds.client.ReplicationConfig;
@@ -42,13 +43,13 @@ import org.apache.hadoop.ozone.client.io.OzoneDataStreamOutput;
 import org.apache.hadoop.ozone.shell.OzoneAddress;
 
 import org.apache.commons.codec.digest.DigestUtils;
-import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType.EC;
 import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_CHUNK_SIZE_DEFAULT;
 import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_CHUNK_SIZE_KEY;
 
 import org.apache.hadoop.ozone.shell.ShellReplicationOptions;
 import picocli.CommandLine.Command;
 import picocli.CommandLine.Mixin;
+import picocli.CommandLine.Option;
 import picocli.CommandLine.Parameters;
 
 /**
@@ -61,6 +62,9 @@ public class PutKeyHandler extends KeyHandler {
   @Parameters(index = "1", arity = "1..1", description = "File to upload")
   private String fileName;
 
+  @Option(names = "--stream")
+  private boolean stream;
+
   @Mixin
   private ShellReplicationOptions replication;
 
@@ -96,41 +100,59 @@ public class PutKeyHandler extends KeyHandler {
     int chunkSize = (int) getConf().getStorageSize(OZONE_SCM_CHUNK_SIZE_KEY,
         OZONE_SCM_CHUNK_SIZE_DEFAULT, StorageUnit.BYTES);
 
-    Boolean useAsync = false;
-    if (dataFile.length() <= chunkSize ||
-        (replicationConfig != null &&
-        replicationConfig.getReplicationType()  == EC) ||
-        bucket.getReplicationConfig() instanceof ECReplicationConfig) {
-      useAsync = true;
-    }
-    if (useAsync) {
-      if (isVerbose()) {
-        out().println("API: async");
-      }
-      try (InputStream input = new FileInputStream(dataFile);
-           OutputStream output = bucket.createKey(keyName, dataFile.length(),
-               replicationConfig, keyMetadata)) {
-        IOUtils.copyBytes(input, output, chunkSize);
-      }
+    if (stream) {
+      stream(dataFile, bucket, keyName, keyMetadata,
+          replicationConfig, chunkSize);
     } else {
-      if (isVerbose()) {
-        out().println("API: streaming");
-      }
-      try (RandomAccessFile raf = new RandomAccessFile(dataFile, "r");
-           OzoneDataStreamOutput out = bucket.createStreamKey(keyName,
-               dataFile.length(), replicationConfig, keyMetadata)) {
-        FileChannel ch = raf.getChannel();
-        long len = raf.length();
-        long off = 0;
-        while (len > 0) {
-          long writeLen = Math.min(len, chunkSize);
-          ByteBuffer bb = ch.map(FileChannel.MapMode.READ_ONLY, off, writeLen);
-          out.write(bb);
-          off += writeLen;
-          len -= writeLen;
-        }
-      }
+      async(dataFile, bucket, keyName, keyMetadata,
+          replicationConfig, chunkSize);
+    }
+  }
+
+  void async(
+      File dataFile, OzoneBucket bucket,
+      String keyName, Map<String, String> keyMetadata,
+      ReplicationConfig replicationConfig, int chunkSize)
+      throws IOException {
+    if (isVerbose()) {
+      out().println("API: async");
+    }
+    try (InputStream input = new FileInputStream(dataFile);
+         OutputStream output = bucket.createKey(keyName, dataFile.length(),
+             replicationConfig, keyMetadata)) {
+      IOUtils.copyBytes(input, output, chunkSize);
     }
   }
 
+  void stream(
+      File dataFile, OzoneBucket bucket,
+      String keyName, Map<String, String> keyMetadata,
+      ReplicationConfig replicationConfig, int chunkSize)
+      throws IOException {
+    if (isVerbose()) {
+      out().println("API: streaming");
+    }
+    // In streaming mode, always resolve the replication config on the client
+    // side, because streaming cannot be used to write EC keys.
+    replicationConfig = ReplicationConfig.resolve(replicationConfig,
+        bucket.getReplicationConfig(), getConf());
+    Preconditions.checkArgument(
+        !(replicationConfig instanceof ECReplicationConfig),
+        "Can not put EC key by streaming");
+
+    try (RandomAccessFile raf = new RandomAccessFile(dataFile, "r");
+         OzoneDataStreamOutput out = bucket.createStreamKey(keyName,
+             dataFile.length(), replicationConfig, keyMetadata)) {
+      FileChannel ch = raf.getChannel();
+      long len = raf.length();
+      long off = 0;
+      while (len > 0) {
+        long writeLen = Math.min(len, chunkSize);
+        ByteBuffer bb = ch.map(FileChannel.MapMode.READ_ONLY, off, writeLen);
+        out.write(bb);
+        off += writeLen;
+        len -= writeLen;
+      }
+    }
+  }
 }

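Streaming is now opt-in from the shell. A likely invocation (volume, bucket, key, and file names are made up):

    # default path: buffered async write
    ozone sh key put /vol1/bucket1/key1 ./data.bin

    # opt in to the streaming write path; fails the "Can not put EC key
    # by streaming" precondition if the resolved replication config is EC
    ozone sh key put --stream /vol1/bucket1/key1 ./data.bin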

---------------------------------------------------------------------
To unsubscribe, e-mail: commits-unsubscribe@ozone.apache.org
For additional commands, e-mail: commits-help@ozone.apache.org


[ozone] 09/38: HDDS-5486. [Ozone-Streaming] Streaming supports writing in Pipeline mode (#2682)

Posted by sz...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

szetszwo pushed a commit to branch HDDS-4454
in repository https://gitbox.apache.org/repos/asf/ozone.git

commit 279168014c3d4950eb83393787bb365444a7aa2c
Author: micah zhao <mi...@tencent.com>
AuthorDate: Thu Sep 30 11:21:47 2021 +0800

    HDDS-5486. [Ozone-Streaming] Streaming supports writing in Pipeline mode (#2682)
    
    (cherry picked from commit 2e85eeb0cd790eff1403f20e0d9b31db45f17b07)
    (cherry picked from commit 10b14300d274cf224f56763c66dea9c2d8a6f731)
---
 .../hdds/scm/storage/BlockDataStreamOutput.java    | 40 ++++++++++++++++++++--
 1 file changed, 37 insertions(+), 3 deletions(-)

diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockDataStreamOutput.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockDataStreamOutput.java
index c69af90a91..41e2c48bbb 100644
--- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockDataStreamOutput.java
+++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockDataStreamOutput.java
@@ -41,6 +41,8 @@ import org.apache.hadoop.security.token.TokenIdentifier;
 import org.apache.ratis.client.api.DataStreamOutput;
 import org.apache.ratis.io.StandardWriteOption;
 import org.apache.ratis.protocol.DataStreamReply;
+import org.apache.ratis.protocol.RaftPeerId;
+import org.apache.ratis.protocol.RoutingTable;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -144,7 +146,7 @@ public class BlockDataStreamOutput implements ByteBufferStreamOutput {
     this.xceiverClient =
         (XceiverClientRatis)xceiverClientManager.acquireClient(pipeline);
     // Alternatively, stream setup can be delayed till the first chunk write.
-    this.out = setupStream();
+    this.out = setupStream(pipeline);
     this.token = token;
 
     flushPeriod = (int) (config.getStreamBufferFlushSize() / config
@@ -166,7 +168,7 @@ public class BlockDataStreamOutput implements ByteBufferStreamOutput {
         config.getBytesPerChecksum());
   }
 
-  private DataStreamOutput setupStream() throws IOException {
+  private DataStreamOutput setupStream(Pipeline pipeline) throws IOException {
     // Execute a dummy WriteChunk request to get the path of the target file,
     // but does NOT write any data to it.
     ContainerProtos.WriteChunkRequestProto.Builder writeChunkRequest =
@@ -184,7 +186,39 @@ public class BlockDataStreamOutput implements ByteBufferStreamOutput {
         ContainerCommandRequestMessage.toMessage(builder.build(), null);
 
     return Preconditions.checkNotNull(xceiverClient.getDataStreamApi())
-        .stream(message.getContent().asReadOnlyByteBuffer());
+        .stream(message.getContent().asReadOnlyByteBuffer(),
+            getRoutingTable(pipeline));
+  }
+
+  public RoutingTable getRoutingTable(Pipeline pipeline) {
+    RaftPeerId primaryId = null;
+    List<RaftPeerId> raftPeers = new ArrayList<>();
+
+    for (DatanodeDetails dn : pipeline.getNodes()) {
+      final RaftPeerId raftPeerId = RaftPeerId.valueOf(dn.getUuidString());
+      try {
+        if (dn == pipeline.getFirstNode()) {
+          primaryId = raftPeerId;
+        }
+      } catch (IOException e) {
+        LOG.error("Can not get FirstNode from the pipeline: {} with " +
+            "exception: {}", pipeline.toString(), e.getLocalizedMessage());
+        return null;
+      }
+      raftPeers.add(raftPeerId);
+    }
+
+    RoutingTable.Builder builder = RoutingTable.newBuilder();
+    RaftPeerId previousId = primaryId;
+    for (RaftPeerId peerId : raftPeers) {
+      if (peerId.equals(primaryId)) {
+        continue;
+      }
+      builder.addSuccessor(previousId, peerId);
+      previousId = peerId;
+    }
+
+    return builder.build();
   }
 
   public BlockID getBlockID() {

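getRoutingTable turns the pipeline's node list into a linear chain starting at the primary. A hedged sketch of what the builder produces for a three-node pipeline (the peer ids are hypothetical stand-ins for datanode UUIDs):

    import org.apache.ratis.protocol.RaftPeerId;
    import org.apache.ratis.protocol.RoutingTable;

    final class PipelineChainExample {
      static RoutingTable chainOfThree() {
        RaftPeerId n1 = RaftPeerId.valueOf("dn-1");  // primary (first node)
        RaftPeerId n2 = RaftPeerId.valueOf("dn-2");
        RaftPeerId n3 = RaftPeerId.valueOf("dn-3");
        RoutingTable.Builder builder = RoutingTable.newBuilder();
        builder.addSuccessor(n1, n2);  // primary streams to the second node
        builder.addSuccessor(n2, n3);  // which relays to the third
        return builder.build();
      }
    }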

---------------------------------------------------------------------
To unsubscribe, e-mail: commits-unsubscribe@ozone.apache.org
For additional commands, e-mail: commits-help@ozone.apache.org


[ozone] 06/38: HDDS-5599. [Ozone-Streaming] Drop BufferPool and ChunkBuffer to avoid buffer copying (#2557)

Posted by sz...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

szetszwo pushed a commit to branch HDDS-4454
in repository https://gitbox.apache.org/repos/asf/ozone.git

commit ae462a6747d4f7821f0476a60d338611d4d3c9c8
Author: micah zhao <mi...@tencent.com>
AuthorDate: Wed Aug 25 23:39:10 2021 +0800

    HDDS-5599. [Ozone-Streaming] Drop BufferPool and ChunkBuffer to avoid buffer copying (#2557)
    
    (cherry picked from commit b1f1051538e45b9661abfb348df70f057d78ebe0)
    (cherry picked from commit 6b51811d1f0605e95d52441579190865064b9d05)
---
 .../hdds/scm/storage/BlockDataStreamOutput.java    | 290 +++------------------
 .../hdds/scm/storage/StreamCommitWatcher.java      | 166 ++++++++++++
 .../client/io/BlockDataStreamOutputEntry.java      |  33 +--
 .../client/io/BlockDataStreamOutputEntryPool.java  |  20 --
 .../ozone/client/io/KeyDataStreamOutput.java       |  29 +--
 5 files changed, 211 insertions(+), 327 deletions(-)

diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockDataStreamOutput.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockDataStreamOutput.java
index f658df1af9..39ec2f9219 100644
--- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockDataStreamOutput.java
+++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockDataStreamOutput.java
@@ -36,21 +36,18 @@ import org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerExcep
 import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
 import org.apache.hadoop.ozone.common.Checksum;
 import org.apache.hadoop.ozone.common.ChecksumData;
-import org.apache.hadoop.ozone.common.ChunkBuffer;
 import org.apache.hadoop.ozone.common.OzoneChecksumException;
 import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.security.token.TokenIdentifier;
 import org.apache.ratis.client.api.DataStreamOutput;
 import org.apache.ratis.io.StandardWriteOption;
 import org.apache.ratis.protocol.DataStreamReply;
-import org.apache.ratis.thirdparty.com.google.protobuf.ByteString;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 import java.io.IOException;
 import java.util.ArrayList;
 import java.util.List;
-import java.util.Map;
 import java.util.concurrent.CompletableFuture;
 import java.util.concurrent.CompletionException;
 import java.util.concurrent.ExecutionException;
@@ -93,7 +90,6 @@ public class BlockDataStreamOutput implements ByteBufStreamOutput {
 
   private int chunkIndex;
   private final AtomicLong chunkOffset = new AtomicLong();
-  private final BufferPool bufferPool;
   // The IOException will be set by response handling thread in case there is an
   // exception received in the response. If the exception is set, the next
   // request will fail upfront.
@@ -106,28 +102,16 @@ public class BlockDataStreamOutput implements ByteBufStreamOutput {
   // effective data write attempted so far for the block
   private long writtenDataLength;
 
-  // List containing buffers for which the putBlock call will
-  // update the length in the datanodes. This list will just maintain
-  // references to the buffers in the BufferPool which will be cleared
-  // when the watchForCommit acknowledges a putBlock logIndex has been
-  // committed on all datanodes. This list will be a  place holder for buffers
-  // which got written between successive putBlock calls.
-  private List<ChunkBuffer> bufferList;
-
   // This object will maintain the commitIndexes and byteBufferList in order
   // Also, corresponding to the logIndex, the corresponding list of buffers will
   // be released from the buffer pool.
-  private final CommitWatcher commitWatcher;
+  private final StreamCommitWatcher commitWatcher;
 
   private final List<DatanodeDetails> failedServers;
   private final Checksum checksum;
 
   //number of buffers used before doing a flush/putBlock.
   private int flushPeriod;
-  //bytes remaining to write in the current buffer.
-  private int currentBufferRemaining;
-  //current buffer allocated to write
-  private ChunkBuffer currentBuffer;
   private final Token<? extends TokenIdentifier> token;
   private final DataStreamOutput out;
   private CompletableFuture<DataStreamReply> dataStreamCloseReply;
@@ -141,13 +125,11 @@ public class BlockDataStreamOutput implements ByteBufStreamOutput {
    * @param blockID              block ID
    * @param xceiverClientManager client manager that controls client
    * @param pipeline             pipeline where block will be written
-   * @param bufferPool           pool of buffers
    */
   public BlockDataStreamOutput(
       BlockID blockID,
       XceiverClientFactory xceiverClientManager,
       Pipeline pipeline,
-      BufferPool bufferPool,
       OzoneClientConfig config,
       Token<? extends TokenIdentifier> token
   ) throws IOException {
@@ -163,11 +145,8 @@ public class BlockDataStreamOutput implements ByteBufStreamOutput {
         (XceiverClientRatis)xceiverClientManager.acquireClient(pipeline);
     // Alternatively, stream setup can be delayed till the first chunk write.
     this.out = setupStream();
-    this.bufferPool = bufferPool;
     this.token = token;
 
-    //number of buffers used before doing a flush
-    refreshCurrentBuffer(bufferPool);
     flushPeriod = (int) (config.getStreamBufferFlushSize() / config
         .getStreamBufferSize());
 
@@ -178,8 +157,7 @@ public class BlockDataStreamOutput implements ByteBufStreamOutput {
 
     // A single thread executor handle the responses of async requests
     responseExecutor = Executors.newSingleThreadExecutor();
-    commitWatcher = new CommitWatcher(bufferPool, xceiverClient);
-    bufferList = null;
+    commitWatcher = new StreamCommitWatcher(xceiverClient);
     totalDataFlushedLength = 0;
     writtenDataLength = 0;
     failedServers = new ArrayList<>(0);
@@ -209,20 +187,10 @@ public class BlockDataStreamOutput implements ByteBufStreamOutput {
         .stream(message.getContent().asReadOnlyByteBuffer());
   }
 
-  private void refreshCurrentBuffer(BufferPool pool) {
-    currentBuffer = pool.getCurrentBuffer();
-    currentBufferRemaining =
-        currentBuffer != null ? currentBuffer.remaining() : 0;
-  }
-
   public BlockID getBlockID() {
     return blockID.get();
   }
 
-  public long getTotalAckDataLength() {
-    return commitWatcher.getTotalAckDataLength();
-  }
-
   public long getWrittenDataLength() {
     return writtenDataLength;
   }
@@ -236,82 +204,29 @@ public class BlockDataStreamOutput implements ByteBufStreamOutput {
     return xceiverClient;
   }
 
-  @VisibleForTesting
-  public long getTotalDataFlushedLength() {
-    return totalDataFlushedLength;
-  }
-
-  @VisibleForTesting
-  public BufferPool getBufferPool() {
-    return bufferPool;
-  }
-
   public IOException getIoException() {
     return ioException.get();
   }
 
-  @VisibleForTesting
-  public Map<Long, List<ChunkBuffer>> getCommitIndex2flushedDataMap() {
-    return commitWatcher.getCommitIndex2flushedDataMap();
-  }
-
   @Override
-  public void write(ByteBuf b) throws IOException {
+  public void write(ByteBuf buf) throws IOException {
     checkOpen();
-    if (b == null) {
+    if (buf == null) {
       throw new NullPointerException();
     }
-    int off = b.readerIndex();
-    int len = b.readableBytes();
-
-    while (len > 0) {
-      allocateNewBufferIfNeeded();
-      final int writeLen = Math.min(currentBufferRemaining, len);
-      // TODO: avoid buffer copy here
-      currentBuffer.put(b.nioBuffer(off, writeLen));
-      currentBufferRemaining -= writeLen;
-      writeChunkIfNeeded();
-      off += writeLen;
-      len -= writeLen;
-      writtenDataLength += writeLen;
-      doFlushOrWatchIfNeeded();
-    }
-  }
-
-  private void writeChunkIfNeeded() throws IOException {
-    if (currentBufferRemaining == 0) {
-      writeChunk(currentBuffer);
-    }
-  }
-
-  private void doFlushOrWatchIfNeeded() throws IOException {
-    if (currentBufferRemaining == 0) {
-      if (bufferPool.getNumberOfUsedBuffers() % flushPeriod == 0) {
-        updateFlushLength();
-        executePutBlock(false, false);
-      }
-      // Data in the bufferPool can not exceed streamBufferMaxSize
-      if (bufferPool.getNumberOfUsedBuffers() == bufferPool.getCapacity()) {
-        handleFullBuffer();
-      }
+    final int len = buf.readableBytes();
+    if (len == 0) {
+      return;
     }
-  }
+    writeChunkToContainer(buf);
 
-  private void allocateNewBufferIfNeeded() {
-    if (currentBufferRemaining == 0) {
-      currentBuffer = bufferPool.allocateBuffer(config.getBufferIncrement());
-      currentBufferRemaining = currentBuffer.remaining();
-    }
+    writtenDataLength += len;
   }
 
   private void updateFlushLength() {
     totalDataFlushedLength = writtenDataLength;
   }
 
-  private boolean isBufferPoolFull() {
-    return bufferPool.computeBufferData() == config.getStreamBufferMaxSize();
-  }
-
   /**
    * Will be called on the retryPath in case closedContainerException/
    * TimeoutException.
@@ -319,70 +234,9 @@ public class BlockDataStreamOutput implements ByteBufStreamOutput {
    * @throws IOException if error occurred
    */
 
-  // In this case, the data is already cached in the currentBuffer.
+  // TODO: add a new retry policy that does not depend on a buffer pool.
   public void writeOnRetry(long len) throws IOException {
-    if (len == 0) {
-      return;
-    }
-    if (LOG.isDebugEnabled()) {
-      LOG.debug("Retrying write length {} for blockID {}", len, blockID);
-    }
-    Preconditions.checkArgument(len <= config.getStreamBufferMaxSize());
-    int count = 0;
-    while (len > 0) {
-      ChunkBuffer buffer = bufferPool.getBuffer(count);
-      long writeLen = Math.min(buffer.position(), len);
-      if (!buffer.hasRemaining()) {
-        writeChunk(buffer);
-      }
-      len -= writeLen;
-      count++;
-      writtenDataLength += writeLen;
-      // we should not call isBufferFull/shouldFlush here.
-      // The buffer might already be full as whole data is already cached in
-      // the buffer. We should just validate
-      // if we wrote data of size streamBufferMaxSize/streamBufferFlushSize to
-      // call for handling full buffer/flush buffer condition.
-      if (writtenDataLength % config.getStreamBufferFlushSize() == 0) {
-        // reset the position to zero as now we will be reading the
-        // next buffer in the list
-        updateFlushLength();
-        executePutBlock(false, false);
-      }
-      if (writtenDataLength == config.getStreamBufferMaxSize()) {
-        handleFullBuffer();
-      }
-    }
-  }
 
-  /**
-   * This is a blocking call. It will wait for the flush till the commit index
-   * at the head of the commitIndex2flushedDataMap gets replicated to all or
-   * majority.
-   * @throws IOException
-   */
-  private void handleFullBuffer() throws IOException {
-    try {
-      checkOpen();
-      if (!commitWatcher.getFutureMap().isEmpty()) {
-        waitOnFlushFutures();
-      }
-    } catch (ExecutionException e) {
-      handleExecutionException(e);
-    } catch (InterruptedException ex) {
-      Thread.currentThread().interrupt();
-      handleInterruptedException(ex, true);
-    }
-    watchForCommit(true);
-  }
-
-
-  // It may happen that once the exception is encountered , we still might
-  // have successfully flushed up to a certain index. Make sure the buffers
-  // only contain data which have not been sufficiently replicated
-  private void adjustBuffersOnException() {
-    commitWatcher.releaseBuffersOnException();
-    refreshCurrentBuffer(bufferPool);
   }
 
   /**
@@ -397,7 +251,8 @@ public class BlockDataStreamOutput implements ByteBufStreamOutput {
     checkOpen();
     try {
       XceiverClientReply reply = bufferFull ?
-          commitWatcher.watchOnFirstIndex() : commitWatcher.watchOnLastIndex();
+          commitWatcher.streamWatchOnFirstIndex() :
+          commitWatcher.streamWatchOnLastIndex();
       if (reply != null) {
         List<DatanodeDetails> dnList = reply.getDatanodes();
         if (!dnList.isEmpty()) {
@@ -412,7 +267,6 @@ public class BlockDataStreamOutput implements ByteBufStreamOutput {
       setIoException(ioe);
       throw getIoException();
     }
-    refreshCurrentBuffer(bufferPool);
 
   }
 
@@ -426,22 +280,7 @@ public class BlockDataStreamOutput implements ByteBufStreamOutput {
       boolean force) throws IOException {
     checkOpen();
     long flushPos = totalDataFlushedLength;
-    final List<ChunkBuffer> byteBufferList;
-    if (!force) {
-      Preconditions.checkNotNull(bufferList);
-      byteBufferList = bufferList;
-      bufferList = null;
-      Preconditions.checkNotNull(byteBufferList);
-    } else {
-      byteBufferList = null;
-    }
-
-    try {
-      CompletableFuture.allOf(futures.toArray(EMPTY_FUTURE_ARRAY)).get();
-    } catch (Exception e) {
-      LOG.warn("Failed to write all chunks through stream: " + e);
-      throw new IOException(e);
-    }
+    flush();
     if (close) {
       dataStreamCloseReply = out.closeAsync();
     }
@@ -471,15 +310,12 @@ public class BlockDataStreamOutput implements ByteBufStreamOutput {
           if (LOG.isDebugEnabled()) {
             LOG.debug(
                 "Adding index " + asyncReply.getLogIndex() + " commitMap size "
-                    + commitWatcher.getCommitInfoMapSize() + " flushLength "
-                    + flushPos + " numBuffers " + byteBufferList.size()
-                    + " blockID " + blockID + " bufferPool size" + bufferPool
-                    .getSize() + " currentBufferIndex " + bufferPool
-                    .getCurrentBufferIndex());
+                    + commitWatcher.getCommitInfoSetSize() + " flushLength "
+                    + flushPos + " blockID " + blockID);
           }
           // for standalone protocol, logIndex will always be 0.
-          commitWatcher
-              .updateCommitInfoMap(asyncReply.getLogIndex(), byteBufferList);
+          commitWatcher.updateCommitInfoSet(
+              asyncReply.getLogIndex());
         }
         return e;
       }, responseExecutor).exceptionally(e -> {
@@ -503,36 +339,12 @@ public class BlockDataStreamOutput implements ByteBufStreamOutput {
 
   @Override
   public void flush() throws IOException {
-    if (xceiverClientFactory != null && xceiverClient != null
-        && bufferPool != null && bufferPool.getSize() > 0
-        && (!config.isStreamBufferFlushDelay() ||
-            writtenDataLength - totalDataFlushedLength
-                >= config.getStreamBufferSize())) {
-      try {
-        handleFlush(false);
-      } catch (ExecutionException e) {
-        // just set the exception here as well in order to maintain sanctity of
-        // ioException field
-        handleExecutionException(e);
-      } catch (InterruptedException ex) {
-        Thread.currentThread().interrupt();
-        handleInterruptedException(ex, true);
-      }
-    }
-  }
-
-  private void writeChunk(ChunkBuffer buffer)
-      throws IOException {
-    // This data in the buffer will be pushed to datanode and a reference will
-    // be added to the bufferList. Once putBlock gets executed, this list will
-    // be marked null. Hence, during first writeChunk call after every putBlock
-    // call or during the first call to writeChunk here, the list will be null.
-
-    if (bufferList == null) {
-      bufferList = new ArrayList<>();
+    try {
+      CompletableFuture.allOf(futures.toArray(EMPTY_FUTURE_ARRAY)).get();
+    } catch (Exception e) {
+      LOG.warn("Failed to write all chunks through stream: " + e);
+      throw new IOException(e);
     }
-    bufferList.add(buffer);
-    writeChunkToContainer(buffer.duplicate(0, buffer.position()));
   }
 
   /**
@@ -543,11 +355,6 @@ public class BlockDataStreamOutput implements ByteBufStreamOutput {
     checkOpen();
     // flush the last chunk data residing on the currentBuffer
     if (totalDataFlushedLength < writtenDataLength) {
-      refreshCurrentBuffer(bufferPool);
-      Preconditions.checkArgument(currentBuffer.position() > 0);
-      if (currentBuffer.hasRemaining()) {
-        writeChunk(currentBuffer);
-      }
       // This can be a partially filled chunk. Since we are flushing the buffer
       // here, we just limit this buffer to the current position. So that next
       // write will happen in new buffer
@@ -570,8 +377,7 @@ public class BlockDataStreamOutput implements ByteBufStreamOutput {
 
   @Override
   public void close() throws IOException {
-    if (xceiverClientFactory != null && xceiverClient != null
-        && bufferPool != null && bufferPool.getSize() > 0) {
+    if (xceiverClientFactory != null && xceiverClient != null) {
       try {
         handleFlush(true);
         dataStreamCloseReply.get();
@@ -583,10 +389,6 @@ public class BlockDataStreamOutput implements ByteBufStreamOutput {
       } finally {
         cleanup(false);
       }
-      // TODO: Turn the below buffer empty check on when Standalone pipeline
-      // is removed in the write path in tests
-      // Preconditions.checkArgument(buffer.position() == 0);
-      // bufferPool.checkBufferPoolEmpty();
 
     }
   }
@@ -638,10 +440,6 @@ public class BlockDataStreamOutput implements ByteBufStreamOutput {
     xceiverClientFactory = null;
     xceiverClient = null;
     commitWatcher.cleanup();
-    if (bufferList !=  null) {
-      bufferList.clear();
-    }
-    bufferList = null;
     responseExecutor.shutdown();
   }
 
@@ -655,7 +453,6 @@ public class BlockDataStreamOutput implements ByteBufStreamOutput {
     if (isClosed()) {
       throw new IOException("BlockDataStreamOutput has been closed.");
     } else if (getIoException() != null) {
-      adjustBuffersOnException();
       throw getIoException();
     }
   }
@@ -683,12 +480,11 @@ public class BlockDataStreamOutput implements ByteBufStreamOutput {
    * @throws OzoneChecksumException if there is an error while computing
    * checksum
    */
-  private void writeChunkToContainer(ChunkBuffer chunk) throws IOException {
-    int effectiveChunkSize = chunk.remaining();
+  private void writeChunkToContainer(ByteBuf buf)
+      throws IOException {
+    ChecksumData checksumData = checksum.computeChecksum(buf.nioBuffer());
+    int effectiveChunkSize = buf.readableBytes();
     final long offset = chunkOffset.getAndAdd(effectiveChunkSize);
-    final ByteString data = chunk.toByteString(
-        bufferPool.byteStringConversion());
-    ChecksumData checksumData = checksum.computeChecksum(chunk);
     ChunkInfo chunkInfo = ChunkInfo.newBuilder()
         .setChunkName(blockID.get().getLocalID() + "_chunk_" + ++chunkIndex)
         .setOffset(offset)
@@ -703,21 +499,22 @@ public class BlockDataStreamOutput implements ByteBufStreamOutput {
 
     CompletableFuture<DataStreamReply> future =
         (needSync(offset + effectiveChunkSize) ?
-        out.writeAsync(data.asReadOnlyByteBuffer(), StandardWriteOption.SYNC) :
-        out.writeAsync(data.asReadOnlyByteBuffer()))
-        .whenCompleteAsync((r, e) -> {
-          if (e != null || !r.isSuccess()) {
-            if (e == null) {
-              e = new IOException("result is not success");
-            }
-            String msg = "Failed to write chunk " + chunkInfo.getChunkName() +
-                " " + "into block " + blockID;
-            LOG.debug("{}, exception: {}", msg, e.getLocalizedMessage());
-            CompletionException ce = new CompletionException(msg, e);
-            setIoException(ce);
-            throw ce;
-          }
-        }, responseExecutor);
+            out.writeAsync(buf.nioBuffer(), StandardWriteOption.SYNC) :
+            out.writeAsync(buf.nioBuffer()))
+            .whenCompleteAsync((r, e) -> {
+              if (e != null || !r.isSuccess()) {
+                if (e == null) {
+                  e = new IOException("result is not success");
+                }
+                String msg =
+                    "Failed to write chunk " + chunkInfo.getChunkName() +
+                        " " + "into block " + blockID;
+                LOG.debug("{}, exception: {}", msg, e.getLocalizedMessage());
+                CompletionException ce = new CompletionException(msg, e);
+                setIoException(ce);
+                throw ce;
+              }
+            }, responseExecutor);
 
     futures.add(future);
     containerBlockData.addChunks(chunkInfo);
@@ -754,7 +551,6 @@ public class BlockDataStreamOutput implements ByteBufStreamOutput {
    */
   private void handleExecutionException(Exception ex) throws IOException {
     setIoException(ex);
-    adjustBuffersOnException();
     throw getIoException();
   }
 }
diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/StreamCommitWatcher.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/StreamCommitWatcher.java
new file mode 100644
index 0000000000..c187ffe902
--- /dev/null
+++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/StreamCommitWatcher.java
@@ -0,0 +1,166 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ *  with the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ */
+
+/**
+ * This class maintains the set of commit indexes to be watched for
+ * successful replication in the datanodes of a given pipeline during an
+ * Ozone key write. Unlike {@link CommitWatcher}, the streaming write path
+ * does not release buffers back to a {@link BufferPool}.
+ */
+package org.apache.hadoop.hdds.scm.storage;
+
+import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerCommandResponseProto;
+import org.apache.hadoop.hdds.scm.XceiverClientReply;
+import org.apache.hadoop.hdds.scm.XceiverClientSpi;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.IOException;
+import java.util.Set;
+import java.util.concurrent.CompletableFuture;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.ConcurrentMap;
+import java.util.concurrent.ConcurrentSkipListSet;
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.TimeoutException;
+
+/**
+ * This class executes watchForCommit on the Ratis pipeline and waits
+ * until data has been successfully replicated.
+ */
+public class StreamCommitWatcher {
+
+  private static final Logger LOG =
+      LoggerFactory.getLogger(StreamCommitWatcher.class);
+
+  private Set<Long> commitIndexSet;
+
+  // map holding all pending putBlock futures
+  private ConcurrentHashMap<Long,
+      CompletableFuture<ContainerCommandResponseProto>>
+      futureMap;
+
+  private XceiverClientSpi xceiverClient;
+
+  public StreamCommitWatcher(XceiverClientSpi xceiverClient) {
+    this.xceiverClient = xceiverClient;
+    commitIndexSet = new ConcurrentSkipListSet<>();
+    futureMap = new ConcurrentHashMap<>();
+  }
+
+  public void updateCommitInfoSet(long index) {
+    commitIndexSet.add(index);
+  }
+
+  int getCommitInfoSetSize() {
+    return commitIndexSet.size();
+  }
+
+  /**
+   * Calls watchForCommit on the Ratis client for the smallest index in
+   * the commitIndexSet.
+   * @return {@link XceiverClientReply} reply from raft client
+   * @throws IOException in case watchForCommit fails
+   */
+  public XceiverClientReply streamWatchOnFirstIndex() throws IOException {
+    if (!commitIndexSet.isEmpty()) {
+      // wait for the first commit index in the commitIndexSet to get
+      // committed to all or a majority of the nodes in case a timeout
+      // happens.
+      long index =
+          commitIndexSet.stream().mapToLong(v -> v).min()
+              .getAsLong();
+      if (LOG.isDebugEnabled()) {
+        LOG.debug("waiting for first index {} to catch up", index);
+      }
+      return streamWatchForCommit(index);
+    } else {
+      return null;
+    }
+  }
+
+  /**
+   * Calls watchForCommit on the Ratis client for the largest index in
+   * the commitIndexSet.
+   * @return {@link XceiverClientReply} reply from raft client
+   * @throws IOException in case watchForCommit fails
+   */
+  public XceiverClientReply streamWatchOnLastIndex()
+      throws IOException {
+    if (!commitIndexSet.isEmpty()) {
+      // wait for the last commit index in the commitIndexSet to get
+      // committed to all or a majority of the nodes in case a timeout
+      // happens.
+      long index =
+          commitIndexSet.stream().mapToLong(v -> v).max()
+              .getAsLong();
+      if (LOG.isDebugEnabled()) {
+        LOG.debug("waiting for last flush Index {} to catch up", index);
+      }
+      return streamWatchForCommit(index);
+    } else {
+      return null;
+    }
+  }
+
+  /**
+   * Calls the watchForCommit API of the Ratis client. This method is for
+   * streaming and no longer requires releasing buffers.
+   * @param commitIndex log index to watch for
+   * @return minimum commit index replicated to all nodes
+   * @throws IOException IOException in case watch gets timed out
+   */
+  public XceiverClientReply streamWatchForCommit(long commitIndex)
+      throws IOException {
+    try {
+      XceiverClientReply reply =
+          xceiverClient.watchForCommit(commitIndex);
+      return reply;
+    } catch (InterruptedException e) {
+      // Re-interrupt the thread while catching InterruptedException
+      Thread.currentThread().interrupt();
+      throw getIOExceptionForWatchForCommit(commitIndex, e);
+    } catch (TimeoutException | ExecutionException e) {
+      throw getIOExceptionForWatchForCommit(commitIndex, e);
+    }
+  }
+
+  private IOException getIOExceptionForWatchForCommit(long commitIndex,
+                                                       Exception e) {
+    LOG.warn("watchForCommit failed for index {}", commitIndex, e);
+    IOException ioException = new IOException(
+        "Unexpected Storage Container Exception: " + e.toString(), e);
+    return ioException;
+  }
+
+  public ConcurrentMap<Long,
+        CompletableFuture<
+            ContainerCommandResponseProto>> getFutureMap() {
+    return futureMap;
+  }
+
+  public void cleanup() {
+    if (commitIndexSet != null) {
+      commitIndexSet.clear();
+    }
+    if (futureMap != null) {
+      futureMap.clear();
+    }
+    commitIndexSet = null;
+  }
+}
diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/BlockDataStreamOutputEntry.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/BlockDataStreamOutputEntry.java
index 6954742601..98907bf8af 100644
--- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/BlockDataStreamOutputEntry.java
+++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/BlockDataStreamOutputEntry.java
@@ -25,7 +25,6 @@ import org.apache.hadoop.hdds.scm.OzoneClientConfig;
 import org.apache.hadoop.hdds.scm.XceiverClientFactory;
 import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
 import org.apache.hadoop.hdds.scm.storage.BlockDataStreamOutput;
-import org.apache.hadoop.hdds.scm.storage.BufferPool;
 import org.apache.hadoop.hdds.scm.storage.ByteBufStreamOutput;
 import org.apache.hadoop.hdds.security.token.OzoneBlockTokenIdentifier;
 import org.apache.hadoop.security.token.Token;
@@ -52,15 +51,12 @@ public final class BlockDataStreamOutputEntry
   private long currentPosition;
   private final Token<OzoneBlockTokenIdentifier> token;
 
-  private BufferPool bufferPool;
-
   @SuppressWarnings({"parameternumber", "squid:S00107"})
   private BlockDataStreamOutputEntry(
       BlockID blockID, String key,
       XceiverClientFactory xceiverClientManager,
       Pipeline pipeline,
       long length,
-      BufferPool bufferPool,
       Token<OzoneBlockTokenIdentifier> token,
       OzoneClientConfig config
   ) {
@@ -73,7 +69,6 @@ public final class BlockDataStreamOutputEntry
     this.token = token;
     this.length = length;
     this.currentPosition = 0;
-    this.bufferPool = bufferPool;
   }
 
   long getLength() {
@@ -98,7 +93,7 @@ public final class BlockDataStreamOutputEntry
     if (this.byteBufStreamOutput == null) {
       this.byteBufStreamOutput =
           new BlockDataStreamOutput(blockID, xceiverClientManager,
-              pipeline, bufferPool, config, token);
+              pipeline, config, token);
     }
   }
 
@@ -135,20 +130,6 @@ public final class BlockDataStreamOutputEntry
     return false;
   }
 
-  long getTotalAckDataLength() {
-    if (byteBufStreamOutput != null) {
-      BlockDataStreamOutput out =
-          (BlockDataStreamOutput) this.byteBufStreamOutput;
-      blockID = out.getBlockID();
-      return out.getTotalAckDataLength();
-    } else {
-      // For a pre allocated block for which no write has been initiated,
-      // the ByteBufStreamOutput will be null here.
-      // In such cases, the default blockCommitSequenceId will be 0
-      return 0;
-    }
-  }
-
   Collection<DatanodeDetails> getFailedServers() {
     if (byteBufStreamOutput != null) {
       BlockDataStreamOutput out =
@@ -198,7 +179,6 @@ public final class BlockDataStreamOutputEntry
     private XceiverClientFactory xceiverClientManager;
     private Pipeline pipeline;
     private long length;
-    private BufferPool bufferPool;
     private Token<OzoneBlockTokenIdentifier> token;
     private OzoneClientConfig config;
 
@@ -230,12 +210,6 @@ public final class BlockDataStreamOutputEntry
       return this;
     }
 
-
-    public Builder setBufferPool(BufferPool pool) {
-      this.bufferPool = pool;
-      return this;
-    }
-
     public Builder setConfig(OzoneClientConfig clientConfig) {
       this.config = clientConfig;
       return this;
@@ -252,7 +226,6 @@ public final class BlockDataStreamOutputEntry
           xceiverClientManager,
           pipeline,
           length,
-          bufferPool,
           token, config);
     }
   }
@@ -282,10 +255,6 @@ public final class BlockDataStreamOutputEntry
     return currentPosition;
   }
 
-  public BufferPool getBufferPool() {
-    return bufferPool;
-  }
-
   public void setCurrentPosition(long curPosition) {
     this.currentPosition = curPosition;
   }
diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/BlockDataStreamOutputEntryPool.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/BlockDataStreamOutputEntryPool.java
index 94c505f2af..4bc55de262 100644
--- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/BlockDataStreamOutputEntryPool.java
+++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/BlockDataStreamOutputEntryPool.java
@@ -22,12 +22,10 @@ import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Preconditions;
 import org.apache.hadoop.hdds.client.ReplicationConfig;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdds.scm.ByteStringConversion;
 import org.apache.hadoop.hdds.scm.OzoneClientConfig;
 import org.apache.hadoop.hdds.scm.XceiverClientFactory;
 import org.apache.hadoop.hdds.scm.container.common.helpers.ExcludeList;
 import org.apache.hadoop.hdds.scm.pipeline.PipelineID;
-import org.apache.hadoop.hdds.scm.storage.BufferPool;
 import org.apache.hadoop.ozone.om.helpers.OmKeyArgs;
 import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
 import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo;
@@ -58,7 +56,6 @@ public class BlockDataStreamOutputEntryPool {
   private final OmKeyArgs keyArgs;
   private final XceiverClientFactory xceiverClientFactory;
   private final String requestID;
-  private final BufferPool bufferPool;
   private OmMultipartCommitUploadPartInfo commitUploadPartInfo;
   private final long openID;
   private final ExcludeList excludeList;
@@ -86,13 +83,6 @@ public class BlockDataStreamOutputEntryPool {
     this.requestID = requestId;
     this.openID = openID;
     this.excludeList = new ExcludeList();
-
-    this.bufferPool =
-        new BufferPool(config.getStreamBufferSize(),
-            (int) (config.getStreamBufferMaxSize() / config
-                .getStreamBufferSize()),
-            ByteStringConversion
-                .createByteBufferConversion(unsafeByteBufferConversion));
   }
 
   /**
@@ -114,8 +104,6 @@ public class BlockDataStreamOutputEntryPool {
     config.setStreamBufferFlushDelay(false);
     requestID = null;
     int chunkSize = 0;
-    bufferPool = new BufferPool(chunkSize, 1);
-
     currentStreamIndex = 0;
     openID = -1;
     excludeList = new ExcludeList();
@@ -154,7 +142,6 @@ public class BlockDataStreamOutputEntryPool {
             .setPipeline(subKeyInfo.getPipeline())
             .setConfig(config)
             .setLength(subKeyInfo.getLength())
-            .setBufferPool(bufferPool)
             .setToken(subKeyInfo.getToken());
     streamEntries.add(builder.build());
   }
@@ -293,17 +280,10 @@ public class BlockDataStreamOutputEntryPool {
     return streamEntries.get(currentStreamIndex);
   }
 
-  long computeBufferData() {
-    return bufferPool.computeBufferData();
-  }
-
   void cleanup() {
     if (excludeList != null) {
       excludeList.clear();
     }
-    if (bufferPool != null) {
-      bufferPool.clearBufferPool();
-    }
 
     if (streamEntries != null) {
       streamEntries.clear();
diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/KeyDataStreamOutput.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/KeyDataStreamOutput.java
index a9be11667c..c37f9cd51d 100644
--- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/KeyDataStreamOutput.java
+++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/KeyDataStreamOutput.java
@@ -279,27 +279,7 @@ public class KeyDataStreamOutput implements ByteBufStreamOutput {
     }
     Pipeline pipeline = streamEntry.getPipeline();
     PipelineID pipelineId = pipeline.getId();
-    long totalSuccessfulFlushedData = streamEntry.getTotalAckDataLength();
-    //set the correct length for the current stream
-    streamEntry.setCurrentPosition(totalSuccessfulFlushedData);
-    long bufferedDataLen = blockDataStreamOutputEntryPool.computeBufferData();
-    if (containerExclusionException) {
-      LOG.debug(
-          "Encountered exception {}. The last committed block length is {}, "
-              + "uncommitted data length is {} retry count {}", exception,
-          totalSuccessfulFlushedData, bufferedDataLen, retryCount);
-    } else {
-      LOG.warn(
-          "Encountered exception {} on the pipeline {}. "
-              + "The last committed block length is {}, "
-              + "uncommitted data length is {} retry count {}", exception,
-          pipeline, totalSuccessfulFlushedData, bufferedDataLen, retryCount);
-    }
-    Preconditions.checkArgument(
-        bufferedDataLen <= config.getStreamBufferMaxSize());
-    Preconditions.checkArgument(
-        offset - blockDataStreamOutputEntryPool.getKeyLength() ==
-        bufferedDataLen);
+
     long containerId = streamEntry.getBlockID().getContainerID();
     Collection<DatanodeDetails> failedServers = streamEntry.getFailedServers();
     Preconditions.checkNotNull(failedServers);
@@ -337,13 +317,6 @@ public class KeyDataStreamOutput implements ByteBufStreamOutput {
       blockDataStreamOutputEntryPool
           .discardPreallocatedBlocks(-1, pipelineId);
     }
-    if (bufferedDataLen > 0) {
-      // If the data is still cached in the underlying stream, we need to
-      // allocate new block and write this data in the datanode.
-      handleRetry(exception, bufferedDataLen);
-      // reset the retryCount after handling the exception
-      retryCount = 0;
-    }
   }
 
   private void markStreamClosed() {




[ozone] 20/38: HDDS-6130. [Ozone-Streaming] When releaseBuffers will get “Couldn't find the required future” (#2939)

Posted by sz...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

szetszwo pushed a commit to branch HDDS-4454
in repository https://gitbox.apache.org/repos/asf/ozone.git

commit ac981a77226f4cb0143483a0d2f91638789515b6
Author: micah zhao <mi...@tencent.com>
AuthorDate: Fri Dec 24 01:18:48 2021 +0800

    HDDS-6130. [Ozone-Streaming] When releaseBuffers will get “Couldn't find the required future” (#2939)
    
    (cherry picked from commit 2aacc7a889aa49237296fcecbcf0c664f7d42000)
    (cherry picked from commit 1ecb774a39971f264325b6ce2ab0d4cbe4025ee6)
---
 .../hdds/scm/storage/BlockDataStreamOutput.java    | 92 ++++++++++------------
 .../hdds/scm/storage/StreamCommitWatcher.java      | 28 -------
 2 files changed, 42 insertions(+), 78 deletions(-)

diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockDataStreamOutput.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockDataStreamOutput.java
index 9fb1340527..84968b68d5 100644
--- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockDataStreamOutput.java
+++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockDataStreamOutput.java
@@ -25,6 +25,7 @@ import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.BlockData;
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ChunkInfo;
+import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerCommandResponseProto;
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.KeyValue;
 import org.apache.hadoop.hdds.ratis.ContainerCommandRequestMessage;
 import org.apache.hadoop.hdds.scm.OzoneClientConfig;
@@ -115,6 +116,9 @@ public class BlockDataStreamOutput implements ByteBufferStreamOutput {
   // Also, corresponding to the logIndex, the corresponding list of buffers will
   // be released from the buffer pool.
   private final StreamCommitWatcher commitWatcher;
+  private final AtomicReference<CompletableFuture<
+      ContainerCommandResponseProto>> putBlockFuture
+      = new AtomicReference<>(CompletableFuture.completedFuture(null));
 
   private final List<DatanodeDetails> failedServers;
   private final Checksum checksum;
@@ -381,8 +385,7 @@ public class BlockDataStreamOutput implements ByteBufferStreamOutput {
    * @param force true if no data was written since most recent putBlock and
    *            stream is being closed
    */
-  private CompletableFuture<ContainerProtos.
-      ContainerCommandResponseProto> executePutBlock(boolean close,
+  private void executePutBlock(boolean close,
       boolean force) throws IOException {
     checkOpen();
     long flushPos = totalDataFlushedLength;
@@ -399,56 +402,54 @@ public class BlockDataStreamOutput implements ByteBufferStreamOutput {
       dataStreamCloseReply = out.closeAsync();
     }
 
-    CompletableFuture<ContainerProtos.
-        ContainerCommandResponseProto> flushFuture = null;
     try {
       BlockData blockData = containerBlockData.build();
       XceiverClientReply asyncReply =
           putBlockAsync(xceiverClient, blockData, close, token);
-      CompletableFuture<ContainerProtos.ContainerCommandResponseProto> future =
-          asyncReply.getResponse();
-      flushFuture = future.thenApplyAsync(e -> {
-        try {
-          validateResponse(e);
-        } catch (IOException sce) {
-          throw new CompletionException(sce);
-        }
-        // if the ioException is not set, putBlock is successful
-        if (getIoException() == null && !force) {
-          BlockID responseBlockID = BlockID.getFromProtobuf(
-              e.getPutBlock().getCommittedBlockLength().getBlockID());
-          Preconditions.checkState(blockID.get().getContainerBlockID()
-              .equals(responseBlockID.getContainerBlockID()));
-          // updates the bcsId of the block
-          blockID.set(responseBlockID);
-          if (LOG.isDebugEnabled()) {
-            LOG.debug(
-                "Adding index " + asyncReply.getLogIndex() + " commitMap size "
+      final CompletableFuture<ContainerCommandResponseProto> flushFuture
+          = asyncReply.getResponse().thenApplyAsync(e -> {
+            try {
+              validateResponse(e);
+            } catch (IOException sce) {
+              throw new CompletionException(sce);
+            }
+            // if the ioException is not set, putBlock is successful
+            if (getIoException() == null && !force) {
+              BlockID responseBlockID = BlockID.getFromProtobuf(
+                  e.getPutBlock().getCommittedBlockLength().getBlockID());
+              Preconditions.checkState(blockID.get().getContainerBlockID()
+                  .equals(responseBlockID.getContainerBlockID()));
+              // updates the bcsId of the block
+              blockID.set(responseBlockID);
+              if (LOG.isDebugEnabled()) {
+                LOG.debug("Adding index " + asyncReply.getLogIndex() +
+                    " commitMap size "
                     + commitWatcher.getCommitInfoMapSize() + " flushLength "
                     + flushPos + " blockID " + blockID);
-          }
-          // for standalone protocol, logIndex will always be 0.
-          commitWatcher
-              .updateCommitInfoMap(asyncReply.getLogIndex(), byteBufferList);
-        }
-        return e;
-      }, responseExecutor).exceptionally(e -> {
-        if (LOG.isDebugEnabled()) {
-          LOG.debug("putBlock failed for blockID {} with exception {}",
-              blockID, e.getLocalizedMessage());
-        }
-        CompletionException ce = new CompletionException(e);
-        setIoException(ce);
-        throw ce;
-      });
+              }
+              // for standalone protocol, logIndex will always be 0.
+              commitWatcher
+                  .updateCommitInfoMap(asyncReply.getLogIndex(),
+                      byteBufferList);
+            }
+            return e;
+          }, responseExecutor).exceptionally(e -> {
+            if (LOG.isDebugEnabled()) {
+              LOG.debug("putBlock failed for blockID {} with exception {}",
+                  blockID, e.getLocalizedMessage());
+            }
+            CompletionException ce = new CompletionException(e);
+            setIoException(ce);
+            throw ce;
+          });
+      putBlockFuture.updateAndGet(f -> f.thenCombine(flushFuture,
+          (previous, current) -> current));
     } catch (IOException | ExecutionException e) {
       throw new IOException(EXCEPTION_MSG + e.toString(), e);
     } catch (InterruptedException ex) {
       Thread.currentThread().interrupt();
       handleInterruptedException(ex, false);
     }
-    commitWatcher.getFutureMap().put(flushPos, flushFuture);
-    return flushFuture;
   }
 
   @Override
@@ -484,7 +485,7 @@ public class BlockDataStreamOutput implements ByteBufferStreamOutput {
       // data since latest flush - we need to send the "EOF" flag
       executePutBlock(true, true);
     }
-    waitOnFlushFutures();
+    putBlockFuture.get().get();
     watchForCommit(false);
     // just check again if the exception is hit while waiting for the
     // futures to ensure flush has indeed succeeded
@@ -512,15 +513,6 @@ public class BlockDataStreamOutput implements ByteBufferStreamOutput {
     }
   }
 
-  private void waitOnFlushFutures()
-      throws InterruptedException, ExecutionException {
-    CompletableFuture<Void> combinedFuture = CompletableFuture.allOf(
-        commitWatcher.getFutureMap().values().toArray(
-            new CompletableFuture[commitWatcher.getFutureMap().size()]));
-    // wait for all the transactions to complete
-    combinedFuture.get();
-  }
-
   private void validateResponse(
       ContainerProtos.ContainerCommandResponseProto responseProto)
       throws IOException {
diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/StreamCommitWatcher.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/StreamCommitWatcher.java
index 9ae604e951..1820416d32 100644
--- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/StreamCommitWatcher.java
+++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/StreamCommitWatcher.java
@@ -25,7 +25,6 @@
 package org.apache.hadoop.hdds.scm.storage;
 
 import com.google.common.base.Preconditions;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerCommandResponseProto;
 import org.apache.hadoop.hdds.scm.XceiverClientReply;
 import org.apache.hadoop.hdds.scm.XceiverClientSpi;
 import org.slf4j.Logger;
@@ -35,9 +34,6 @@ import java.io.IOException;
 import java.util.LinkedList;
 import java.util.List;
 import java.util.Map;
-import java.util.concurrent.CompletableFuture;
-import java.util.concurrent.ConcurrentHashMap;
-import java.util.concurrent.ConcurrentMap;
 import java.util.concurrent.ConcurrentSkipListMap;
 import java.util.concurrent.ExecutionException;
 import java.util.concurrent.TimeoutException;
@@ -59,18 +55,12 @@ public class StreamCommitWatcher {
   // by all servers
   private long totalAckDataLength;
 
-  // future Map to hold up all putBlock futures
-  private ConcurrentHashMap<Long,
-      CompletableFuture<ContainerCommandResponseProto>>
-      futureMap;
-
   private XceiverClientSpi xceiverClient;
 
   public StreamCommitWatcher(XceiverClientSpi xceiverClient,
       List<StreamBuffer> bufferList) {
     this.xceiverClient = xceiverClient;
     commitIndexMap = new ConcurrentSkipListMap<>();
-    futureMap = new ConcurrentHashMap<>();
     this.bufferList = bufferList;
     totalAckDataLength = 0;
   }
@@ -180,15 +170,6 @@ public class StreamCommitWatcher {
       final long length =
           buffers.stream().mapToLong(StreamBuffer::position).sum();
       totalAckDataLength += length;
-      // clear the future object from the future Map
-      final CompletableFuture<ContainerCommandResponseProto> remove =
-          futureMap.remove(totalAckDataLength);
-      if (remove == null) {
-        LOG.error("Couldn't find required future for " + totalAckDataLength);
-        for (Long key : futureMap.keySet()) {
-          LOG.error("Existing acknowledged data: " + key);
-        }
-      }
       for (StreamBuffer byteBuffer : buffers) {
         bufferList.remove(byteBuffer);
       }
@@ -209,19 +190,10 @@ public class StreamCommitWatcher {
     return ioException;
   }
 
-  public ConcurrentMap<Long,
-        CompletableFuture<
-            ContainerCommandResponseProto>> getFutureMap() {
-    return futureMap;
-  }
-
   public void cleanup() {
     if (commitIndexMap != null) {
       commitIndexMap.clear();
     }
-    if (futureMap != null) {
-      futureMap.clear();
-    }
     commitIndexMap = null;
   }
 }
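
For reference, the core of this fix is replacing StreamCommitWatcher's futureMap (keyed by totalAckDataLength, which releaseBuffers could no longer reliably match, hence the "Couldn't find the required future" error) with a single chained future held in an AtomicReference. A minimal, self-contained sketch of that pattern, using illustrative names rather than the patch's own:

    import java.util.concurrent.CompletableFuture;
    import java.util.concurrent.atomic.AtomicReference;

    public class ChainedFutures {
        // Tail of the chain; starts out already completed.
        private final AtomicReference<CompletableFuture<Void>> tail =
            new AtomicReference<>(CompletableFuture.completedFuture(null));

        // Called once per putBlock: merge the new future into the chain,
        // mirroring putBlockFuture.updateAndGet(f -> f.thenCombine(...)).
        void append(CompletableFuture<Void> next) {
            tail.updateAndGet(prev -> prev.thenCombine(next, (a, b) -> b));
        }

        // Called from flush/close: returns only after every appended
        // future has completed, like putBlockFuture.get().get() above.
        void awaitAll() throws Exception {
            tail.get().get();
        }

        public static void main(String[] args) throws Exception {
            ChainedFutures chain = new ChainedFutures();
            chain.append(CompletableFuture.runAsync(
                () -> System.out.println("putBlock 1 acked")));
            chain.append(CompletableFuture.runAsync(
                () -> System.out.println("putBlock 2 acked")));
            chain.awaitAll();
            System.out.println("flush complete");
        }
    }

This drops the per-position bookkeeping entirely: there are no map keys to compute, so there is nothing for releaseBuffers to fail to find.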




[ozone] 13/38: HDDS-5763. Provide an Executor for each LocalStream in ContainerStateMachine (#2782)

Posted by sz...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

szetszwo pushed a commit to branch HDDS-4454
in repository https://gitbox.apache.org/repos/asf/ozone.git

commit 7cbb3fa89cd67a3d60575880641afd079d30706e
Author: Tsz-Wo Nicholas Sze <sz...@apache.org>
AuthorDate: Mon Nov 1 23:39:53 2021 +0800

    HDDS-5763. Provide an Executor for each LocalStream in ContainerStateMachine (#2782)
    
    (cherry picked from commit e186bfc26810ca606ba6ac971d3c4fbb147baba1)
    (cherry picked from commit 311b36652697155b052cd9781fd1590d22256660)
---
 .../transport/server/ratis/ContainerStateMachine.java   |  7 +++++--
 .../common/transport/server/ratis/LocalStream.java      | 10 +++++++++-
 .../transport/server/ratis/XceiverServerRatis.java      |  5 -----
 .../hadoop/hdds/conf/DatanodeRatisServerConfig.java     | 17 -----------------
 4 files changed, 14 insertions(+), 25 deletions(-)

diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/ContainerStateMachine.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/ContainerStateMachine.java
index 121a6d6bdd..83255e0450 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/ContainerStateMachine.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/ContainerStateMachine.java
@@ -527,8 +527,11 @@ public class ContainerStateMachine extends BaseStateMachine {
 
         ContainerCommandResponseProto response = runCommand(
             requestProto, context);
-        String path = response.getMessage();
-        return new LocalStream(new StreamDataChannel(Paths.get(path)));
+        final StreamDataChannel channel = new StreamDataChannel(
+            Paths.get(response.getMessage()));
+        final ExecutorService chunkExecutor = requestProto.hasWriteChunk() ?
+            getChunkExecutor(requestProto.getWriteChunk()) : null;
+        return new LocalStream(channel, chunkExecutor);
       } catch (IOException e) {
         throw new CompletionException("Failed to create data stream", e);
       }
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/LocalStream.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/LocalStream.java
index baae013966..780f874398 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/LocalStream.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/LocalStream.java
@@ -23,12 +23,15 @@ import org.apache.ratis.statemachine.StateMachine;
 import java.io.IOException;
 import java.util.concurrent.CompletableFuture;
 import java.util.concurrent.CompletionException;
+import java.util.concurrent.Executor;
 
 class LocalStream implements StateMachine.DataStream {
   private final StateMachine.DataChannel dataChannel;
+  private final Executor executor;
 
-  LocalStream(StateMachine.DataChannel dataChannel) {
+  LocalStream(StateMachine.DataChannel dataChannel, Executor executor) {
     this.dataChannel = dataChannel;
+    this.executor = executor;
   }
 
   @Override
@@ -47,4 +50,9 @@ class LocalStream implements StateMachine.DataStream {
       }
     });
   }
+
+  @Override
+  public Executor getExecutor() {
+    return executor;
+  }
 }
\ No newline at end of file
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/XceiverServerRatis.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/XceiverServerRatis.java
index 2fcc07fc23..6b0ad0e41e 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/XceiverServerRatis.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/XceiverServerRatis.java
@@ -237,11 +237,6 @@ public final class XceiverServerRatis implements XceiverServerSpi {
             .getStreamRequestThreads();
     RaftServerConfigKeys.DataStream.setAsyncRequestThreadPoolSize(properties,
         dataStreamAsyncRequestThreadPoolSize);
-    int dataStreamWriteRequestThreadPoolSize =
-        conf.getObject(DatanodeRatisServerConfig.class)
-            .getStreamWriteThreads();
-    RaftServerConfigKeys.DataStream.setAsyncWriteThreadPoolSize(properties,
-        dataStreamWriteRequestThreadPoolSize);
     int dataStreamClientPoolSize =
         conf.getObject(DatanodeRatisServerConfig.class)
             .getClientPoolSize();
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/conf/DatanodeRatisServerConfig.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/conf/DatanodeRatisServerConfig.java
index 3132928abe..058932e769 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/conf/DatanodeRatisServerConfig.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/conf/DatanodeRatisServerConfig.java
@@ -141,23 +141,6 @@ public class DatanodeRatisServerConfig {
     this.streamRequestThreads = streamRequestThreads;
   }
 
-  @Config(key = "datastream.write.threads",
-      defaultValue = "20",
-      type = ConfigType.INT,
-      tags = {OZONE, DATANODE, RATIS, DATASTREAM},
-      description = "Maximum number of threads in the thread pool for " +
-          "datastream write."
-  )
-  private int streamWriteThreads;
-
-  public int getStreamWriteThreads() {
-    return streamWriteThreads;
-  }
-
-  public void setStreamWriteThreads(int streamWriteThreads) {
-    this.streamWriteThreads = streamWriteThreads;
-  }
-
   @Config(key = "datastream.client.pool.size",
       defaultValue = "10",
       type = ConfigType.INT,
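
The net effect of this change is that each LocalStream now carries the executor selected for its write-chunk request, so all writes of one stream run on a single thread while different streams proceed in parallel. A small illustrative sketch of that kind of stream-to-executor mapping (not code from the patch; the hashing scheme here is an assumption):

    import java.util.concurrent.ExecutorService;
    import java.util.concurrent.Executors;

    class PerStreamExecutors {
        private final ExecutorService[] pool;

        PerStreamExecutors(int threads) {
            pool = new ExecutorService[threads];
            for (int i = 0; i < threads; i++) {
                // One single-threaded executor per slot keeps each
                // stream's writes strictly ordered.
                pool[i] = Executors.newSingleThreadExecutor();
            }
        }

        ExecutorService forStream(long streamId) {
            // The same stream always maps to the same executor.
            return pool[(int) Math.floorMod(streamId, (long) pool.length)];
        }
    }

Since ordering is now guaranteed per stream by executor affinity, this is presumably why the separate datastream.write.threads pool is removed above.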




[ozone] 22/38: HDDS-6178. [Ozone-Streaming] Fix NPE in HDDS-6139. (#2984)

Posted by sz...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

szetszwo pushed a commit to branch HDDS-4454
in repository https://gitbox.apache.org/repos/asf/ozone.git

commit 33c0260ab500e956412c24e28161f3e197eb8b2d
Author: Sadanand Shenoy <sa...@gmail.com>
AuthorDate: Fri Jan 14 20:12:13 2022 +0530

    HDDS-6178. [Ozone-Streaming] Fix NPE in HDDS-6139. (#2984)
    
    (cherry picked from commit 72e1e02db8bb96ed84d4d4ccd84355d3343b5607)
    (cherry picked from commit 151ee53cd71442a272688840d0a5bfc01816aa11)
---
 .../org/apache/hadoop/hdds/scm/storage/BlockDataStreamOutput.java     | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockDataStreamOutput.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockDataStreamOutput.java
index 6ef59dd6d8..ec925d1e6a 100644
--- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockDataStreamOutput.java
+++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockDataStreamOutput.java
@@ -343,6 +343,10 @@ public class BlockDataStreamOutput implements ByteBufferStreamOutput {
     while (len > 0) {
       final StreamBuffer buf = bufferList.get(count);
       final long writeLen = Math.min(buf.position(), len);
+      if (buffersForPutBlock == null) {
+        buffersForPutBlock = new ArrayList<>();
+      }
+      buffersForPutBlock.add(buf);
       final ByteBuffer duplicated = buf.duplicate();
       duplicated.position(0);
       duplicated.limit(buf.position());
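
The NPE came from writeOnRetry adding to buffersForPutBlock before anything had initialized it; the guard above simply creates the list on first use. The same lazy-initialization shape in isolation (illustrative names only):

    import java.util.ArrayList;
    import java.util.List;

    class LazyBufferList<T> {
        private List<T> buffers; // stays null until the first add

        void add(T buffer) {
            if (buffers == null) {        // the guard added by this fix
                buffers = new ArrayList<>();
            }
            buffers.add(buffer);
        }
    }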




[ozone] 05/38: HDDS-5488. [Ozone-Streaming] Add a new BlockOutputStream/KeyOutputStream to support streaming api (#2495)

Posted by sz...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

szetszwo pushed a commit to branch HDDS-4454
in repository https://gitbox.apache.org/repos/asf/ozone.git

commit 0952195d73d6512b4c053ffc41723ec76e7289f9
Author: Kaijie Chen <ch...@kaijie.org>
AuthorDate: Thu Aug 12 18:09:38 2021 +0800

    HDDS-5488. [Ozone-Streaming] Add a new BlockOutputStream/KeyOutputStream to support streaming api (#2495)
    
    (cherry picked from commit d33d9cb02184a77a170b0890dc5fbb6465b3b0e8)
    (cherry picked from commit 18db09d7d4e20f020b9fe2a73d9daac97b2ce51c)
---
 hadoop-hdds/client/pom.xml                         |   4 +
 .../apache/hadoop/hdds/scm/XceiverClientRatis.java |   5 +
 .../hdds/scm/storage/BlockDataStreamOutput.java    | 760 +++++++++++++++++++++
 .../hdds/scm/storage/ByteBufStreamOutput.java      |  58 ++
 .../apache/hadoop/ozone/client/OzoneBucket.java    |  19 +
 .../client/io/BlockDataStreamOutputEntry.java      | 294 ++++++++
 .../client/io/BlockDataStreamOutputEntryPool.java  | 324 +++++++++
 .../ozone/client/io/KeyDataStreamOutput.java       | 629 +++++++++++++++++
 .../ozone/client/io/OzoneDataStreamOutput.java     |  70 ++
 .../ozone/client/protocol/ClientProtocol.java      |  15 +
 .../apache/hadoop/ozone/client/rpc/RpcClient.java  |  62 ++
 .../client/rpc/TestBlockDataStreamOutput.java      | 181 +++++
 .../apache/hadoop/ozone/container/TestHelper.java  |  22 +-
 .../hadoop/ozone/shell/keys/PutKeyHandler.java     |  40 +-
 14 files changed, 2477 insertions(+), 6 deletions(-)

diff --git a/hadoop-hdds/client/pom.xml b/hadoop-hdds/client/pom.xml
index 3e3ff0cb9f..2e5bb745fa 100644
--- a/hadoop-hdds/client/pom.xml
+++ b/hadoop-hdds/client/pom.xml
@@ -74,6 +74,10 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd">
       <groupId>org.junit.jupiter</groupId>
       <artifactId>junit-jupiter-params</artifactId>
     </dependency>
+    <dependency>
+      <groupId>io.netty</groupId>
+      <artifactId>netty-buffer</artifactId>
+    </dependency>
   </dependencies>
 
   <build>
diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientRatis.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientRatis.java
index d0fd0db129..8e4ab16ee8 100644
--- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientRatis.java
+++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientRatis.java
@@ -53,6 +53,7 @@ import org.apache.hadoop.hdds.tracing.TracingUtil;
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Preconditions;
 import org.apache.ratis.client.RaftClient;
+import org.apache.ratis.client.api.DataStreamApi;
 import org.apache.ratis.grpc.GrpcTlsConfig;
 import org.apache.ratis.proto.RaftProtos;
 import org.apache.ratis.proto.RaftProtos.ReplicationLevel;
@@ -382,4 +383,8 @@ public final class XceiverClientRatis extends XceiverClientSpi {
     throw new UnsupportedOperationException(
             "Operation Not supported for ratis client");
   }
+
+  public DataStreamApi getDataStreamApi() {
+    return this.getClient().getDataStreamApi();
+  }
 }
diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockDataStreamOutput.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockDataStreamOutput.java
new file mode 100644
index 0000000000..f658df1af9
--- /dev/null
+++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockDataStreamOutput.java
@@ -0,0 +1,760 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ *  with the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ */
+
+package org.apache.hadoop.hdds.scm.storage;
+
+import com.google.common.annotations.VisibleForTesting;
+import com.google.common.base.Preconditions;
+import io.netty.buffer.ByteBuf;
+import org.apache.hadoop.hdds.client.BlockID;
+import org.apache.hadoop.hdds.protocol.DatanodeDetails;
+import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
+import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.BlockData;
+import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ChunkInfo;
+import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.KeyValue;
+import org.apache.hadoop.hdds.ratis.ContainerCommandRequestMessage;
+import org.apache.hadoop.hdds.scm.OzoneClientConfig;
+import org.apache.hadoop.hdds.scm.XceiverClientFactory;
+import org.apache.hadoop.hdds.scm.XceiverClientRatis;
+import org.apache.hadoop.hdds.scm.XceiverClientReply;
+import org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException;
+import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
+import org.apache.hadoop.ozone.common.Checksum;
+import org.apache.hadoop.ozone.common.ChecksumData;
+import org.apache.hadoop.ozone.common.ChunkBuffer;
+import org.apache.hadoop.ozone.common.OzoneChecksumException;
+import org.apache.hadoop.security.token.Token;
+import org.apache.hadoop.security.token.TokenIdentifier;
+import org.apache.ratis.client.api.DataStreamOutput;
+import org.apache.ratis.io.StandardWriteOption;
+import org.apache.ratis.protocol.DataStreamReply;
+import org.apache.ratis.thirdparty.com.google.protobuf.ByteString;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
+import java.util.concurrent.CompletableFuture;
+import java.util.concurrent.CompletionException;
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.atomic.AtomicLong;
+import java.util.concurrent.atomic.AtomicReference;
+
+import static org.apache.hadoop.hdds.scm.storage.ContainerProtocolCalls.putBlockAsync;
+
+/**
+ * A {@link ByteBufStreamOutput} used by the REST service in combination
+ * with the SCMClient to write the value of a key to a sequence
+ * of container chunks.  Writes are buffered locally and periodically written to
+ * the container as a new chunk.  In order to preserve the semantics that
+ * replacement of a pre-existing key is atomic, each instance of the stream has
+ * an internal unique identifier.  This unique identifier and a monotonically
+ * increasing chunk index form a composite key that is used as the chunk name.
+ * After all data is written, a putKey call creates or updates the corresponding
+ * container key, and this call includes the full list of chunks that make up
+ * the key data.  The list of chunks is updated all at once.  Therefore, a
+ * concurrent reader never can see an intermediate state in which different
+ * chunks of data from different versions of the key data are interleaved.
+ * This class encapsulates all state management for buffering and writing
+ * through to the container.
+ */
+public class BlockDataStreamOutput implements ByteBufStreamOutput {
+  public static final Logger LOG =
+      LoggerFactory.getLogger(BlockDataStreamOutput.class);
+  public static final String EXCEPTION_MSG =
+      "Unexpected Storage Container Exception: ";
+  private static final CompletableFuture[] EMPTY_FUTURE_ARRAY = {};
+
+  private AtomicReference<BlockID> blockID;
+
+  private final BlockData.Builder containerBlockData;
+  private XceiverClientFactory xceiverClientFactory;
+  private XceiverClientRatis xceiverClient;
+  private OzoneClientConfig config;
+
+  private int chunkIndex;
+  private final AtomicLong chunkOffset = new AtomicLong();
+  private final BufferPool bufferPool;
+  // The IOException will be set by response handling thread in case there is an
+  // exception received in the response. If the exception is set, the next
+  // request will fail upfront.
+  private final AtomicReference<IOException> ioException;
+  private final ExecutorService responseExecutor;
+
+  // the effective length of data flushed so far
+  private long totalDataFlushedLength;
+
+  // effective data write attempted so far for the block
+  private long writtenDataLength;
+
+  // List containing buffers for which the putBlock call will
+  // update the length in the datanodes. This list will just maintain
+  // references to the buffers in the BufferPool which will be cleared
+  // when the watchForCommit acknowledges a putBlock logIndex has been
+  // committed on all datanodes. This list will be a  place holder for buffers
+  // which got written between successive putBlock calls.
+  private List<ChunkBuffer> bufferList;
+
+  // This object will maintain the commitIndexes and byteBufferList in order
+  // Also, corresponding to the logIndex, the corresponding list of buffers will
+  // be released from the buffer pool.
+  private final CommitWatcher commitWatcher;
+
+  private final List<DatanodeDetails> failedServers;
+  private final Checksum checksum;
+
+  //number of buffers used before doing a flush/putBlock.
+  private int flushPeriod;
+  //bytes remaining to write in the current buffer.
+  private int currentBufferRemaining;
+  //current buffer allocated to write
+  private ChunkBuffer currentBuffer;
+  private final Token<? extends TokenIdentifier> token;
+  private final DataStreamOutput out;
+  private CompletableFuture<DataStreamReply> dataStreamCloseReply;
+  private List<CompletableFuture<DataStreamReply>> futures = new ArrayList<>();
+  private final long syncSize = 0; // TODO: disk sync is disabled for now
+  private long syncPosition = 0;
+
+  /**
+   * Creates a new BlockDataStreamOutput.
+   *
+   * @param blockID              block ID
+   * @param xceiverClientManager client manager that controls client
+   * @param pipeline             pipeline where block will be written
+   * @param bufferPool           pool of buffers
+   */
+  public BlockDataStreamOutput(
+      BlockID blockID,
+      XceiverClientFactory xceiverClientManager,
+      Pipeline pipeline,
+      BufferPool bufferPool,
+      OzoneClientConfig config,
+      Token<? extends TokenIdentifier> token
+  ) throws IOException {
+    this.xceiverClientFactory = xceiverClientManager;
+    this.config = config;
+    this.blockID = new AtomicReference<>(blockID);
+    KeyValue keyValue =
+        KeyValue.newBuilder().setKey("TYPE").setValue("KEY").build();
+    this.containerBlockData =
+        BlockData.newBuilder().setBlockID(blockID.getDatanodeBlockIDProtobuf())
+            .addMetadata(keyValue);
+    this.xceiverClient =
+        (XceiverClientRatis)xceiverClientManager.acquireClient(pipeline);
+    // Alternatively, stream setup can be delayed till the first chunk write.
+    this.out = setupStream();
+    this.bufferPool = bufferPool;
+    this.token = token;
+
+    //number of buffers used before doing a flush
+    refreshCurrentBuffer(bufferPool);
+    flushPeriod = (int) (config.getStreamBufferFlushSize() / config
+        .getStreamBufferSize());
+
+    Preconditions
+        .checkArgument(
+            (long) flushPeriod * config.getStreamBufferSize() == config
+                .getStreamBufferFlushSize());
+
+    // A single thread executor handle the responses of async requests
+    responseExecutor = Executors.newSingleThreadExecutor();
+    commitWatcher = new CommitWatcher(bufferPool, xceiverClient);
+    bufferList = null;
+    totalDataFlushedLength = 0;
+    writtenDataLength = 0;
+    failedServers = new ArrayList<>(0);
+    ioException = new AtomicReference<>(null);
+    checksum = new Checksum(config.getChecksumType(),
+        config.getBytesPerChecksum());
+  }
+
+  private DataStreamOutput setupStream() throws IOException {
+    // Execute a dummy WriteChunk request to get the path of the target file,
+    // but does NOT write any data to it.
+    ContainerProtos.WriteChunkRequestProto.Builder writeChunkRequest =
+        ContainerProtos.WriteChunkRequestProto.newBuilder()
+            .setBlockID(blockID.get().getDatanodeBlockIDProtobuf());
+
+    String id = xceiverClient.getPipeline().getFirstNode().getUuidString();
+    ContainerProtos.ContainerCommandRequestProto.Builder builder =
+        ContainerProtos.ContainerCommandRequestProto.newBuilder()
+            .setCmdType(ContainerProtos.Type.StreamInit)
+            .setContainerID(blockID.get().getContainerID())
+            .setDatanodeUuid(id).setWriteChunk(writeChunkRequest);
+
+    ContainerCommandRequestMessage message =
+        ContainerCommandRequestMessage.toMessage(builder.build(), null);
+
+    return Preconditions.checkNotNull(xceiverClient.getDataStreamApi())
+        .stream(message.getContent().asReadOnlyByteBuffer());
+  }
+
+  private void refreshCurrentBuffer(BufferPool pool) {
+    currentBuffer = pool.getCurrentBuffer();
+    currentBufferRemaining =
+        currentBuffer != null ? currentBuffer.remaining() : 0;
+  }
+
+  public BlockID getBlockID() {
+    return blockID.get();
+  }
+
+  public long getTotalAckDataLength() {
+    return commitWatcher.getTotalAckDataLength();
+  }
+
+  public long getWrittenDataLength() {
+    return writtenDataLength;
+  }
+
+  public List<DatanodeDetails> getFailedServers() {
+    return failedServers;
+  }
+
+  @VisibleForTesting
+  public XceiverClientRatis getXceiverClient() {
+    return xceiverClient;
+  }
+
+  @VisibleForTesting
+  public long getTotalDataFlushedLength() {
+    return totalDataFlushedLength;
+  }
+
+  @VisibleForTesting
+  public BufferPool getBufferPool() {
+    return bufferPool;
+  }
+
+  public IOException getIoException() {
+    return ioException.get();
+  }
+
+  @VisibleForTesting
+  public Map<Long, List<ChunkBuffer>> getCommitIndex2flushedDataMap() {
+    return commitWatcher.getCommitIndex2flushedDataMap();
+  }
+
+  @Override
+  public void write(ByteBuf b) throws IOException {
+    checkOpen();
+    if (b == null) {
+      throw new NullPointerException();
+    }
+    int off = b.readerIndex();
+    int len = b.readableBytes();
+
+    while (len > 0) {
+      allocateNewBufferIfNeeded();
+      final int writeLen = Math.min(currentBufferRemaining, len);
+      // TODO: avoid buffer copy here
+      currentBuffer.put(b.nioBuffer(off, writeLen));
+      currentBufferRemaining -= writeLen;
+      writeChunkIfNeeded();
+      off += writeLen;
+      len -= writeLen;
+      writtenDataLength += writeLen;
+      doFlushOrWatchIfNeeded();
+    }
+  }
+
+  private void writeChunkIfNeeded() throws IOException {
+    if (currentBufferRemaining == 0) {
+      writeChunk(currentBuffer);
+    }
+  }
+
+  private void doFlushOrWatchIfNeeded() throws IOException {
+    if (currentBufferRemaining == 0) {
+      if (bufferPool.getNumberOfUsedBuffers() % flushPeriod == 0) {
+        updateFlushLength();
+        executePutBlock(false, false);
+      }
+      // Data in the bufferPool can not exceed streamBufferMaxSize
+      if (bufferPool.getNumberOfUsedBuffers() == bufferPool.getCapacity()) {
+        handleFullBuffer();
+      }
+    }
+  }
+
+  private void allocateNewBufferIfNeeded() {
+    if (currentBufferRemaining == 0) {
+      currentBuffer = bufferPool.allocateBuffer(config.getBufferIncrement());
+      currentBufferRemaining = currentBuffer.remaining();
+    }
+  }
+
+  private void updateFlushLength() {
+    totalDataFlushedLength = writtenDataLength;
+  }
+
+  private boolean isBufferPoolFull() {
+    return bufferPool.computeBufferData() == config.getStreamBufferMaxSize();
+  }
+
+  /**
+   * Will be called on the retryPath in case closedContainerException/
+   * TimeoutException.
+   * @param len length of data to write
+   * @throws IOException if error occurred
+   */
+
+  // In this case, the data is already cached in the currentBuffer.
+  public void writeOnRetry(long len) throws IOException {
+    if (len == 0) {
+      return;
+    }
+    if (LOG.isDebugEnabled()) {
+      LOG.debug("Retrying write length {} for blockID {}", len, blockID);
+    }
+    Preconditions.checkArgument(len <= config.getStreamBufferMaxSize());
+    int count = 0;
+    while (len > 0) {
+      ChunkBuffer buffer = bufferPool.getBuffer(count);
+      long writeLen = Math.min(buffer.position(), len);
+      if (!buffer.hasRemaining()) {
+        writeChunk(buffer);
+      }
+      len -= writeLen;
+      count++;
+      writtenDataLength += writeLen;
+      // we should not call isBufferFull/shouldFlush here.
+      // The buffer might already be full as whole data is already cached in
+      // the buffer. We should just validate
+      // if we wrote data of size streamBufferMaxSize/streamBufferFlushSize to
+      // call for handling full buffer/flush buffer condition.
+      if (writtenDataLength % config.getStreamBufferFlushSize() == 0) {
+        // reset the position to zero as now we will be reading the
+        // next buffer in the list
+        updateFlushLength();
+        executePutBlock(false, false);
+      }
+      if (writtenDataLength == config.getStreamBufferMaxSize()) {
+        handleFullBuffer();
+      }
+    }
+  }
+
+  /**
+   * This is a blocking call. It will wait for the flush till the commit index
+   * at the head of the commitIndex2flushedDataMap gets replicated to all or
+   * majority.
+   * @throws IOException
+   */
+  private void handleFullBuffer() throws IOException {
+    try {
+      checkOpen();
+      if (!commitWatcher.getFutureMap().isEmpty()) {
+        waitOnFlushFutures();
+      }
+    } catch (ExecutionException e) {
+      handleExecutionException(e);
+    } catch (InterruptedException ex) {
+      Thread.currentThread().interrupt();
+      handleInterruptedException(ex, true);
+    }
+    watchForCommit(true);
+  }
+
+
+  // It may happen that once the exception is encountered, we still might
+  // have successfully flushed up to a certain index. Make sure the buffers
+  // only contain data which have not been sufficiently replicated
+  private void adjustBuffersOnException() {
+    commitWatcher.releaseBuffersOnException();
+    refreshCurrentBuffer(bufferPool);
+  }
+
+  /**
+   * calls watchForCommit API of the Ratis Client. For Standalone client,
+   * it is a no op.
+   * @param bufferFull flag indicating whether bufferFull condition is hit or
+   *              it is called as part of flush/close
+   * @return minimum commit index replicated to all nodes
+   * @throws IOException IOException in case watch gets timed out
+   */
+  private void watchForCommit(boolean bufferFull) throws IOException {
+    checkOpen();
+    try {
+      XceiverClientReply reply = bufferFull ?
+          commitWatcher.watchOnFirstIndex() : commitWatcher.watchOnLastIndex();
+      if (reply != null) {
+        List<DatanodeDetails> dnList = reply.getDatanodes();
+        if (!dnList.isEmpty()) {
+          Pipeline pipe = xceiverClient.getPipeline();
+
+          LOG.warn("Failed to commit BlockId {} on {}. Failed nodes: {}",
+              blockID, pipe, dnList);
+          failedServers.addAll(dnList);
+        }
+      }
+    } catch (IOException ioe) {
+      setIoException(ioe);
+      throw getIoException();
+    }
+    refreshCurrentBuffer(bufferPool);
+
+  }
+
+  /**
+   * @param close whether putBlock is happening as part of closing the stream
+   * @param force true if no data was written since most recent putBlock and
+   *            stream is being closed
+   */
+  private CompletableFuture<ContainerProtos.
+      ContainerCommandResponseProto> executePutBlock(boolean close,
+      boolean force) throws IOException {
+    checkOpen();
+    long flushPos = totalDataFlushedLength;
+    final List<ChunkBuffer> byteBufferList;
+    if (!force) {
+      Preconditions.checkNotNull(bufferList);
+      byteBufferList = bufferList;
+      bufferList = null;
+      Preconditions.checkNotNull(byteBufferList);
+    } else {
+      byteBufferList = null;
+    }
+
+    try {
+      CompletableFuture.allOf(futures.toArray(EMPTY_FUTURE_ARRAY)).get();
+    } catch (Exception e) {
+      LOG.warn("Failed to write all chunks through stream: " + e);
+      throw new IOException(e);
+    }
+    if (close) {
+      dataStreamCloseReply = out.closeAsync();
+    }
+
+    CompletableFuture<ContainerProtos.
+        ContainerCommandResponseProto> flushFuture = null;
+    try {
+      BlockData blockData = containerBlockData.build();
+      XceiverClientReply asyncReply =
+          putBlockAsync(xceiverClient, blockData, close, token);
+      CompletableFuture<ContainerProtos.ContainerCommandResponseProto> future =
+          asyncReply.getResponse();
+      flushFuture = future.thenApplyAsync(e -> {
+        try {
+          validateResponse(e);
+        } catch (IOException sce) {
+          throw new CompletionException(sce);
+        }
+        // if the ioException is not set, putBlock is successful
+        if (getIoException() == null && !force) {
+          BlockID responseBlockID = BlockID.getFromProtobuf(
+              e.getPutBlock().getCommittedBlockLength().getBlockID());
+          Preconditions.checkState(blockID.get().getContainerBlockID()
+              .equals(responseBlockID.getContainerBlockID()));
+          // updates the bcsId of the block
+          blockID.set(responseBlockID);
+          if (LOG.isDebugEnabled()) {
+            LOG.debug(
+                "Adding index " + asyncReply.getLogIndex() + " commitMap size "
+                    + commitWatcher.getCommitInfoMapSize() + " flushLength "
+                    + flushPos + " numBuffers " + byteBufferList.size()
+                    + " blockID " + blockID + " bufferPool size" + bufferPool
+                    .getSize() + " currentBufferIndex " + bufferPool
+                    .getCurrentBufferIndex());
+          }
+          // for standalone protocol, logIndex will always be 0.
+          commitWatcher
+              .updateCommitInfoMap(asyncReply.getLogIndex(), byteBufferList);
+        }
+        return e;
+      }, responseExecutor).exceptionally(e -> {
+        if (LOG.isDebugEnabled()) {
+          LOG.debug("putBlock failed for blockID {} with exception {}",
+              blockID, e.getLocalizedMessage());
+        }
+        CompletionException ce = new CompletionException(e);
+        setIoException(ce);
+        throw ce;
+      });
+    } catch (IOException | ExecutionException e) {
+      throw new IOException(EXCEPTION_MSG + e.toString(), e);
+    } catch (InterruptedException ex) {
+      Thread.currentThread().interrupt();
+      handleInterruptedException(ex, false);
+    }
+    commitWatcher.getFutureMap().put(flushPos, flushFuture);
+    return flushFuture;
+  }
+
+  @Override
+  public void flush() throws IOException {
+    if (xceiverClientFactory != null && xceiverClient != null
+        && bufferPool != null && bufferPool.getSize() > 0
+        && (!config.isStreamBufferFlushDelay() ||
+            writtenDataLength - totalDataFlushedLength
+                >= config.getStreamBufferSize())) {
+      try {
+        handleFlush(false);
+      } catch (ExecutionException e) {
+        // just set the exception here as well in order to maintain sanctity of
+        // ioException field
+        handleExecutionException(e);
+      } catch (InterruptedException ex) {
+        Thread.currentThread().interrupt();
+        handleInterruptedException(ex, true);
+      }
+    }
+  }
+
+  private void writeChunk(ChunkBuffer buffer)
+      throws IOException {
+    // This data in the buffer will be pushed to datanode and a reference will
+    // be added to the bufferList. Once putBlock gets executed, this list will
+    // be marked null. Hence, during first writeChunk call after every putBlock
+    // call or during the first call to writeChunk here, the list will be null.
+
+    if (bufferList == null) {
+      bufferList = new ArrayList<>();
+    }
+    bufferList.add(buffer);
+    writeChunkToContainer(buffer.duplicate(0, buffer.position()));
+  }
+
+  /**
+   * @param close whether the flush is happening as part of closing the stream
+   */
+  private void handleFlush(boolean close)
+      throws IOException, InterruptedException, ExecutionException {
+    checkOpen();
+    // flush the last chunk data residing on the currentBuffer
+    if (totalDataFlushedLength < writtenDataLength) {
+      refreshCurrentBuffer(bufferPool);
+      Preconditions.checkArgument(currentBuffer.position() > 0);
+      if (currentBuffer.hasRemaining()) {
+        writeChunk(currentBuffer);
+      }
+      // This can be a partially filled chunk. Since we are flushing the buffer
+      // here, we just limit this buffer to the current position. So that next
+      // write will happen in new buffer
+      updateFlushLength();
+      executePutBlock(close, false);
+    } else if (close) {
+      // forcing an "empty" putBlock if stream is being closed without new
+      // data since latest flush - we need to send the "EOF" flag
+      executePutBlock(true, true);
+    }
+    waitOnFlushFutures();
+    watchForCommit(false);
+    // just check again if the exception is hit while waiting for the
+    // futures to ensure flush has indeed succeeded
+
+    // irrespective of whether the commitIndex2flushedDataMap is empty
+    // or not, ensure there is no exception set
+    checkOpen();
+  }
+
+  @Override
+  public void close() throws IOException {
+    if (xceiverClientFactory != null && xceiverClient != null
+        && bufferPool != null && bufferPool.getSize() > 0) {
+      try {
+        handleFlush(true);
+        dataStreamCloseReply.get();
+      } catch (ExecutionException e) {
+        handleExecutionException(e);
+      } catch (InterruptedException ex) {
+        Thread.currentThread().interrupt();
+        handleInterruptedException(ex, true);
+      } finally {
+        cleanup(false);
+      }
+      // TODO: Turn the below buffer empty check on when Standalone pipeline
+      // is removed in the write path in tests
+      // Preconditions.checkArgument(buffer.position() == 0);
+      // bufferPool.checkBufferPoolEmpty();
+
+    }
+  }
+
+  private void waitOnFlushFutures()
+      throws InterruptedException, ExecutionException {
+    CompletableFuture<Void> combinedFuture = CompletableFuture.allOf(
+        commitWatcher.getFutureMap().values().toArray(
+            new CompletableFuture[commitWatcher.getFutureMap().size()]));
+    // wait for all the transactions to complete
+    combinedFuture.get();
+  }
+
+  private void validateResponse(
+      ContainerProtos.ContainerCommandResponseProto responseProto)
+      throws IOException {
+    try {
+      // if the ioException is already set, it means a prev request has failed
+      // just throw the exception. The current operation will fail with the
+      // original error
+      IOException exception = getIoException();
+      if (exception != null) {
+        throw exception;
+      }
+      ContainerProtocolCalls.validateContainerResponse(responseProto);
+    } catch (StorageContainerException sce) {
+      setIoException(sce);
+      throw sce;
+    }
+  }
+
+
+  private void setIoException(Exception e) {
+    IOException ioe = getIoException();
+    if (ioe == null) {
+      IOException exception =  new IOException(EXCEPTION_MSG + e.toString(), e);
+      ioException.compareAndSet(null, exception);
+    } else {
+      LOG.debug("Previous request had already failed with " + ioe.toString()
+          + " so subsequent request also encounters"
+          + " Storage Container Exception ", e);
+    }
+  }
+
+  public void cleanup(boolean invalidateClient) {
+    if (xceiverClientFactory != null) {
+      xceiverClientFactory.releaseClient(xceiverClient, invalidateClient);
+    }
+    xceiverClientFactory = null;
+    xceiverClient = null;
+    commitWatcher.cleanup();
+    if (bufferList !=  null) {
+      bufferList.clear();
+    }
+    bufferList = null;
+    responseExecutor.shutdown();
+  }
+
+  /**
+   * Checks if the stream is open or exception has occurred.
+   * If not, throws an exception.
+   *
+   * @throws IOException if stream is closed
+   */
+  private void checkOpen() throws IOException {
+    if (isClosed()) {
+      throw new IOException("BlockDataStreamOutput has been closed.");
+    } else if (getIoException() != null) {
+      adjustBuffersOnException();
+      throw getIoException();
+    }
+  }
+
+  public boolean isClosed() {
+    return xceiverClient == null;
+  }
+
+  private boolean needSync(long position) {
+    if (syncSize > 0) {
+      // TODO: or position >= fileLength
+      if (position - syncPosition >= syncSize) {
+        syncPosition = position;
+        return true;
+      }
+    }
+    return false;
+  }
+
+  /**
+   * Writes buffered data as a new chunk to the container and saves chunk
+   * information to be used later in putKey call.
+   *
+   * @throws IOException if there is an I/O error while performing the call
+   * @throws OzoneChecksumException if there is an error while computing
+   * checksum
+   */
+  private void writeChunkToContainer(ChunkBuffer chunk) throws IOException {
+    int effectiveChunkSize = chunk.remaining();
+    final long offset = chunkOffset.getAndAdd(effectiveChunkSize);
+    final ByteString data = chunk.toByteString(
+        bufferPool.byteStringConversion());
+    ChecksumData checksumData = checksum.computeChecksum(chunk);
+    ChunkInfo chunkInfo = ChunkInfo.newBuilder()
+        .setChunkName(blockID.get().getLocalID() + "_chunk_" + ++chunkIndex)
+        .setOffset(offset)
+        .setLen(effectiveChunkSize)
+        .setChecksumData(checksumData.getProtoBufMessage())
+        .build();
+
+    if (LOG.isDebugEnabled()) {
+      LOG.debug("Writing chunk {} length {} at offset {}",
+          chunkInfo.getChunkName(), effectiveChunkSize, offset);
+    }
+
+    CompletableFuture<DataStreamReply> future =
+        (needSync(offset + effectiveChunkSize) ?
+        out.writeAsync(data.asReadOnlyByteBuffer(), StandardWriteOption.SYNC) :
+        out.writeAsync(data.asReadOnlyByteBuffer()))
+        .whenCompleteAsync((r, e) -> {
+          if (e != null || !r.isSuccess()) {
+            if (e == null) {
+              e = new IOException("result is not success");
+            }
+            String msg = "Failed to write chunk " + chunkInfo.getChunkName() +
+                " " + "into block " + blockID;
+            LOG.debug("{}, exception: {}", msg, e.getLocalizedMessage());
+            CompletionException ce = new CompletionException(msg, e);
+            setIoException(ce);
+            throw ce;
+          }
+        }, responseExecutor);
+
+    futures.add(future);
+    containerBlockData.addChunks(chunkInfo);
+  }
+
+  @VisibleForTesting
+  public void setXceiverClient(XceiverClientRatis xceiverClient) {
+    this.xceiverClient = xceiverClient;
+  }
+
+  /**
+   * Handles InterruptedException.
+   *
+   * @param ex
+   * @param processExecutionException is optional, if passed as TRUE, then
+   * handle ExecutionException else skip it.
+   * @throws IOException
+   */
+  private void handleInterruptedException(Exception ex,
+      boolean processExecutionException)
+      throws IOException {
+    LOG.error("Command execution was interrupted.");
+    if (processExecutionException) {
+      handleExecutionException(ex);
+    } else {
+      throw new IOException(EXCEPTION_MSG + ex.toString(), ex);
+    }
+  }
+
+  /**
+   * Handles ExecutionException by adjusting buffers.
+   * @param ex
+   * @throws IOException
+   */
+  private void handleExecutionException(Exception ex) throws IOException {
+    setIoException(ex);
+    adjustBuffersOnException();
+    throw getIoException();
+  }
+}
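
One invariant in the constructor above is easy to miss: flushPeriod is computed by integer division, so streamBufferFlushSize must be an exact multiple of streamBufferSize or the Preconditions check fails. A self-contained sketch of that check with illustrative values (not Ozone's defaults):

    public class FlushPeriodCheck {
        public static void main(String[] args) {
            long streamBufferSize = 4L * 1024 * 1024;       // one buffer
            long streamBufferFlushSize = 16L * 1024 * 1024; // putBlock cadence

            int flushPeriod = (int) (streamBufferFlushSize / streamBufferSize);
            // Same precondition as the constructor: if the flush size is not
            // an exact multiple, the truncating division would silently drop
            // part of a buffer from the flush schedule.
            if ((long) flushPeriod * streamBufferSize != streamBufferFlushSize) {
                throw new IllegalArgumentException(
                    "flush size must be a multiple of buffer size");
            }
            System.out.println("putBlock after every " + flushPeriod + " buffers");
        }
    }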
diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/ByteBufStreamOutput.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/ByteBufStreamOutput.java
new file mode 100644
index 0000000000..7f40737b70
--- /dev/null
+++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/ByteBufStreamOutput.java
@@ -0,0 +1,58 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ *  with the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ */
+
+package org.apache.hadoop.hdds.scm.storage;
+
+import io.netty.buffer.ByteBuf;
+
+import java.io.Closeable;
+import java.io.IOException;
+
+/**
+* This interface is for writing an output stream of ByteBufs.
+* A ByteBufStreamOutput accepts Netty ByteBufs and sends them to some sink.
+*/
+public interface ByteBufStreamOutput extends Closeable {
+  /**
+   * Try to write all the bytes in ByteBuf b to DataStream.
+   *
+   * @param b the data.
+   * @exception IOException if an I/O error occurs.
+   */
+  void write(ByteBuf b) throws IOException;
+
+  /**
+   * Try to write the [off:off + len) slice in ByteBuf b to DataStream.
+   *
+   * @param b the data.
+   * @param off the start offset in the data.
+   * @param len the number of bytes to write.
+   * @exception  IOException  if an I/O error occurs.
+   */
+  default void write(ByteBuf b, int off, int len) throws IOException {
+    write(b.slice(off, len));
+  }
+
+  /**
+   * Flushes this DataStream output and forces any buffered output bytes
+   * to be written out.
+   *
+   * @exception  IOException  if an I/O error occurs.
+   */
+  void flush() throws IOException;
+}
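
ByteBufStreamOutput is deliberately small: write, flush, close, plus a default slice-based write. A minimal in-memory implementation, shown only to illustrate the contract (assumes netty-buffer on the classpath; this class is not part of the patch):

    import io.netty.buffer.ByteBuf;
    import java.io.ByteArrayOutputStream;
    import java.io.IOException;
    import org.apache.hadoop.hdds.scm.storage.ByteBufStreamOutput;

    /** Illustrative sink that copies every ByteBuf into a byte array. */
    public class InMemoryByteBufStreamOutput implements ByteBufStreamOutput {
        private final ByteArrayOutputStream sink = new ByteArrayOutputStream();

        @Override
        public void write(ByteBuf b) throws IOException {
            // Copy the readable bytes without moving the caller's readerIndex.
            b.getBytes(b.readerIndex(), sink, b.readableBytes());
        }

        @Override
        public void flush() {
            // Nothing buffered beyond the byte array.
        }

        @Override
        public void close() {
            // Nothing to release.
        }

        public byte[] toByteArray() {
            return sink.toByteArray();
        }
    }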
diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneBucket.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneBucket.java
index 596afa6371..2d27858e16 100644
--- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneBucket.java
+++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneBucket.java
@@ -33,6 +33,7 @@ import org.apache.hadoop.hdds.client.ReplicationFactor;
 import org.apache.hadoop.hdds.client.ReplicationType;
 import org.apache.hadoop.hdds.scm.client.HddsClientUtils;
 import org.apache.hadoop.ozone.OmUtils;
+import org.apache.hadoop.ozone.client.io.OzoneDataStreamOutput;
 import org.apache.hadoop.ozone.client.io.OzoneInputStream;
 import org.apache.hadoop.ozone.client.io.OzoneOutputStream;
 import org.apache.hadoop.ozone.client.protocol.ClientProtocol;
@@ -599,6 +600,24 @@ public class OzoneBucket extends WithMetadata {
         .createKey(volumeName, name, key, size, replicationConfig, keyMetadata);
   }
 
+  /**
+   * Creates a new key in the bucket.
+   *
+   * @param key               Name of the key to be created.
+   * @param size              Size of the data the key will point to.
+   * @param replicationConfig Replication configuration.
+   * @return OzoneDataStreamOutput to which the data has to be written.
+   * @throws IOException
+   */
+  public OzoneDataStreamOutput createStreamKey(String key, long size,
+      ReplicationConfig replicationConfig,
+      Map<String, String> keyMetadata)
+      throws IOException {
+    return proxy
+        .createStreamKey(volumeName, name, key, size, replicationConfig,
+            keyMetadata);
+  }
+
   /**
    * Reads an existing key from the bucket.
    *
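As a sketch of the new OzoneBucket API above, a client holding an
OzoneBucket could stream a key as below; the key name, replication
settings, and empty metadata map are assumptions for illustration only.

    byte[] data = "streamed value".getBytes(StandardCharsets.UTF_8);
    ReplicationConfig repl = ReplicationConfig.fromTypeAndFactor(
        ReplicationType.RATIS, ReplicationFactor.THREE);
    // OzoneDataStreamOutput is Closeable, so try-with-resources applies.
    try (OzoneDataStreamOutput out = bucket.createStreamKey(
        "key1", data.length, repl, new HashMap<>())) {
      out.write(Unpooled.copiedBuffer(data));
    }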
diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/BlockDataStreamOutputEntry.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/BlockDataStreamOutputEntry.java
new file mode 100644
index 0000000000..6954742601
--- /dev/null
+++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/BlockDataStreamOutputEntry.java
@@ -0,0 +1,294 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ *  with the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ */
+package org.apache.hadoop.ozone.client.io;
+
+import com.google.common.annotations.VisibleForTesting;
+import io.netty.buffer.ByteBuf;
+import org.apache.hadoop.hdds.client.BlockID;
+import org.apache.hadoop.hdds.protocol.DatanodeDetails;
+import org.apache.hadoop.hdds.scm.OzoneClientConfig;
+import org.apache.hadoop.hdds.scm.XceiverClientFactory;
+import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
+import org.apache.hadoop.hdds.scm.storage.BlockDataStreamOutput;
+import org.apache.hadoop.hdds.scm.storage.BufferPool;
+import org.apache.hadoop.hdds.scm.storage.ByteBufStreamOutput;
+import org.apache.hadoop.hdds.security.token.OzoneBlockTokenIdentifier;
+import org.apache.hadoop.security.token.Token;
+
+import java.io.IOException;
+import java.util.Collection;
+import java.util.Collections;
+
+/**
+ * Helper class used inside {@link BlockDataStreamOutput}.
+ * */
+public final class BlockDataStreamOutputEntry
+    implements ByteBufStreamOutput {
+
+  private final OzoneClientConfig config;
+  private ByteBufStreamOutput byteBufStreamOutput;
+  private BlockID blockID;
+  private final String key;
+  private final XceiverClientFactory xceiverClientManager;
+  private final Pipeline pipeline;
+  // total number of bytes that should be written to this stream
+  private final long length;
+  // the current position of this stream 0 <= currentPosition < length
+  private long currentPosition;
+  private final Token<OzoneBlockTokenIdentifier> token;
+
+  private BufferPool bufferPool;
+
+  @SuppressWarnings({"parameternumber", "squid:S00107"})
+  private BlockDataStreamOutputEntry(
+      BlockID blockID, String key,
+      XceiverClientFactory xceiverClientManager,
+      Pipeline pipeline,
+      long length,
+      BufferPool bufferPool,
+      Token<OzoneBlockTokenIdentifier> token,
+      OzoneClientConfig config
+  ) {
+    this.config = config;
+    this.byteBufStreamOutput = null;
+    this.blockID = blockID;
+    this.key = key;
+    this.xceiverClientManager = xceiverClientManager;
+    this.pipeline = pipeline;
+    this.token = token;
+    this.length = length;
+    this.currentPosition = 0;
+    this.bufferPool = bufferPool;
+  }
+
+  long getLength() {
+    return length;
+  }
+
+  Token<OzoneBlockTokenIdentifier> getToken() {
+    return token;
+  }
+
+  long getRemaining() {
+    return length - currentPosition;
+  }
+
+  /**
+   * BlockDataStreamOutput is initialized in this function. This makes sure that
+   * xceiverClient initialization is not done during preallocation and only
+   * done when data is written.
+   * @throws IOException if xceiverClient initialization fails
+   */
+  private void checkStream() throws IOException {
+    if (this.byteBufStreamOutput == null) {
+      this.byteBufStreamOutput =
+          new BlockDataStreamOutput(blockID, xceiverClientManager,
+              pipeline, bufferPool, config, token);
+    }
+  }
+
+  @Override
+  public void write(ByteBuf b) throws IOException {
+    checkStream();
+    final int len = b.readableBytes();
+    byteBufStreamOutput.write(b);
+    this.currentPosition += len;
+  }
+
+  @Override
+  public void flush() throws IOException {
+    if (this.byteBufStreamOutput != null) {
+      this.byteBufStreamOutput.flush();
+    }
+  }
+
+  @Override
+  public void close() throws IOException {
+    if (this.byteBufStreamOutput != null) {
+      this.byteBufStreamOutput.close();
+      // after closing the underlying BlockDataStreamOutput, the blockID
+      // will have been reconstructed with the updated bcsId
+      this.blockID =
+          ((BlockDataStreamOutput) byteBufStreamOutput).getBlockID();
+    }
+  }
+
+  boolean isClosed() {
+    if (byteBufStreamOutput != null) {
+      return ((BlockDataStreamOutput) byteBufStreamOutput).isClosed();
+    }
+    return false;
+  }
+
+  long getTotalAckDataLength() {
+    if (byteBufStreamOutput != null) {
+      BlockDataStreamOutput out =
+          (BlockDataStreamOutput) this.byteBufStreamOutput;
+      blockID = out.getBlockID();
+      return out.getTotalAckDataLength();
+    } else {
+      // For a pre-allocated block for which no write has been initiated,
+      // the ByteBufStreamOutput will be null here.
+      // In such cases, the default blockCommitSequenceId will be 0.
+      return 0;
+    }
+  }
+
+  Collection<DatanodeDetails> getFailedServers() {
+    if (byteBufStreamOutput != null) {
+      BlockDataStreamOutput out =
+          (BlockDataStreamOutput) this.byteBufStreamOutput;
+      return out.getFailedServers();
+    }
+    return Collections.emptyList();
+  }
+
+  long getWrittenDataLength() {
+    if (byteBufStreamOutput != null) {
+      BlockDataStreamOutput out =
+          (BlockDataStreamOutput) this.byteBufStreamOutput;
+      return out.getWrittenDataLength();
+    } else {
+      // For a pre-allocated block for which no write has been initiated,
+      // the ByteBufStreamOutput will be null here.
+      // In such cases, the written data length is 0.
+      return 0;
+    }
+  }
+
+  void cleanup(boolean invalidateClient) throws IOException {
+    checkStream();
+    BlockDataStreamOutput out =
+        (BlockDataStreamOutput) this.byteBufStreamOutput;
+    out.cleanup(invalidateClient);
+  }
+
+  void writeOnRetry(long len) throws IOException {
+    checkStream();
+    BlockDataStreamOutput out =
+        (BlockDataStreamOutput) this.byteBufStreamOutput;
+    out.writeOnRetry(len);
+    this.currentPosition += len;
+  }
+
+  /**
+   * Builder class for BlockDataStreamOutputEntry.
+   * */
+  public static class Builder {
+
+    private BlockID blockID;
+    private String key;
+    private XceiverClientFactory xceiverClientManager;
+    private Pipeline pipeline;
+    private long length;
+    private BufferPool bufferPool;
+    private Token<OzoneBlockTokenIdentifier> token;
+    private OzoneClientConfig config;
+
+    public Builder setBlockID(BlockID bID) {
+      this.blockID = bID;
+      return this;
+    }
+
+    public Builder setKey(String keys) {
+      this.key = keys;
+      return this;
+    }
+
+    public Builder setXceiverClientManager(
+        XceiverClientFactory
+        xClientManager) {
+      this.xceiverClientManager = xClientManager;
+      return this;
+    }
+
+    public Builder setPipeline(Pipeline ppln) {
+      this.pipeline = ppln;
+      return this;
+    }
+
+    public Builder setLength(long len) {
+      this.length = len;
+      return this;
+    }
+
+    public Builder setBufferPool(BufferPool pool) {
+      this.bufferPool = pool;
+      return this;
+    }
+
+    public Builder setConfig(OzoneClientConfig clientConfig) {
+      this.config = clientConfig;
+      return this;
+    }
+
+    public Builder setToken(Token<OzoneBlockTokenIdentifier> bToken) {
+      this.token = bToken;
+      return this;
+    }
+
+    public BlockDataStreamOutputEntry build() {
+      return new BlockDataStreamOutputEntry(blockID,
+          key,
+          xceiverClientManager,
+          pipeline,
+          length,
+          bufferPool,
+          token, config);
+    }
+  }
+
+  @VisibleForTesting
+  public ByteBufStreamOutput getByteBufStreamOutput() {
+    return byteBufStreamOutput;
+  }
+
+  public BlockID getBlockID() {
+    return blockID;
+  }
+
+  public String getKey() {
+    return key;
+  }
+
+  public XceiverClientFactory getXceiverClientManager() {
+    return xceiverClientManager;
+  }
+
+  public Pipeline getPipeline() {
+    return pipeline;
+  }
+
+  public long getCurrentPosition() {
+    return currentPosition;
+  }
+
+  public BufferPool getBufferPool() {
+    return bufferPool;
+  }
+
+  public void setCurrentPosition(long curPosition) {
+    this.currentPosition = curPosition;
+  }
+}
+
+
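A sketch of assembling an entry through the Builder above; each argument
value is a placeholder that would normally come from an OmKeyLocationInfo,
as the entry pool below does.

    BlockDataStreamOutputEntry entry = new BlockDataStreamOutputEntry.Builder()
        .setBlockID(blockID)               // placeholder BlockID
        .setKey("key1")
        .setXceiverClientManager(factory)  // placeholder XceiverClientFactory
        .setPipeline(pipeline)             // placeholder Pipeline
        .setLength(blockLength)
        .setBufferPool(bufferPool)
        .setToken(token)                   // placeholder block token
        .setConfig(clientConfig)
        .build();

Since checkStream() defers creating the BlockDataStreamOutput until the
first write, building an entry for a pre-allocated block stays cheap.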
diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/BlockDataStreamOutputEntryPool.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/BlockDataStreamOutputEntryPool.java
new file mode 100644
index 0000000000..94c505f2af
--- /dev/null
+++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/BlockDataStreamOutputEntryPool.java
@@ -0,0 +1,324 @@
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ *  with the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ */
+package org.apache.hadoop.ozone.client.io;
+
+import com.google.common.annotations.VisibleForTesting;
+import com.google.common.base.Preconditions;
+import org.apache.hadoop.hdds.client.ReplicationConfig;
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.scm.ByteStringConversion;
+import org.apache.hadoop.hdds.scm.OzoneClientConfig;
+import org.apache.hadoop.hdds.scm.XceiverClientFactory;
+import org.apache.hadoop.hdds.scm.container.common.helpers.ExcludeList;
+import org.apache.hadoop.hdds.scm.pipeline.PipelineID;
+import org.apache.hadoop.hdds.scm.storage.BufferPool;
+import org.apache.hadoop.ozone.om.helpers.OmKeyArgs;
+import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
+import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo;
+import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup;
+import org.apache.hadoop.ozone.om.helpers.OmMultipartCommitUploadPartInfo;
+import org.apache.hadoop.ozone.om.protocol.OzoneManagerProtocol;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.ListIterator;
+
+/**
+ * This class manages the stream entries list and handles block allocation
+ * from OzoneManager.
+ */
+public class BlockDataStreamOutputEntryPool {
+
+  public static final Logger LOG =
+      LoggerFactory.getLogger(BlockDataStreamOutputEntryPool.class);
+
+  private final List<BlockDataStreamOutputEntry> streamEntries;
+  private final OzoneClientConfig config;
+  private int currentStreamIndex;
+  private final OzoneManagerProtocol omClient;
+  private final OmKeyArgs keyArgs;
+  private final XceiverClientFactory xceiverClientFactory;
+  private final String requestID;
+  private final BufferPool bufferPool;
+  private OmMultipartCommitUploadPartInfo commitUploadPartInfo;
+  private final long openID;
+  private final ExcludeList excludeList;
+
+  @SuppressWarnings({"parameternumber", "squid:S00107"})
+  public BlockDataStreamOutputEntryPool(
+      OzoneClientConfig config,
+      OzoneManagerProtocol omClient,
+      String requestId, ReplicationConfig replicationConfig,
+      String uploadID, int partNumber,
+      boolean isMultipart, OmKeyInfo info,
+      boolean unsafeByteBufferConversion,
+      XceiverClientFactory xceiverClientFactory, long openID
+  ) {
+    this.config = config;
+    this.xceiverClientFactory = xceiverClientFactory;
+    streamEntries = new ArrayList<>();
+    currentStreamIndex = 0;
+    this.omClient = omClient;
+    this.keyArgs = new OmKeyArgs.Builder().setVolumeName(info.getVolumeName())
+        .setBucketName(info.getBucketName()).setKeyName(info.getKeyName())
+        .setReplicationConfig(replicationConfig).setDataSize(info.getDataSize())
+        .setIsMultipartKey(isMultipart).setMultipartUploadID(uploadID)
+        .setMultipartUploadPartNumber(partNumber).build();
+    this.requestID = requestId;
+    this.openID = openID;
+    this.excludeList = new ExcludeList();
+
+    this.bufferPool =
+        new BufferPool(config.getStreamBufferSize(),
+            (int) (config.getStreamBufferMaxSize() / config
+                .getStreamBufferSize()),
+            ByteStringConversion
+                .createByteBufferConversion(unsafeByteBufferConversion));
+  }
+
+  /**
+   * A constructor for testing purpose only.
+   *
+   * @see KeyDataStreamOutput#KeyDataStreamOutput()
+   */
+  @VisibleForTesting
+  BlockDataStreamOutputEntryPool() {
+    streamEntries = new ArrayList<>();
+    omClient = null;
+    keyArgs = null;
+    xceiverClientFactory = null;
+    config =
+        new OzoneConfiguration().getObject(OzoneClientConfig.class);
+    config.setStreamBufferSize(0);
+    config.setStreamBufferMaxSize(0);
+    config.setStreamBufferFlushSize(0);
+    config.setStreamBufferFlushDelay(false);
+    requestID = null;
+    int chunkSize = 0;
+    bufferPool = new BufferPool(chunkSize, 1);
+
+    currentStreamIndex = 0;
+    openID = -1;
+    excludeList = new ExcludeList();
+  }
+
+  /**
+   * When a key is opened, it is possible that there are some blocks already
+   * allocated to it for this open session. In this case, to make use of these
+   * blocks, we need to add these blocks to stream entries. But a key's version
+   * also includes blocks from previous versions, so we need to avoid adding
+   * these old blocks to stream entries, because they should not be picked
+   * for write. To do this, the following method ensures that only those
+   * blocks created in this particular open version are added to stream entries.
+   *
+   * @param version the set of blocks that are pre-allocated.
+   * @param openVersion the version corresponding to the pre-allocation.
+   * @throws IOException
+   */
+  public void addPreallocateBlocks(OmKeyLocationInfoGroup version,
+      long openVersion) throws IOException {
+    // The server may return any number of blocks (zero or more); only
+    // the blocks allocated in this open session (block createVersion
+    // equal to the open session version) are added.
+    for (OmKeyLocationInfo subKeyInfo : version.getLocationList(openVersion)) {
+      addKeyLocationInfo(subKeyInfo);
+    }
+  }
+
+  private void addKeyLocationInfo(OmKeyLocationInfo subKeyInfo) {
+    Preconditions.checkNotNull(subKeyInfo.getPipeline());
+    BlockDataStreamOutputEntry.Builder builder =
+        new BlockDataStreamOutputEntry.Builder()
+            .setBlockID(subKeyInfo.getBlockID())
+            .setKey(keyArgs.getKeyName())
+            .setXceiverClientManager(xceiverClientFactory)
+            .setPipeline(subKeyInfo.getPipeline())
+            .setConfig(config)
+            .setLength(subKeyInfo.getLength())
+            .setBufferPool(bufferPool)
+            .setToken(subKeyInfo.getToken());
+    streamEntries.add(builder.build());
+  }
+
+  public List<OmKeyLocationInfo> getLocationInfoList()  {
+    List<OmKeyLocationInfo> locationInfoList = new ArrayList<>();
+    for (BlockDataStreamOutputEntry streamEntry : streamEntries) {
+      long length = streamEntry.getCurrentPosition();
+
+      // Commit only those blocks to OzoneManager which are not empty
+      if (length != 0) {
+        OmKeyLocationInfo info =
+            new OmKeyLocationInfo.Builder().setBlockID(streamEntry.getBlockID())
+                .setLength(streamEntry.getCurrentPosition()).setOffset(0)
+                .setToken(streamEntry.getToken())
+                .setPipeline(streamEntry.getPipeline()).build();
+        locationInfoList.add(info);
+      }
+      if (LOG.isDebugEnabled()) {
+        LOG.debug(
+            "block written " + streamEntry.getBlockID() + ", length " + length
+                + " bcsID " + streamEntry.getBlockID()
+                .getBlockCommitSequenceId());
+      }
+    }
+    return locationInfoList;
+  }
+
+  /**
+   * Discards the subsequent pre-allocated blocks and removes the corresponding
+   * entries from the streamEntries list for the container which is closed.
+   * @param containerID id of the closed container
+   * @param pipelineId id of the associated pipeline
+   */
+  void discardPreallocatedBlocks(long containerID, PipelineID pipelineId) {
+    // currentStreamIndex < streamEntries.size() signifies that there are
+    // still pre-allocated blocks available.
+
+    // This will be called only to discard the next subsequent unused blocks
+    // in the streamEntryList.
+    if (currentStreamIndex + 1 < streamEntries.size()) {
+      ListIterator<BlockDataStreamOutputEntry> streamEntryIterator =
+          streamEntries.listIterator(currentStreamIndex + 1);
+      while (streamEntryIterator.hasNext()) {
+        BlockDataStreamOutputEntry streamEntry = streamEntryIterator.next();
+        Preconditions.checkArgument(streamEntry.getCurrentPosition() == 0);
+        if ((streamEntry.getPipeline().getId().equals(pipelineId)) ||
+            (containerID != -1 &&
+                streamEntry.getBlockID().getContainerID() == containerID)) {
+          streamEntryIterator.remove();
+        }
+      }
+    }
+  }
+
+  List<BlockDataStreamOutputEntry> getStreamEntries() {
+    return streamEntries;
+  }
+
+  XceiverClientFactory getXceiverClientFactory() {
+    return xceiverClientFactory;
+  }
+
+  String getKeyName() {
+    return keyArgs.getKeyName();
+  }
+
+  long getKeyLength() {
+    return streamEntries.stream().mapToLong(
+        BlockDataStreamOutputEntry::getCurrentPosition).sum();
+  }
+
+  /**
+   * Contact OM to get a new block. Set the new block with the next index
+   * (e.g. the first block has index = 0, the second has index = 1, etc.).
+   *
+   * The returned block is wrapped in a new BlockDataStreamOutputEntry to write.
+   *
+   * @throws IOException
+   */
+  private void allocateNewBlock() throws IOException {
+    if (!excludeList.isEmpty()) {
+      LOG.debug("Allocating block with {}", excludeList);
+    }
+    OmKeyLocationInfo subKeyInfo =
+        omClient.allocateBlock(keyArgs, openID, excludeList);
+    addKeyLocationInfo(subKeyInfo);
+  }
+
+  void commitKey(long offset) throws IOException {
+    if (keyArgs != null) {
+      // in test, this could be null
+      long length = getKeyLength();
+      Preconditions.checkArgument(offset == length);
+      keyArgs.setDataSize(length);
+      keyArgs.setLocationInfoList(getLocationInfoList());
+      // When the key is a multipart upload part file, we should not
+      // commit the key, as this is not an actual key; it is just a
+      // part of a larger file.
+      if (keyArgs.getIsMultipartKey()) {
+        commitUploadPartInfo =
+            omClient.commitMultipartUploadPart(keyArgs, openID);
+      } else {
+        omClient.commitKey(keyArgs, openID);
+      }
+    } else {
+      LOG.warn("Closing KeyDataStreamOutput, but key args is null");
+    }
+  }
+
+  public BlockDataStreamOutputEntry getCurrentStreamEntry() {
+    if (streamEntries.isEmpty() || streamEntries.size() <= currentStreamIndex) {
+      return null;
+    } else {
+      return streamEntries.get(currentStreamIndex);
+    }
+  }
+
+  BlockDataStreamOutputEntry allocateBlockIfNeeded() throws IOException {
+    BlockDataStreamOutputEntry streamEntry = getCurrentStreamEntry();
+    if (streamEntry != null && streamEntry.isClosed()) {
+      // a stream entry gets closed either by :
+      // a. If the stream gets full
+      // b. it has encountered an exception
+      currentStreamIndex++;
+    }
+    if (streamEntries.size() <= currentStreamIndex) {
+      Preconditions.checkNotNull(omClient);
+      // allocate a new block; if an exception happens, log an error and
+      // throw the exception to the caller directly, and the write fails.
+      allocateNewBlock();
+    }
+    // in theory, this condition should never be violated due to the check
+    // above; still do a sanity check.
+    Preconditions.checkArgument(currentStreamIndex < streamEntries.size());
+    return streamEntries.get(currentStreamIndex);
+  }
+
+  long computeBufferData() {
+    return bufferPool.computeBufferData();
+  }
+
+  void cleanup() {
+    if (excludeList != null) {
+      excludeList.clear();
+    }
+    if (bufferPool != null) {
+      bufferPool.clearBufferPool();
+    }
+
+    if (streamEntries != null) {
+      streamEntries.clear();
+    }
+  }
+
+  public OmMultipartCommitUploadPartInfo getCommitUploadPartInfo() {
+    return commitUploadPartInfo;
+  }
+
+  public ExcludeList getExcludeList() {
+    return excludeList;
+  }
+
+  boolean isEmpty() {
+    return streamEntries.isEmpty();
+  }
+}
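To illustrate the allocation flow above under assumed inputs: a caller
holding an OpenKeySession (here "openKey") seeds the pool with the blocks
pre-allocated in this open version, then lets the pool allocate further
blocks on demand.

    // Only blocks whose createVersion equals the open version become
    // stream entries; blocks from older versions are skipped.
    pool.addPreallocateBlocks(
        openKey.getKeyInfo().getLatestVersionLocations(),
        openKey.getOpenVersion());
    // Returns the current entry, contacting OM for a new block if the
    // pre-allocated ones are exhausted or the current entry is closed.
    BlockDataStreamOutputEntry current = pool.allocateBlockIfNeeded();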
diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/KeyDataStreamOutput.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/KeyDataStreamOutput.java
new file mode 100644
index 0000000000..a9be11667c
--- /dev/null
+++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/KeyDataStreamOutput.java
@@ -0,0 +1,629 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ *  with the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ */
+package org.apache.hadoop.ozone.client.io;
+
+import com.google.common.annotations.VisibleForTesting;
+import com.google.common.base.Preconditions;
+import io.netty.buffer.ByteBuf;
+import org.apache.hadoop.fs.FSExceptionMessages;
+import org.apache.hadoop.fs.FileEncryptionInfo;
+import org.apache.hadoop.hdds.client.ReplicationConfig;
+import org.apache.hadoop.hdds.protocol.DatanodeDetails;
+import org.apache.hadoop.hdds.scm.OzoneClientConfig;
+import org.apache.hadoop.hdds.scm.XceiverClientFactory;
+import org.apache.hadoop.hdds.scm.client.HddsClientUtils;
+import org.apache.hadoop.hdds.scm.container.ContainerID;
+import org.apache.hadoop.hdds.scm.container.common.helpers.ExcludeList;
+import org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException;
+import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
+import org.apache.hadoop.hdds.scm.pipeline.PipelineID;
+import org.apache.hadoop.hdds.scm.storage.ByteBufStreamOutput;
+import org.apache.hadoop.io.retry.RetryPolicies;
+import org.apache.hadoop.io.retry.RetryPolicy;
+import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
+import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo;
+import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup;
+import org.apache.hadoop.ozone.om.helpers.OmMultipartCommitUploadPartInfo;
+import org.apache.hadoop.ozone.om.helpers.OpenKeySession;
+import org.apache.hadoop.ozone.om.protocol.OzoneManagerProtocol;
+import org.apache.ratis.protocol.exceptions.AlreadyClosedException;
+import org.apache.ratis.protocol.exceptions.RaftRetryFailureException;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.IOException;
+import java.io.InterruptedIOException;
+import java.util.Collection;
+import java.util.List;
+import java.util.Map;
+import java.util.function.Function;
+import java.util.stream.Collectors;
+
+/**
+ * Maintains a list of BlockDataStreamOutputEntry. Writes are based on offset.
+ *
+ * Note that this may write to multiple containers in one write call. In case
+ * the first container succeeds but later ones fail, the successful writes
+ * are not rolled back.
+ *
+ * TODO : multi-threaded access is currently not supported.
+ */
+public class KeyDataStreamOutput implements ByteBufStreamOutput {
+
+  private OzoneClientConfig config;
+
+  /**
+   * Defines stream action while calling handleFlushOrClose.
+   */
+  enum StreamAction {
+    FLUSH, CLOSE, FULL
+  }
+
+  public static final Logger LOG =
+      LoggerFactory.getLogger(KeyDataStreamOutput.class);
+
+  private boolean closed;
+  private FileEncryptionInfo feInfo;
+  private final Map<Class<? extends Throwable>, RetryPolicy> retryPolicyMap;
+  private int retryCount;
+  // how much data has actually been written to the underlying stream so far
+  private long offset;
+  // how much data has been ingested into the stream
+  private long writeOffset;
+  // whether an exception was encountered during write and the whole write
+  // could not succeed
+  private boolean isException;
+  private final BlockDataStreamOutputEntryPool blockDataStreamOutputEntryPool;
+
+  private long clientID;
+
+  /**
+   * A constructor for testing purpose only.
+   */
+  @VisibleForTesting
+  public KeyDataStreamOutput() {
+    closed = false;
+    this.retryPolicyMap = HddsClientUtils.getExceptionList()
+        .stream()
+        .collect(Collectors.toMap(Function.identity(),
+            e -> RetryPolicies.TRY_ONCE_THEN_FAIL));
+    retryCount = 0;
+    offset = 0;
+    blockDataStreamOutputEntryPool = new BlockDataStreamOutputEntryPool();
+  }
+
+  @VisibleForTesting
+  public List<BlockDataStreamOutputEntry> getStreamEntries() {
+    return blockDataStreamOutputEntryPool.getStreamEntries();
+  }
+
+  @VisibleForTesting
+  public XceiverClientFactory getXceiverClientFactory() {
+    return blockDataStreamOutputEntryPool.getXceiverClientFactory();
+  }
+
+  @VisibleForTesting
+  public List<OmKeyLocationInfo> getLocationInfoList() {
+    return blockDataStreamOutputEntryPool.getLocationInfoList();
+  }
+
+  @VisibleForTesting
+  public int getRetryCount() {
+    return retryCount;
+  }
+
+  @VisibleForTesting
+  public long getClientID() {
+    return clientID;
+  }
+
+  @SuppressWarnings({"parameternumber", "squid:S00107"})
+  public KeyDataStreamOutput(
+      OzoneClientConfig config,
+      OpenKeySession handler,
+      XceiverClientFactory xceiverClientManager,
+      OzoneManagerProtocol omClient, int chunkSize,
+      String requestId, ReplicationConfig replicationConfig,
+      String uploadID, int partNumber, boolean isMultipart,
+      boolean unsafeByteBufferConversion
+  ) {
+    this.config = config;
+    OmKeyInfo info = handler.getKeyInfo();
+    blockDataStreamOutputEntryPool =
+        new BlockDataStreamOutputEntryPool(
+            config,
+            omClient,
+            requestId, replicationConfig,
+            uploadID, partNumber,
+            isMultipart, info,
+            unsafeByteBufferConversion,
+            xceiverClientManager,
+            handler.getId());
+
+    // Retrieve the file encryption key info, null if file is not in
+    // encrypted bucket.
+    this.feInfo = info.getFileEncryptionInfo();
+    this.retryPolicyMap = HddsClientUtils.getRetryPolicyByException(
+        config.getMaxRetryCount(), config.getRetryInterval());
+    this.retryCount = 0;
+    this.isException = false;
+    this.writeOffset = 0;
+    this.clientID = handler.getId();
+  }
+
+  /**
+   * When a key is opened, it is possible that there are some blocks already
+   * allocated to it for this open session. In this case, to make use of these
+   * blocks, we need to add these blocks to stream entries. But, a key's version
+   * also includes blocks from previous versions, we need to avoid adding these
+   * old blocks to stream entries, because these old blocks should not be picked
+   * for write. To do this, the following method checks that, only those
+   * blocks created in this particular open version are added to stream entries.
+   *
+   * @param version the set of blocks that are pre-allocated.
+   * @param openVersion the version corresponding to the pre-allocation.
+   * @throws IOException
+   */
+  public void addPreallocateBlocks(OmKeyLocationInfoGroup version,
+      long openVersion) throws IOException {
+    blockDataStreamOutputEntryPool.addPreallocateBlocks(version, openVersion);
+  }
+
+  @Override
+  public void write(ByteBuf b) throws IOException {
+    checkNotClosed();
+    if (b == null) {
+      throw new NullPointerException();
+    }
+    final int len = b.readableBytes();
+    handleWrite(b, b.readerIndex(), len, false);
+    writeOffset += len;
+  }
+
+  private void handleWrite(ByteBuf b, int off, long len, boolean retry)
+      throws IOException {
+    while (len > 0) {
+      try {
+        BlockDataStreamOutputEntry current =
+            blockDataStreamOutputEntryPool.allocateBlockIfNeeded();
+        // length (len) will be in int range if the call comes through the
+        // write API of blockDataStreamOutput. Length can be in long range
+        // if it comes via the exception path.
+        int expectedWriteLen = Math.min((int) len,
+                (int) current.getRemaining());
+        long currentPos = current.getWrittenDataLength();
+        // writeLen will be updated based on whether the write succeeded
+        // or, if it hit an exception, how much of the write was actually
+        // acknowledged.
+        int writtenLength =
+            writeToDataStreamOutput(current, retry, len, b,
+                expectedWriteLen, off, currentPos);
+        if (current.getRemaining() <= 0) {
+          // since the current block is already written close the stream.
+          handleFlushOrClose(StreamAction.FULL);
+        }
+        len -= writtenLength;
+        off += writtenLength;
+      } catch (Exception e) {
+        markStreamClosed();
+        throw new IOException(e);
+      }
+    }
+  }
+
+  private int writeToDataStreamOutput(BlockDataStreamOutputEntry current,
+      boolean retry, long len, ByteBuf b, int writeLen, int off,
+      long currentPos) throws IOException {
+    try {
+      if (retry) {
+        current.writeOnRetry(len);
+      } else {
+        current.write(b, off, writeLen);
+        offset += writeLen;
+      }
+    } catch (IOException ioe) {
+      // for the current iteration, totalDataWritten - currentPos gives the
+      // amount of data already written to the buffer
+
+      // In the retry path, the total data to be written will always be equal
+      // to or less than the max length of the buffer allocated.
+      // The len specified here is the combined sum of the data lengths of
+      // the buffers.
+      Preconditions.checkState(!retry || len <= config
+          .getStreamBufferMaxSize());
+      int dataWritten = (int) (current.getWrittenDataLength() - currentPos);
+      writeLen = retry ? (int) len : dataWritten;
+      // In retry path, the data written is already accounted in offset.
+      if (!retry) {
+        offset += writeLen;
+      }
+      LOG.debug("writeLen {}, total len {}", writeLen, len);
+      handleException(current, ioe);
+    }
+    return writeLen;
+  }
+
+  /**
+   * It performs the following actions:
+   * a. Updates the committed length at the datanode for the current stream.
+   * b. Reads the data from the underlying buffer and writes it to the next
+   * stream.
+   *
+   * @param streamEntry StreamEntry
+   * @param exception   actual exception that occurred
+   * @throws IOException Throws IOException if Write fails
+   */
+  private void handleException(BlockDataStreamOutputEntry streamEntry,
+      IOException exception) throws IOException {
+    Throwable t = HddsClientUtils.checkForException(exception);
+    Preconditions.checkNotNull(t);
+    boolean retryFailure = checkForRetryFailure(t);
+    boolean containerExclusionException = false;
+    if (!retryFailure) {
+      containerExclusionException = checkIfContainerToExclude(t);
+    }
+    Pipeline pipeline = streamEntry.getPipeline();
+    PipelineID pipelineId = pipeline.getId();
+    long totalSuccessfulFlushedData = streamEntry.getTotalAckDataLength();
+    //set the correct length for the current stream
+    streamEntry.setCurrentPosition(totalSuccessfulFlushedData);
+    long bufferedDataLen = blockDataStreamOutputEntryPool.computeBufferData();
+    if (containerExclusionException) {
+      LOG.debug(
+          "Encountered exception {}. The last committed block length is {}, "
+              + "uncommitted data length is {} retry count {}", exception,
+          totalSuccessfulFlushedData, bufferedDataLen, retryCount);
+    } else {
+      LOG.warn(
+          "Encountered exception {} on the pipeline {}. "
+              + "The last committed block length is {}, "
+              + "uncommitted data length is {} retry count {}", exception,
+          pipeline, totalSuccessfulFlushedData, bufferedDataLen, retryCount);
+    }
+    Preconditions.checkArgument(
+        bufferedDataLen <= config.getStreamBufferMaxSize());
+    Preconditions.checkArgument(
+        offset - blockDataStreamOutputEntryPool.getKeyLength() ==
+        bufferedDataLen);
+    long containerId = streamEntry.getBlockID().getContainerID();
+    Collection<DatanodeDetails> failedServers = streamEntry.getFailedServers();
+    Preconditions.checkNotNull(failedServers);
+    ExcludeList excludeList = blockDataStreamOutputEntryPool.getExcludeList();
+    if (!failedServers.isEmpty()) {
+      excludeList.addDatanodes(failedServers);
+    }
+
+    // if the container needs to be excluded, add the container to the
+    // exclusion list; otherwise add the pipeline to the exclusion list
+    if (containerExclusionException) {
+      excludeList.addConatinerId(ContainerID.valueOf(containerId));
+    } else {
+      excludeList.addPipeline(pipelineId);
+    }
+    // just clean up the current stream.
+    streamEntry.cleanup(retryFailure);
+
+    // discard all subsequent blocks on the containers and pipelines which
+    // are in the exclude list so that the very next retry never writes
+    // data on the closed container/pipeline
+    if (containerExclusionException) {
+      // discard subsequent pre allocated blocks from the streamEntries list
+      // from the closed container
+      blockDataStreamOutputEntryPool
+          .discardPreallocatedBlocks(streamEntry.getBlockID().getContainerID(),
+              null);
+    } else {
+      // In case there is a timeout exception, or watch-for-commit happening
+      // over a majority, or a client connection failure to the leader in the
+      // pipeline, just discard all the pre-allocated blocks on this pipeline.
+      // The next block allocation will exclude this specific pipeline.
+      // This ensures that if a 2-way commit happens, it cannot span multiple
+      // blocks.
+      blockDataStreamOutputEntryPool
+          .discardPreallocatedBlocks(-1, pipelineId);
+    }
+    if (bufferedDataLen > 0) {
+      // If the data is still cached in the underlying stream, we need to
+      // allocate new block and write this data in the datanode.
+      handleRetry(exception, bufferedDataLen);
+      // reset the retryCount after handling the exception
+      retryCount = 0;
+    }
+  }
+
+  private void markStreamClosed() {
+    blockDataStreamOutputEntryPool.cleanup();
+    closed = true;
+  }
+
+  private void handleRetry(IOException exception, long len) throws IOException {
+    RetryPolicy retryPolicy = retryPolicyMap
+        .get(HddsClientUtils.checkForException(exception).getClass());
+    if (retryPolicy == null) {
+      retryPolicy = retryPolicyMap.get(Exception.class);
+    }
+    RetryPolicy.RetryAction action = null;
+    try {
+      action = retryPolicy.shouldRetry(exception, retryCount, 0, true);
+    } catch (Exception e) {
+      setExceptionAndThrow(new IOException(e));
+    }
+    if (action.action == RetryPolicy.RetryAction.RetryDecision.FAIL) {
+      String msg = "";
+      if (action.reason != null) {
+        msg = "Retry request failed. " + action.reason;
+        LOG.error(msg, exception);
+      }
+      setExceptionAndThrow(new IOException(msg, exception));
+    }
+
+    // Throw the exception if the thread is interrupted
+    if (Thread.currentThread().isInterrupted()) {
+      LOG.warn("Interrupted while trying for retry");
+      setExceptionAndThrow(exception);
+    }
+    Preconditions.checkArgument(
+        action.action == RetryPolicy.RetryAction.RetryDecision.RETRY);
+    if (action.delayMillis > 0) {
+      try {
+        Thread.sleep(action.delayMillis);
+      } catch (InterruptedException e) {
+        Thread.currentThread().interrupt();
+        IOException ioe =  (IOException) new InterruptedIOException(
+            "Interrupted: action=" + action + ", retry policy=" + retryPolicy)
+            .initCause(e);
+        setExceptionAndThrow(ioe);
+      }
+    }
+    retryCount++;
+    if (LOG.isTraceEnabled()) {
+      LOG.trace("Retrying Write request. Already tried {} time(s); " +
+          "retry policy is {} ", retryCount, retryPolicy);
+    }
+    handleWrite(null, 0, len, true);
+  }
+
+  private void setExceptionAndThrow(IOException ioe) throws IOException {
+    isException = true;
+    throw ioe;
+  }
+
+  /**
+   * Checks if the provided exception signifies retry failure in ratis client.
+   * In case of retry failure, ratis client throws RaftRetryFailureException
+   * and all succeeding operations are failed with AlreadyClosedException.
+   */
+  private boolean checkForRetryFailure(Throwable t) {
+    return t instanceof RaftRetryFailureException
+        || t instanceof AlreadyClosedException;
+  }
+
+  // Every container-specific exception from a datanode will be seen as a
+  // StorageContainerException.
+  private boolean checkIfContainerToExclude(Throwable t) {
+    return t instanceof StorageContainerException;
+  }
+
+  @Override
+  public void flush() throws IOException {
+    checkNotClosed();
+    handleFlushOrClose(StreamAction.FLUSH);
+  }
+
+  /**
+   * Close or flush the latest outputStream depending upon the action.
+   * This function gets called when, while a write is going on, the current
+   * stream gets full or an explicit flush or close request is made by the
+   * client. When the stream gets full and we try to close the stream, we
+   * might end up hitting an exception in the exception handling path; we
+   * then write the data residing in the buffer pool to a new block. In such
+   * cases, when the data gets written to the new stream, it will be at most
+   * half full, so we should just write the data and not close the stream,
+   * as the block won't be completely full.
+   *
+   * @param op Flag which decides whether to call close or flush on the
+   *           outputStream.
+   * @throws IOException In case, flush or close fails with exception.
+   */
+  @SuppressWarnings("squid:S1141")
+  private void handleFlushOrClose(StreamAction op) throws IOException {
+    if (!blockDataStreamOutputEntryPool.isEmpty()) {
+      while (true) {
+        try {
+          BlockDataStreamOutputEntry entry =
+              blockDataStreamOutputEntryPool.getCurrentStreamEntry();
+          if (entry != null) {
+            try {
+              handleStreamAction(entry, op);
+            } catch (IOException ioe) {
+              handleException(entry, ioe);
+              continue;
+            }
+          }
+          return;
+        } catch (Exception e) {
+          markStreamClosed();
+          throw e;
+        }
+      }
+    }
+  }
+
+  private void handleStreamAction(BlockDataStreamOutputEntry entry,
+                                  StreamAction op) throws IOException {
+    Collection<DatanodeDetails> failedServers = entry.getFailedServers();
+    // the failed servers list is empty in case no data has been written to
+    // the stream
+    if (!failedServers.isEmpty()) {
+      blockDataStreamOutputEntryPool.getExcludeList().addDatanodes(
+          failedServers);
+    }
+    switch (op) {
+    case CLOSE:
+      entry.close();
+      break;
+    case FULL:
+      if (entry.getRemaining() == 0) {
+        entry.close();
+      }
+      break;
+    case FLUSH:
+      entry.flush();
+      break;
+    default:
+      throw new IOException("Invalid Operation");
+    }
+  }
+
+  /**
+   * Commit the key to OM, this will add the blocks as the new key blocks.
+   *
+   * @throws IOException
+   */
+  @Override
+  public void close() throws IOException {
+    if (closed) {
+      return;
+    }
+    closed = true;
+    try {
+      handleFlushOrClose(StreamAction.CLOSE);
+      if (!isException) {
+        Preconditions.checkArgument(writeOffset == offset);
+      }
+      blockDataStreamOutputEntryPool.commitKey(offset);
+    } finally {
+      blockDataStreamOutputEntryPool.cleanup();
+    }
+  }
+
+  public OmMultipartCommitUploadPartInfo getCommitUploadPartInfo() {
+    return blockDataStreamOutputEntryPool.getCommitUploadPartInfo();
+  }
+
+  public FileEncryptionInfo getFileEncryptionInfo() {
+    return feInfo;
+  }
+
+  @VisibleForTesting
+  public ExcludeList getExcludeList() {
+    return blockDataStreamOutputEntryPool.getExcludeList();
+  }
+
+  /**
+   * Builder class of KeyDataStreamOutput.
+   */
+  public static class Builder {
+    private OpenKeySession openHandler;
+    private XceiverClientFactory xceiverManager;
+    private OzoneManagerProtocol omClient;
+    private int chunkSize;
+    private String requestID;
+    private String multipartUploadID;
+    private int multipartNumber;
+    private boolean isMultipartKey;
+    private boolean unsafeByteBufferConversion;
+    private OzoneClientConfig clientConfig;
+    private ReplicationConfig replicationConfig;
+
+    public Builder setMultipartUploadID(String uploadID) {
+      this.multipartUploadID = uploadID;
+      return this;
+    }
+
+    public Builder setMultipartNumber(int partNumber) {
+      this.multipartNumber = partNumber;
+      return this;
+    }
+
+    public Builder setHandler(OpenKeySession handler) {
+      this.openHandler = handler;
+      return this;
+    }
+
+    public Builder setXceiverClientManager(XceiverClientFactory manager) {
+      this.xceiverManager = manager;
+      return this;
+    }
+
+    public Builder setOmClient(OzoneManagerProtocol client) {
+      this.omClient = client;
+      return this;
+    }
+
+    public Builder setChunkSize(int size) {
+      this.chunkSize = size;
+      return this;
+    }
+
+    public Builder setRequestID(String id) {
+      this.requestID = id;
+      return this;
+    }
+
+    public Builder setIsMultipartKey(boolean isMultipart) {
+      this.isMultipartKey = isMultipart;
+      return this;
+    }
+
+    public Builder setConfig(OzoneClientConfig config) {
+      this.clientConfig = config;
+      return this;
+    }
+
+    public Builder enableUnsafeByteBufferConversion(boolean enabled) {
+      this.unsafeByteBufferConversion = enabled;
+      return this;
+    }
+
+    public Builder setReplicationConfig(ReplicationConfig replConfig) {
+      this.replicationConfig = replConfig;
+      return this;
+    }
+
+    public KeyDataStreamOutput build() {
+      return new KeyDataStreamOutput(
+          clientConfig,
+          openHandler,
+          xceiverManager,
+          omClient,
+          chunkSize,
+          requestID,
+          replicationConfig,
+          multipartUploadID,
+          multipartNumber,
+          isMultipartKey,
+          unsafeByteBufferConversion);
+    }
+
+  }
+
+  /**
+   * Verify that the output stream is open. Non-blocking; this gives
+   * the last state of the {@link #closed} field.
+   * @throws IOException if the connection is closed.
+   */
+  private void checkNotClosed() throws IOException {
+    if (closed) {
+      throw new IOException(
+          ": " + FSExceptionMessages.STREAM_IS_CLOSED + " Key: "
+              + blockDataStreamOutputEntryPool.getKeyName());
+    }
+  }
+}
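A conceptual sketch of the handleWrite loop above: a single write() call
may span several block entries. The 256 MB block size and payload below
are assumptions, not values asserted by this change.

    // payload is a hypothetical ~300 MB byte array; with 256 MB blocks it
    // fills entry #0 completely (closed via StreamAction.FULL once
    // getRemaining() hits 0) and the rest lands in entry #1, allocated
    // from OM on demand.
    keyDataStreamOutput.write(Unpooled.wrappedBuffer(payload));
    // close() flushes and closes the entries, then commits the locations
    // of all non-empty blocks to OM via commitKey().
    keyDataStreamOutput.close();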
diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/OzoneDataStreamOutput.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/OzoneDataStreamOutput.java
new file mode 100644
index 0000000000..378b86872e
--- /dev/null
+++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/OzoneDataStreamOutput.java
@@ -0,0 +1,70 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership.  The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+package org.apache.hadoop.ozone.client.io;
+
+import io.netty.buffer.ByteBuf;
+import org.apache.hadoop.hdds.scm.storage.ByteBufStreamOutput;
+import org.apache.hadoop.ozone.om.helpers.OmMultipartCommitUploadPartInfo;
+
+import java.io.IOException;
+
+/**
+ * OzoneDataStreamOutput is used to write data into Ozone.
+ * It typically wraps a {@link KeyDataStreamOutput} for writing the data.
+ */
+public class OzoneDataStreamOutput implements ByteBufStreamOutput {
+
+  private final ByteBufStreamOutput byteBufStreamOutput;
+
+  /**
+   * Constructs OzoneDataStreamOutput with a KeyDataStreamOutput.
+   *
+   * @param byteBufStreamOutput the underlying stream to which data is written
+   */
+  public OzoneDataStreamOutput(ByteBufStreamOutput byteBufStreamOutput) {
+    this.byteBufStreamOutput = byteBufStreamOutput;
+  }
+
+  @Override
+  public void write(ByteBuf b) throws IOException {
+    byteBufStreamOutput.write(b);
+  }
+
+  @Override
+  public synchronized void flush() throws IOException {
+    byteBufStreamOutput.flush();
+  }
+
+  @Override
+  public synchronized void close() throws IOException {
+    // commitKey can be done here, if needed.
+    byteBufStreamOutput.close();
+  }
+
+  public OmMultipartCommitUploadPartInfo getCommitUploadPartInfo() {
+    if (byteBufStreamOutput instanceof KeyDataStreamOutput) {
+      return ((KeyDataStreamOutput)
+              byteBufStreamOutput).getCommitUploadPartInfo();
+    }
+    // Otherwise return null.
+    return null;
+  }
+
+  public ByteBufStreamOutput getByteBufStreamOutput() {
+    return byteBufStreamOutput;
+  }
+}
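The class above is a thin delegating wrapper. A sketch of the pattern,
with "keyOut" standing in for a KeyDataStreamOutput built as in the
RpcClient change further below:

    OzoneDataStreamOutput out = new OzoneDataStreamOutput(keyOut);
    out.write(Unpooled.copiedBuffer(data));  // delegates to keyOut.write
    out.close();                             // keyOut.close() commits the key
    // Non-null only when keyOut is a KeyDataStreamOutput for a multipart
    // upload part:
    OmMultipartCommitUploadPartInfo part = out.getCommitUploadPartInfo();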
diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/protocol/ClientProtocol.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/protocol/ClientProtocol.java
index 70a04406a0..8f7f385628 100644
--- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/protocol/ClientProtocol.java
+++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/protocol/ClientProtocol.java
@@ -40,6 +40,7 @@ import org.apache.hadoop.ozone.client.OzoneMultipartUploadPartListParts;
 import org.apache.hadoop.ozone.client.OzoneVolume;
 import org.apache.hadoop.ozone.client.TenantArgs;
 import org.apache.hadoop.ozone.client.VolumeArgs;
+import org.apache.hadoop.ozone.client.io.OzoneDataStreamOutput;
 import org.apache.hadoop.ozone.client.io.OzoneInputStream;
 import org.apache.hadoop.ozone.client.io.OzoneOutputStream;
 import org.apache.hadoop.ozone.om.OMConfigKeys;
@@ -310,6 +311,20 @@ public interface ClientProtocol {
       Map<String, String> metadata)
       throws IOException;
 
+  /**
+   * Writes a key in an existing bucket.
+   * @param volumeName Name of the Volume
+   * @param bucketName Name of the Bucket
+   * @param keyName Name of the Key
+   * @param size Size of the data
+   * @param replicationConfig Replication configuration
+   * @param metadata custom key value metadata
+   * @return {@link OzoneDataStreamOutput}
+   * @throws IOException in case of I/O failure
+   */
+  OzoneDataStreamOutput createStreamKey(String volumeName, String bucketName,
+      String keyName, long size, ReplicationConfig replicationConfig,
+      Map<String, String> metadata)
+      throws IOException;
 
   /**
    * Reads a key from an existing bucket.
diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java
index 973f98ff3b..8c07a58fa4 100644
--- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java
+++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java
@@ -94,11 +94,13 @@ import org.apache.hadoop.ozone.client.VolumeArgs;
 import org.apache.hadoop.ozone.client.io.BlockInputStreamFactory;
 import org.apache.hadoop.ozone.client.io.BlockInputStreamFactoryImpl;
 import org.apache.hadoop.ozone.client.io.ECKeyOutputStream;
+import org.apache.hadoop.ozone.client.io.KeyDataStreamOutput;
 import org.apache.hadoop.ozone.client.io.KeyInputStream;
 import org.apache.hadoop.ozone.client.io.KeyOutputStream;
 import org.apache.hadoop.ozone.client.io.LengthInputStream;
 import org.apache.hadoop.ozone.client.io.MultipartCryptoKeyInputStream;
 import org.apache.hadoop.ozone.client.io.OzoneCryptoInputStream;
+import org.apache.hadoop.ozone.client.io.OzoneDataStreamOutput;
 import org.apache.hadoop.ozone.client.io.OzoneInputStream;
 import org.apache.hadoop.ozone.client.io.OzoneOutputStream;
 import org.apache.hadoop.ozone.client.protocol.ClientProtocol;
@@ -1196,6 +1198,48 @@ public class RpcClient implements ClientProtocol {
     return createOutputStream(openKey, requestId);
   }
 
+  @Override
+  public OzoneDataStreamOutput createStreamKey(
+      String volumeName, String bucketName, String keyName, long size,
+      ReplicationConfig replicationConfig,
+      Map<String, String> metadata)
+      throws IOException {
+    verifyVolumeName(volumeName);
+    verifyBucketName(bucketName);
+    if (checkKeyNameEnabled) {
+      HddsClientUtils.verifyKeyName(keyName);
+    }
+    HddsClientUtils.checkNotNull(keyName, replicationConfig);
+    String requestId = UUID.randomUUID().toString();
+
+    OmKeyArgs.Builder builder = new OmKeyArgs.Builder()
+        .setVolumeName(volumeName)
+        .setBucketName(bucketName)
+        .setKeyName(keyName)
+        .setDataSize(size)
+        .setReplicationConfig(replicationConfig)
+        .addAllMetadata(metadata)
+        .setAcls(getAclList());
+
+    if (Boolean.parseBoolean(metadata.get(OzoneConsts.GDPR_FLAG))) {
+      try {
+        GDPRSymmetricKey gKey = new GDPRSymmetricKey(new SecureRandom());
+        builder.addAllMetadata(gKey.getKeyDetails());
+      } catch (Exception e) {
+        if (e instanceof InvalidKeyException &&
+            e.getMessage().contains("Illegal key size or default parameters")) {
+          LOG.error("Missing Unlimited Strength Policy jars. Please install " +
+              "Java Cryptography Extension (JCE) Unlimited Strength " +
+              "Jurisdiction Policy Files");
+        }
+        throw new IOException(e);
+      }
+    }
+
+    OpenKeySession openKey = ozoneManagerClient.openKey(builder.build());
+    return createDataStreamOutput(openKey, requestId, replicationConfig);
+  }
+
   private KeyProvider.KeyVersion getDEK(FileEncryptionInfo feInfo)
       throws IOException {
     // check crypto protocol version
@@ -1876,6 +1920,24 @@ public class RpcClient implements ClientProtocol {
           cryptoInputStreams);
     }
   }
+  private OzoneDataStreamOutput createDataStreamOutput(OpenKeySession openKey,
+      String requestId, ReplicationConfig replicationConfig)
+      throws IOException {
+    KeyDataStreamOutput keyOutputStream =
+        new KeyDataStreamOutput.Builder()
+            .setHandler(openKey)
+            .setXceiverClientManager(xceiverClientManager)
+            .setOmClient(ozoneManagerClient)
+            .setRequestID(requestId)
+            .setReplicationConfig(replicationConfig)
+            .enableUnsafeByteBufferConversion(unsafeByteBufferConversion)
+            .setConfig(clientConfig)
+            .build();
+    keyOutputStream
+        .addPreallocateBlocks(openKey.getKeyInfo().getLatestVersionLocations(),
+            openKey.getOpenVersion());
+    return new OzoneDataStreamOutput(keyOutputStream);
+  }
 
   private OzoneOutputStream createOutputStream(OpenKeySession openKey,
       String requestId) throws IOException {
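A sketch of invoking the proxy-level entry point added above, assuming an
initialized ClientProtocol instance "proxy"; all names and the empty
metadata map are placeholders. GDPR key generation kicks in only when the
metadata map carries the GDPR flag.

    OzoneDataStreamOutput out = proxy.createStreamKey(
        "vol1", "bucket1", "key1", 0L,
        ReplicationConfig.fromTypeAndFactor(
            ReplicationType.RATIS, ReplicationFactor.THREE),
        new HashMap<>());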
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBlockDataStreamOutput.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBlockDataStreamOutput.java
new file mode 100644
index 0000000000..4d52d89490
--- /dev/null
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBlockDataStreamOutput.java
@@ -0,0 +1,181 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership.  The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+
+package org.apache.hadoop.ozone.client.rpc;
+
+import io.netty.buffer.Unpooled;
+import org.apache.hadoop.conf.StorageUnit;
+import org.apache.hadoop.hdds.client.ReplicationType;
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ChecksumType;
+import org.apache.hadoop.hdds.scm.OzoneClientConfig;
+import org.apache.hadoop.ozone.MiniOzoneCluster;
+import org.apache.hadoop.ozone.OzoneConfigKeys;
+import org.apache.hadoop.ozone.client.ObjectStore;
+import org.apache.hadoop.ozone.client.OzoneClient;
+import org.apache.hadoop.ozone.client.OzoneClientFactory;
+import org.apache.hadoop.ozone.client.io.OzoneDataStreamOutput;
+import org.apache.hadoop.ozone.container.ContainerTestHelper;
+import org.apache.hadoop.ozone.container.TestHelper;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.Timeout;
+
+import java.io.IOException;
+import java.util.UUID;
+import java.util.concurrent.TimeUnit;
+
+import static java.nio.charset.StandardCharsets.UTF_8;
+import static org.apache.hadoop.hdds.scm.ScmConfigKeys.HDDS_SCM_WATCHER_TIMEOUT;
+import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_STALENODE_INTERVAL;
+
+/**
+ * Tests BlockDataStreamOutput class.
+ */
+public class TestBlockDataStreamOutput {
+
+  /**
+   * Set a timeout for each test.
+   */
+  @Rule
+  public Timeout timeout = Timeout.seconds(300);
+  private static MiniOzoneCluster cluster;
+  private static OzoneConfiguration conf = new OzoneConfiguration();
+  private static OzoneClient client;
+  private static ObjectStore objectStore;
+  private static int chunkSize;
+  private static int flushSize;
+  private static int maxFlushSize;
+  private static int blockSize;
+  private static String volumeName;
+  private static String bucketName;
+  private static String keyString;
+
+  /**
+   * Create a MiniOzoneCluster for testing.
+   *
+   * @throws IOException
+   */
+  @BeforeClass
+  public static void init() throws Exception {
+    chunkSize = 100;
+    flushSize = 2 * chunkSize;
+    maxFlushSize = 2 * flushSize;
+    blockSize = 2 * maxFlushSize;
+
+    OzoneClientConfig clientConfig = conf.getObject(OzoneClientConfig.class);
+    clientConfig.setChecksumType(ChecksumType.NONE);
+    clientConfig.setStreamBufferFlushDelay(false);
+    conf.setFromObject(clientConfig);
+
+    conf.setTimeDuration(HDDS_SCM_WATCHER_TIMEOUT, 1000, TimeUnit.MILLISECONDS);
+    conf.setTimeDuration(OZONE_SCM_STALENODE_INTERVAL, 3, TimeUnit.SECONDS);
+    conf.setQuietMode(false);
+    conf.setStorageSize(OzoneConfigKeys.OZONE_SCM_BLOCK_SIZE, 4,
+        StorageUnit.MB);
+
+    cluster = MiniOzoneCluster.newBuilder(conf)
+        .setNumDatanodes(7)
+        .setTotalPipelineNumLimit(10)
+        .setBlockSize(blockSize)
+        .setChunkSize(chunkSize)
+        .setStreamBufferFlushSize(flushSize)
+        .setStreamBufferMaxSize(maxFlushSize)
+        .setStreamBufferSizeUnit(StorageUnit.BYTES)
+        .build();
+    cluster.waitForClusterToBeReady();
+    // the easiest way to create an open container is to create a key
+    client = OzoneClientFactory.getRpcClient(conf);
+    objectStore = client.getObjectStore();
+    keyString = UUID.randomUUID().toString();
+    volumeName = "testblockoutputstream";
+    bucketName = volumeName;
+    objectStore.createVolume(volumeName);
+    objectStore.getVolume(volumeName).createBucket(bucketName);
+  }
+
+  private String getKeyName() {
+    return UUID.randomUUID().toString();
+  }
+
+  /**
+   * Shut down the MiniOzoneCluster.
+   */
+  @AfterClass
+  public static void shutdown() {
+    if (cluster != null) {
+      cluster.shutdown();
+    }
+  }
+
+  @Test
+  public void testMultiChunkWrite() throws Exception {
+    // write data smaller than one chunk using streaming.
+    String keyName1 = getKeyName();
+    OzoneDataStreamOutput key1 = createKey(
+        keyName1, ReplicationType.RATIS, 0);
+    int dataLength1 = chunkSize / 2;
+    byte[] data1 =
+        ContainerTestHelper.getFixedLengthString(keyString, dataLength1)
+            .getBytes(UTF_8);
+    key1.write(Unpooled.copiedBuffer(data1));
+    // now close the stream; it will update the key length.
+    key1.close();
+    validateData(keyName1, data1);
+
+    // write data larger than one chunk using streaming.
+    String keyName2 = getKeyName();
+    OzoneDataStreamOutput key2 = createKey(
+        keyName2, ReplicationType.RATIS, 0);
+    int dataLength2 = chunkSize + 50;
+    byte[] data2 =
+        ContainerTestHelper.getFixedLengthString(keyString, dataLength2)
+            .getBytes(UTF_8);
+    key2.write(Unpooled.copiedBuffer(data2));
+    // now close the stream; it will update the key length.
+    key2.close();
+    validateData(keyName2, data2);
+
+    // write data larger than one block using streaming.
+    String keyName3 = getKeyName();
+    OzoneDataStreamOutput key3 = createKey(
+        keyName3, ReplicationType.RATIS, 0);
+    int dataLength3 = blockSize + 50;
+    byte[] data3 =
+        ContainerTestHelper.getFixedLengthString(keyString, dataLength3)
+            .getBytes(UTF_8);
+    key3.write(Unpooled.copiedBuffer(data3));
+    // now close the stream; it will update the key length.
+    key3.close();
+    validateData(keyName3, data3);
+  }
+
+  private OzoneDataStreamOutput createKey(String keyName, ReplicationType type,
+      long size) throws Exception {
+    return TestHelper.createStreamKey(
+        keyName, type, size, objectStore, volumeName, bucketName);
+  }
+  private void validateData(String keyName, byte[] data) throws Exception {
+    TestHelper
+        .validateData(keyName, data, objectStore, volumeName, bucketName);
+  }
+
+}
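
For readers following the API at this point in the branch, the test boils down to
a short client-side recipe. Below is a minimal sketch of the same write path
outside the test harness (the volume, bucket and key names are hypothetical; it
assumes a running cluster reachable through the configuration, and the Netty
ByteBuf based write signature used above):

    import java.nio.charset.StandardCharsets;
    import java.util.HashMap;
    import io.netty.buffer.Unpooled;
    import org.apache.hadoop.hdds.client.ReplicationConfig;
    import org.apache.hadoop.hdds.client.ReplicationFactor;
    import org.apache.hadoop.hdds.client.ReplicationType;
    import org.apache.hadoop.hdds.conf.OzoneConfiguration;
    import org.apache.hadoop.ozone.client.ObjectStore;
    import org.apache.hadoop.ozone.client.OzoneBucket;
    import org.apache.hadoop.ozone.client.OzoneClient;
    import org.apache.hadoop.ozone.client.OzoneClientFactory;
    import org.apache.hadoop.ozone.client.io.OzoneDataStreamOutput;

    OzoneConfiguration conf = new OzoneConfiguration();
    OzoneClient client = OzoneClientFactory.getRpcClient(conf);
    ObjectStore store = client.getObjectStore();
    OzoneBucket bucket = store.getVolume("vol1").getBucket("bucket1");
    byte[] data = "hello streaming".getBytes(StandardCharsets.UTF_8);
    try (OzoneDataStreamOutput out = bucket.createStreamKey(
        "key1", data.length,
        ReplicationConfig.fromTypeAndFactor(
            ReplicationType.RATIS, ReplicationFactor.THREE),
        new HashMap<>())) {
      // the buffer is forwarded to the datanodes over the Ratis data stream
      out.write(Unpooled.wrappedBuffer(data));
    }
    // close() commits the key, fixing its final length on the OM
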
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/TestHelper.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/TestHelper.java
index dae6e383f8..cf3a51241e 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/TestHelper.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/TestHelper.java
@@ -48,6 +48,7 @@ import org.apache.hadoop.ozone.OzoneConsts;
 import org.apache.hadoop.ozone.client.ObjectStore;
 import org.apache.hadoop.ozone.client.io.BlockOutputStreamEntry;
 import org.apache.hadoop.ozone.client.io.KeyOutputStream;
+import org.apache.hadoop.ozone.client.io.OzoneDataStreamOutput;
 import org.apache.hadoop.ozone.client.io.OzoneInputStream;
 import org.apache.hadoop.ozone.client.io.OzoneOutputStream;
 import org.apache.hadoop.ozone.container.common.impl.ContainerData;
@@ -134,8 +135,23 @@ public final class TestHelper {
     }
     org.apache.hadoop.hdds.client.ReplicationFactor factor =
             org.apache.hadoop.hdds.client.ReplicationFactor.THREE;
+    ReplicationConfig config =
+            ReplicationConfig.fromTypeAndFactor(type, factor);
     return objectStore.getVolume(volumeName).getBucket(bucketName)
-        .createKey(keyName, size, type, factor, new HashMap<>());
+        .createKey(keyName, size, config, new HashMap<>());
+  }
+
+  public static OzoneDataStreamOutput createStreamKey(String keyName,
+      ReplicationType type, long size, ObjectStore objectStore,
+      String volumeName, String bucketName) throws Exception {
+    org.apache.hadoop.hdds.client.ReplicationFactor factor =
+        type == ReplicationType.STAND_ALONE ?
+            org.apache.hadoop.hdds.client.ReplicationFactor.ONE :
+            org.apache.hadoop.hdds.client.ReplicationFactor.THREE;
+    ReplicationConfig config =
+        ReplicationConfig.fromTypeAndFactor(type, factor);
+    return objectStore.getVolume(volumeName).getBucket(bucketName)
+        .createStreamKey(keyName, size, config, new HashMap<>());
   }
 
   public static OzoneOutputStream createKey(String keyName,
@@ -143,8 +159,10 @@ public final class TestHelper {
       org.apache.hadoop.hdds.client.ReplicationFactor factor, long size,
       ObjectStore objectStore, String volumeName, String bucketName)
       throws Exception {
+    ReplicationConfig config =
+            ReplicationConfig.fromTypeAndFactor(type, factor);
     return objectStore.getVolume(volumeName).getBucket(bucketName)
-        .createKey(keyName, size, type, factor, new HashMap<>());
+        .createKey(keyName, size, config, new HashMap<>());
   }
 
   public static OzoneOutputStream createKey(String keyName,
diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/keys/PutKeyHandler.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/keys/PutKeyHandler.java
index 7d7885d168..e4b842eaca 100644
--- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/keys/PutKeyHandler.java
+++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/keys/PutKeyHandler.java
@@ -23,9 +23,14 @@ import java.io.FileInputStream;
 import java.io.IOException;
 import java.io.InputStream;
 import java.io.OutputStream;
+import java.io.RandomAccessFile;
+import java.nio.ByteBuffer;
+import java.nio.channels.FileChannel;
 import java.util.HashMap;
 import java.util.Map;
 
+import io.netty.buffer.ByteBuf;
+import io.netty.buffer.Unpooled;
 import org.apache.hadoop.conf.StorageUnit;
 import org.apache.hadoop.hdds.client.ReplicationConfig;
 import org.apache.hadoop.io.IOUtils;
@@ -34,6 +39,7 @@ import org.apache.hadoop.ozone.client.OzoneBucket;
 import org.apache.hadoop.ozone.client.OzoneClient;
 import org.apache.hadoop.ozone.client.OzoneClientException;
 import org.apache.hadoop.ozone.client.OzoneVolume;
+import org.apache.hadoop.ozone.client.io.OzoneDataStreamOutput;
 import org.apache.hadoop.ozone.shell.OzoneAddress;
 
 import org.apache.commons.codec.digest.DigestUtils;
@@ -89,10 +95,36 @@ public class PutKeyHandler extends KeyHandler {
 
     int chunkSize = (int) getConf().getStorageSize(OZONE_SCM_CHUNK_SIZE_KEY,
         OZONE_SCM_CHUNK_SIZE_DEFAULT, StorageUnit.BYTES);
-    try (InputStream input = new FileInputStream(dataFile);
-        OutputStream output = bucket.createKey(keyName, dataFile.length(),
-            replicationConfig, keyMetadata)) {
-      IOUtils.copyBytes(input, output, chunkSize);
+
+    if (dataFile.length() <= chunkSize) {
+      if (isVerbose()) {
+        out().println("API: async");
+      }
+      try (InputStream input = new FileInputStream(dataFile);
+           OutputStream output = bucket.createKey(keyName, dataFile.length(),
+               replicationConfig, keyMetadata)) {
+        IOUtils.copyBytes(input, output, chunkSize);
+      }
+    } else {
+      if (isVerbose()) {
+        out().println("API: streaming");
+      }
+      try (RandomAccessFile raf = new RandomAccessFile(dataFile, "r");
+           OzoneDataStreamOutput out = bucket.createStreamKey(keyName,
+               dataFile.length(), replicationConfig, keyMetadata)) {
+        FileChannel ch = raf.getChannel();
+        long len = raf.length();
+        long off = 0;
+        while (len > 0) {
+          long writeLen = Math.min(len, chunkSize);
+          ByteBuffer segment =
+              ch.map(FileChannel.MapMode.READ_ONLY, off, writeLen);
+          ByteBuf buf = Unpooled.wrappedBuffer(segment);
+          out.write(buf);
+          off += writeLen;
+          len -= writeLen;
+        }
+      }
     }
   }
 
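
One detail of the streaming branch above is easy to miss: every iteration maps a
fresh read-only region of the file, and a MappedByteBuffer stays valid until it
is garbage collected, so the segment handed to out.write() cannot be overwritten
while the asynchronous write is still in flight. A heap-buffer variant would
have to allocate a new buffer per chunk for the same reason; a rough sketch, for
illustration only (not part of the patch):

    // hypothetical alternative to FileChannel.map: copy each chunk into a
    // freshly allocated buffer, because writes may complete asynchronously
    // and the buffer must not be reused in the meantime
    ByteBuffer chunk = ByteBuffer.allocate(chunkSize);
    while (ch.read(chunk) > 0) {
      chunk.flip();
      out.write(Unpooled.wrappedBuffer(chunk));
      chunk = ByteBuffer.allocate(chunkSize); // fresh buffer per chunk
    }
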




[ozone] 07/38: HDDS-5705. [Ozone-Streaming] Change ByteBufStreamOutput to ByteBufferStreamOutput (#2603)


szetszwo pushed a commit to branch HDDS-4454
in repository https://gitbox.apache.org/repos/asf/ozone.git

commit 81b9f3d7c11cc882975e982ee5cf4cc49417cab2
Author: Kaijie Chen <ch...@kaijie.org>
AuthorDate: Wed Sep 8 09:25:17 2021 +0800

    HDDS-5705. [Ozone-Streaming] Change ByteBufStreamOutput to ByteBufferStreamOutput (#2603)
    
    (cherry picked from commit 3ea9af3d59b5b7106b94f9fc7623a1705f20cd1d)
    (cherry picked from commit 74c5f0f5fca4475bb4369fd89ab07424fa87008b)
---
 hadoop-hdds/client/pom.xml                         |  4 --
 .../hdds/scm/storage/BlockDataStreamOutput.java    | 26 +++++----
 ...reamOutput.java => ByteBufferStreamOutput.java} | 15 +++--
 .../client/io/BlockDataStreamOutputEntry.java      | 51 ++++++++---------
 .../ozone/client/io/KeyDataStreamOutput.java       | 15 +++--
 .../ozone/client/io/OzoneDataStreamOutput.java     | 30 +++++-----
 .../client/rpc/TestBlockDataStreamOutput.java      | 66 +++++++++-------------
 .../hadoop/ozone/shell/keys/PutKeyHandler.java     |  8 +--
 8 files changed, 98 insertions(+), 117 deletions(-)

diff --git a/hadoop-hdds/client/pom.xml b/hadoop-hdds/client/pom.xml
index 2e5bb745fa..3e3ff0cb9f 100644
--- a/hadoop-hdds/client/pom.xml
+++ b/hadoop-hdds/client/pom.xml
@@ -74,10 +74,6 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd">
       <groupId>org.junit.jupiter</groupId>
       <artifactId>junit-jupiter-params</artifactId>
     </dependency>
-    <dependency>
-      <groupId>io.netty</groupId>
-      <artifactId>netty-buffer</artifactId>
-    </dependency>
   </dependencies>
 
   <build>
diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockDataStreamOutput.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockDataStreamOutput.java
index 39ec2f9219..d0419fa0c3 100644
--- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockDataStreamOutput.java
+++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockDataStreamOutput.java
@@ -20,7 +20,6 @@ package org.apache.hadoop.hdds.scm.storage;
 
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Preconditions;
-import io.netty.buffer.ByteBuf;
 import org.apache.hadoop.hdds.client.BlockID;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
@@ -46,6 +45,7 @@ import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 import java.io.IOException;
+import java.nio.ByteBuffer;
 import java.util.ArrayList;
 import java.util.List;
 import java.util.concurrent.CompletableFuture;
@@ -59,7 +59,7 @@ import java.util.concurrent.atomic.AtomicReference;
 import static org.apache.hadoop.hdds.scm.storage.ContainerProtocolCalls.putBlockAsync;
 
 /**
- * An {@link ByteBufStreamOutput} used by the REST service in combination
+ * A {@link ByteBufferStreamOutput} used by the REST service in combination
  * with the SCMClient to write the value of a key to a sequence
  * of container chunks.  Writes are buffered locally and periodically written to
  * the container as a new chunk.  In order to preserve the semantics that
@@ -74,7 +74,7 @@ import static org.apache.hadoop.hdds.scm.storage.ContainerProtocolCalls.putBlock
  * This class encapsulates all state management for buffering and writing
  * through to the container.
  */
-public class BlockDataStreamOutput implements ByteBufStreamOutput {
+public class BlockDataStreamOutput implements ByteBufferStreamOutput {
   public static final Logger LOG =
       LoggerFactory.getLogger(BlockDataStreamOutput.class);
   public static final String EXCEPTION_MSG =
@@ -209,16 +209,16 @@ public class BlockDataStreamOutput implements ByteBufStreamOutput {
   }
 
   @Override
-  public void write(ByteBuf buf) throws IOException {
+  public void write(ByteBuffer b, int off, int len) throws IOException {
     checkOpen();
-    if (buf == null) {
+    if (b == null) {
       throw new NullPointerException();
     }
-    final int len = buf.readableBytes();
     if (len == 0) {
       return;
     }
-    writeChunkToContainer(buf);
+    writeChunkToContainer(
+            (ByteBuffer) b.asReadOnlyBuffer().position(off).limit(off + len));
 
     writtenDataLength += len;
   }
@@ -476,15 +476,17 @@ public class BlockDataStreamOutput implements ByteBufStreamOutput {
    * Writes buffered data as a new chunk to the container and saves chunk
    * information to be used later in putKey call.
    *
+   * @param buf chunk data to write, from position to limit
    * @throws IOException if there is an I/O error while performing the call
    * @throws OzoneChecksumException if there is an error while computing
    * checksum
    */
-  private void writeChunkToContainer(ByteBuf buf)
+  private void writeChunkToContainer(ByteBuffer buf)
       throws IOException {
-    ChecksumData checksumData = checksum.computeChecksum(buf.nioBuffer());
-    int effectiveChunkSize = buf.readableBytes();
+    final int effectiveChunkSize = buf.remaining();
     final long offset = chunkOffset.getAndAdd(effectiveChunkSize);
+    ChecksumData checksumData =
+        checksum.computeChecksum(buf.asReadOnlyBuffer());
     ChunkInfo chunkInfo = ChunkInfo.newBuilder()
         .setChunkName(blockID.get().getLocalID() + "_chunk_" + ++chunkIndex)
         .setOffset(offset)
@@ -499,8 +501,8 @@ public class BlockDataStreamOutput implements ByteBufStreamOutput {
 
     CompletableFuture<DataStreamReply> future =
         (needSync(offset + effectiveChunkSize) ?
-            out.writeAsync(buf.nioBuffer(), StandardWriteOption.SYNC) :
-            out.writeAsync(buf.nioBuffer()))
+            out.writeAsync(buf, StandardWriteOption.SYNC) :
+            out.writeAsync(buf))
             .whenCompleteAsync((r, e) -> {
               if (e != null || !r.isSuccess()) {
                 if (e == null) {
diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/ByteBufStreamOutput.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/ByteBufferStreamOutput.java
similarity index 82%
rename from hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/ByteBufStreamOutput.java
rename to hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/ByteBufferStreamOutput.java
index 7f40737b70..0650a685b6 100644
--- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/ByteBufStreamOutput.java
+++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/ByteBufferStreamOutput.java
@@ -18,23 +18,24 @@
 
 package org.apache.hadoop.hdds.scm.storage;
 
-import io.netty.buffer.ByteBuf;
-
 import java.io.Closeable;
 import java.io.IOException;
+import java.nio.ByteBuffer;
 
 /**
 * This interface is for writing an output stream of ByteBuffers.
-* An ByteBufStreamOutput accepts Netty ByteBuf and sends them to some sink.
+* A ByteBufferStreamOutput accepts NIO ByteBuffers and sends them to some sink.
 */
-public interface ByteBufStreamOutput extends Closeable {
+public interface ByteBufferStreamOutput extends Closeable {
   /**
    * Try to write all the bytes in ByteBuffer b to DataStream.
    *
    * @param b the data.
    * @exception IOException if an I/O error occurs.
    */
-  void write(ByteBuf b) throws IOException;
+  default void write(ByteBuffer b) throws IOException {
+    write(b, b.position(), b.remaining());
+  }
 
   /**
    * Try to write the [off:off + len) slice in ByteBuffer b to DataStream.
@@ -44,9 +45,7 @@ public interface ByteBufStreamOutput extends Closeable {
    * @param len the number of bytes to write.
    * @exception  IOException  if an I/O error occurs.
    */
-  default void write(ByteBuf b, int off, int len) throws IOException {
-    write(b.slice(off, len));
-  }
+  void write(ByteBuffer b, int off, int len) throws IOException;
 
   /**
    * Flushes this DataStream output and forces any buffered output bytes
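
After this rename, the primitive operation is write(ByteBuffer, off, len) and
the no-offset overload becomes a default method delegating to it. A two-line
sketch of the resulting contract, where out stands for any
ByteBufferStreamOutput (implementations such as BlockDataStreamOutput read
through a read-only duplicate, so the caller's buffer position is not advanced):

    ByteBuffer b = ByteBuffer.wrap(bytes);
    out.write(b);                   // default method: position() .. limit()
    out.write(b, 0, bytes.length);  // explicit slice of the same bytes
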
diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/BlockDataStreamOutputEntry.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/BlockDataStreamOutputEntry.java
index 98907bf8af..f0c3a43e89 100644
--- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/BlockDataStreamOutputEntry.java
+++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/BlockDataStreamOutputEntry.java
@@ -18,18 +18,18 @@
 package org.apache.hadoop.ozone.client.io;
 
 import com.google.common.annotations.VisibleForTesting;
-import io.netty.buffer.ByteBuf;
 import org.apache.hadoop.hdds.client.BlockID;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 import org.apache.hadoop.hdds.scm.OzoneClientConfig;
 import org.apache.hadoop.hdds.scm.XceiverClientFactory;
 import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
 import org.apache.hadoop.hdds.scm.storage.BlockDataStreamOutput;
-import org.apache.hadoop.hdds.scm.storage.ByteBufStreamOutput;
+import org.apache.hadoop.hdds.scm.storage.ByteBufferStreamOutput;
 import org.apache.hadoop.hdds.security.token.OzoneBlockTokenIdentifier;
 import org.apache.hadoop.security.token.Token;
 
 import java.io.IOException;
+import java.nio.ByteBuffer;
 import java.util.Collection;
 import java.util.Collections;
 
@@ -37,10 +37,10 @@ import java.util.Collections;
  * Helper class used inside {@link BlockDataStreamOutput}.
  * */
 public final class BlockDataStreamOutputEntry
-    implements ByteBufStreamOutput {
+    implements ByteBufferStreamOutput {
 
   private final OzoneClientConfig config;
-  private ByteBufStreamOutput byteBufStreamOutput;
+  private ByteBufferStreamOutput byteBufferStreamOutput;
   private BlockID blockID;
   private final String key;
   private final XceiverClientFactory xceiverClientManager;
@@ -61,7 +61,7 @@ public final class BlockDataStreamOutputEntry
       OzoneClientConfig config
   ) {
     this.config = config;
-    this.byteBufStreamOutput = null;
+    this.byteBufferStreamOutput = null;
     this.blockID = blockID;
     this.key = key;
     this.xceiverClientManager = xceiverClientManager;
@@ -90,63 +90,62 @@ public final class BlockDataStreamOutputEntry
    * @throws IOException if xceiverClient initialization fails
    */
   private void checkStream() throws IOException {
-    if (this.byteBufStreamOutput == null) {
-      this.byteBufStreamOutput =
+    if (this.byteBufferStreamOutput == null) {
+      this.byteBufferStreamOutput =
           new BlockDataStreamOutput(blockID, xceiverClientManager,
               pipeline, config, token);
     }
   }
 
   @Override
-  public void write(ByteBuf b) throws IOException {
+  public void write(ByteBuffer b, int off, int len) throws IOException {
     checkStream();
-    final int len = b.readableBytes();
-    byteBufStreamOutput.write(b);
+    byteBufferStreamOutput.write(b, off, len);
     this.currentPosition += len;
   }
 
   @Override
   public void flush() throws IOException {
-    if (this.byteBufStreamOutput != null) {
-      this.byteBufStreamOutput.flush();
+    if (this.byteBufferStreamOutput != null) {
+      this.byteBufferStreamOutput.flush();
     }
   }
 
   @Override
   public void close() throws IOException {
-    if (this.byteBufStreamOutput != null) {
-      this.byteBufStreamOutput.close();
+    if (this.byteBufferStreamOutput != null) {
+      this.byteBufferStreamOutput.close();
       // after closing the chunkOutputStream, blockID would have been
       // reconstructed with updated bcsId
       this.blockID =
-          ((BlockDataStreamOutput) byteBufStreamOutput).getBlockID();
+          ((BlockDataStreamOutput) byteBufferStreamOutput).getBlockID();
     }
   }
 
   boolean isClosed() {
-    if (byteBufStreamOutput != null) {
-      return  ((BlockDataStreamOutput) byteBufStreamOutput).isClosed();
+    if (byteBufferStreamOutput != null) {
+      return ((BlockDataStreamOutput) byteBufferStreamOutput).isClosed();
     }
     return false;
   }
 
   Collection<DatanodeDetails> getFailedServers() {
-    if (byteBufStreamOutput != null) {
+    if (byteBufferStreamOutput != null) {
       BlockDataStreamOutput out =
-          (BlockDataStreamOutput) this.byteBufStreamOutput;
+          (BlockDataStreamOutput) this.byteBufferStreamOutput;
       return out.getFailedServers();
     }
     return Collections.emptyList();
   }
 
   long getWrittenDataLength() {
-    if (byteBufStreamOutput != null) {
+    if (byteBufferStreamOutput != null) {
       BlockDataStreamOutput out =
-          (BlockDataStreamOutput) this.byteBufStreamOutput;
+          (BlockDataStreamOutput) this.byteBufferStreamOutput;
       return out.getWrittenDataLength();
     } else {
       // For a pre allocated block for which no write has been initiated,
-      // the ByteBufStreamOutput will be null here.
+      // the ByteBufferStreamOutput will be null here.
       // In such cases, the default blockCommitSequenceId will be 0
       return 0;
     }
@@ -155,7 +154,7 @@ public final class BlockDataStreamOutputEntry
   void cleanup(boolean invalidateClient) throws IOException {
     checkStream();
     BlockDataStreamOutput out =
-        (BlockDataStreamOutput) this.byteBufStreamOutput;
+        (BlockDataStreamOutput) this.byteBufferStreamOutput;
     out.cleanup(invalidateClient);
 
   }
@@ -163,7 +162,7 @@ public final class BlockDataStreamOutputEntry
   void writeOnRetry(long len) throws IOException {
     checkStream();
     BlockDataStreamOutput out =
-        (BlockDataStreamOutput) this.byteBufStreamOutput;
+        (BlockDataStreamOutput) this.byteBufferStreamOutput;
     out.writeOnRetry(len);
     this.currentPosition += len;
 
@@ -231,8 +230,8 @@ public final class BlockDataStreamOutputEntry
   }
 
   @VisibleForTesting
-  public ByteBufStreamOutput getByteBufStreamOutput() {
-    return byteBufStreamOutput;
+  public ByteBufferStreamOutput getByteBufStreamOutput() {
+    return byteBufferStreamOutput;
   }
 
   public BlockID getBlockID() {
diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/KeyDataStreamOutput.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/KeyDataStreamOutput.java
index c37f9cd51d..9bba89d0a8 100644
--- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/KeyDataStreamOutput.java
+++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/KeyDataStreamOutput.java
@@ -19,7 +19,6 @@ package org.apache.hadoop.ozone.client.io;
 
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Preconditions;
-import io.netty.buffer.ByteBuf;
 import org.apache.hadoop.fs.FSExceptionMessages;
 import org.apache.hadoop.fs.FileEncryptionInfo;
 import org.apache.hadoop.hdds.client.ReplicationConfig;
@@ -32,7 +31,7 @@ import org.apache.hadoop.hdds.scm.container.common.helpers.ExcludeList;
 import org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException;
 import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
 import org.apache.hadoop.hdds.scm.pipeline.PipelineID;
-import org.apache.hadoop.hdds.scm.storage.ByteBufStreamOutput;
+import org.apache.hadoop.hdds.scm.storage.ByteBufferStreamOutput;
 import org.apache.hadoop.io.retry.RetryPolicies;
 import org.apache.hadoop.io.retry.RetryPolicy;
 import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
@@ -48,6 +47,7 @@ import org.slf4j.LoggerFactory;
 
 import java.io.IOException;
 import java.io.InterruptedIOException;
+import java.nio.ByteBuffer;
 import java.util.Collection;
 import java.util.List;
 import java.util.Map;
@@ -63,7 +63,7 @@ import java.util.stream.Collectors;
  *
  * TODO : currently not support multi-thread access.
  */
-public class KeyDataStreamOutput implements ByteBufStreamOutput {
+public class KeyDataStreamOutput implements ByteBufferStreamOutput {
 
   private OzoneClientConfig config;
 
@@ -185,17 +185,16 @@ public class KeyDataStreamOutput implements ByteBufStreamOutput {
   }
 
   @Override
-  public void write(ByteBuf b) throws IOException {
+  public void write(ByteBuffer b, int off, int len) throws IOException {
     checkNotClosed();
     if (b == null) {
       throw new NullPointerException();
     }
-    final int len = b.readableBytes();
-    handleWrite(b, b.readerIndex(), len, false);
+    handleWrite(b, off, len, false);
     writeOffset += len;
   }
 
-  private void handleWrite(ByteBuf b, int off, long len, boolean retry)
+  private void handleWrite(ByteBuffer b, int off, long len, boolean retry)
       throws IOException {
     while (len > 0) {
       try {
@@ -227,7 +226,7 @@ public class KeyDataStreamOutput implements ByteBufStreamOutput {
   }
 
   private int writeToDataStreamOutput(BlockDataStreamOutputEntry current,
-      boolean retry, long len, ByteBuf b, int writeLen, int off,
+      boolean retry, long len, ByteBuffer b, int writeLen, int off,
       long currentPos) throws IOException {
     try {
       if (retry) {
diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/OzoneDataStreamOutput.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/OzoneDataStreamOutput.java
index 378b86872e..d40ac2b332 100644
--- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/OzoneDataStreamOutput.java
+++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/OzoneDataStreamOutput.java
@@ -16,55 +16,55 @@
  */
 package org.apache.hadoop.ozone.client.io;
 
-import io.netty.buffer.ByteBuf;
-import org.apache.hadoop.hdds.scm.storage.ByteBufStreamOutput;
+import org.apache.hadoop.hdds.scm.storage.ByteBufferStreamOutput;
 import org.apache.hadoop.ozone.om.helpers.OmMultipartCommitUploadPartInfo;
 
 import java.io.IOException;
+import java.nio.ByteBuffer;
 
 /**
  * OzoneDataStreamOutput is used to write data into Ozone.
  * It uses SCM's {@link KeyDataStreamOutput} for writing the data.
  */
-public class OzoneDataStreamOutput implements ByteBufStreamOutput {
+public class OzoneDataStreamOutput implements ByteBufferStreamOutput {
 
-  private final ByteBufStreamOutput byteBufStreamOutput;
+  private final ByteBufferStreamOutput byteBufferStreamOutput;
 
   /**
    * Constructs OzoneDataStreamOutput with KeyDataStreamOutput.
    *
-   * @param byteBufStreamOutput
+   * @param byteBufferStreamOutput the underlying ByteBufferStreamOutput
    */
-  public OzoneDataStreamOutput(ByteBufStreamOutput byteBufStreamOutput) {
-    this.byteBufStreamOutput = byteBufStreamOutput;
+  public OzoneDataStreamOutput(ByteBufferStreamOutput byteBufferStreamOutput) {
+    this.byteBufferStreamOutput = byteBufferStreamOutput;
   }
 
   @Override
-  public void write(ByteBuf b) throws IOException {
-    byteBufStreamOutput.write(b);
+  public void write(ByteBuffer b, int off, int len) throws IOException {
+    byteBufferStreamOutput.write(b, off, len);
   }
 
   @Override
   public synchronized void flush() throws IOException {
-    byteBufStreamOutput.flush();
+    byteBufferStreamOutput.flush();
   }
 
   @Override
   public synchronized void close() throws IOException {
     //commitKey can be done here, if needed.
-    byteBufStreamOutput.close();
+    byteBufferStreamOutput.close();
   }
 
   public OmMultipartCommitUploadPartInfo getCommitUploadPartInfo() {
-    if (byteBufStreamOutput instanceof KeyDataStreamOutput) {
+    if (byteBufferStreamOutput instanceof KeyDataStreamOutput) {
       return ((KeyDataStreamOutput)
-              byteBufStreamOutput).getCommitUploadPartInfo();
+              byteBufferStreamOutput).getCommitUploadPartInfo();
     }
     // Otherwise return null.
     return null;
   }
 
-  public ByteBufStreamOutput getByteBufStreamOutput() {
-    return byteBufStreamOutput;
+  public ByteBufferStreamOutput getByteBufStreamOutput() {
+    return byteBufferStreamOutput;
   }
 }
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBlockDataStreamOutput.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBlockDataStreamOutput.java
index 4d52d89490..6d5401d651 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBlockDataStreamOutput.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBlockDataStreamOutput.java
@@ -17,7 +17,6 @@
 
 package org.apache.hadoop.ozone.client.rpc;
 
-import io.netty.buffer.Unpooled;
 import org.apache.hadoop.conf.StorageUnit;
 import org.apache.hadoop.hdds.client.ReplicationType;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
@@ -38,6 +37,7 @@ import org.junit.Test;
 import org.junit.rules.Timeout;
 
 import java.io.IOException;
+import java.nio.ByteBuffer;
 import java.util.UUID;
 import java.util.concurrent.TimeUnit;
 
@@ -126,48 +126,38 @@ public class TestBlockDataStreamOutput {
     }
   }
 
+  @Test
+  public void testHalfChunkWrite() throws Exception {
+    testWrite(chunkSize / 2);
+  }
+
+  @Test
+  public void testSingleChunkWrite() throws Exception {
+    testWrite(chunkSize);
+  }
+
   @Test
   public void testMultiChunkWrite() throws Exception {
-    // write data less than 1 chunk size use streaming.
-    String keyName1 = getKeyName();
-    OzoneDataStreamOutput key1 = createKey(
-        keyName1, ReplicationType.RATIS, 0);
-    int dataLength1 = chunkSize/2;
-    byte[] data1 =
-        ContainerTestHelper.getFixedLengthString(keyString, dataLength1)
-            .getBytes(UTF_8);
-    key1.write(Unpooled.copiedBuffer(data1));
-    // now close the stream, It will update the key length.
-    key1.close();
-    validateData(keyName1, data1);
-
-    // write data more than 1 chunk size use streaming.
-    String keyName2 = getKeyName();
-    OzoneDataStreamOutput key2 = createKey(
-        keyName2, ReplicationType.RATIS, 0);
-    int dataLength2 = chunkSize + 50;
-    byte[] data2 =
-        ContainerTestHelper.getFixedLengthString(keyString, dataLength2)
-            .getBytes(UTF_8);
-    key2.write(Unpooled.copiedBuffer(data2));
-    // now close the stream, It will update the key length.
-    key2.close();
-    validateData(keyName2, data2);
-
-    // write data more than 1 block size use streaming.
-    String keyName3 = getKeyName();
-    OzoneDataStreamOutput key3 = createKey(
-        keyName3, ReplicationType.RATIS, 0);
-    int dataLength3 = blockSize + 50;
-    byte[] data3 =
-        ContainerTestHelper.getFixedLengthString(keyString, dataLength3)
+    testWrite(chunkSize + 50);
+  }
+
+  @Test
+  public void testMultiBlockWrite() throws Exception {
+    testWrite(blockSize + 50);
+  }
+
+  private void testWrite(int dataLength) throws Exception {
+    String keyName = getKeyName();
+    OzoneDataStreamOutput key = createKey(
+        keyName, ReplicationType.RATIS, 0);
+    byte[] data =
+        ContainerTestHelper.getFixedLengthString(keyString, dataLength)
             .getBytes(UTF_8);
-    key3.write(Unpooled.copiedBuffer(data3));
+    key.write(ByteBuffer.wrap(data));
     // now close the stream; it will update the key length.
-    key3.close();
-    validateData(keyName3, data3);
+    key.close();
+    validateData(keyName, data);
   }
-
   private OzoneDataStreamOutput createKey(String keyName, ReplicationType type,
       long size) throws Exception {
     return TestHelper.createStreamKey(
diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/keys/PutKeyHandler.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/keys/PutKeyHandler.java
index e4b842eaca..c34783dbdd 100644
--- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/keys/PutKeyHandler.java
+++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/shell/keys/PutKeyHandler.java
@@ -29,8 +29,6 @@ import java.nio.channels.FileChannel;
 import java.util.HashMap;
 import java.util.Map;
 
-import io.netty.buffer.ByteBuf;
-import io.netty.buffer.Unpooled;
 import org.apache.hadoop.conf.StorageUnit;
 import org.apache.hadoop.hdds.client.ReplicationConfig;
 import org.apache.hadoop.io.IOUtils;
@@ -117,10 +115,8 @@ public class PutKeyHandler extends KeyHandler {
         long off = 0;
         while (len > 0) {
           long writeLen = Math.min(len, chunkSize);
-          ByteBuffer segment =
-              ch.map(FileChannel.MapMode.READ_ONLY, off, writeLen);
-          ByteBuf buf = Unpooled.wrappedBuffer(segment);
-          out.write(buf);
+          ByteBuffer bb = ch.map(FileChannel.MapMode.READ_ONLY, off, writeLen);
+          out.write(bb);
           off += writeLen;
           len -= writeLen;
         }




[ozone] 17/38: HDDS-5743. [Ozone-Streaming] Add option to write files via streaming api in ofs and o3fs. (#2770)


szetszwo pushed a commit to branch HDDS-4454
in repository https://gitbox.apache.org/repos/asf/ozone.git

commit f3a245157af1d162cb7d284a4f8fcea5eee3c825
Author: Sadanand Shenoy <sa...@gmail.com>
AuthorDate: Fri Nov 19 12:14:05 2021 +0530

    HDDS-5743. [Ozone-Streaming] Add option to write files via streaming api in ofs and o3fs. (#2770)
    
    (cherry picked from commit 2020c2266e1a75d13350b92ec02e9197d15bfbb8)
    (cherry picked from commit 4194bd6ce077c9d05f3847b66a80bfe1ff3ad0c9)
---
 .../org/apache/hadoop/ozone/OzoneConfigKeys.java   |   7 ++
 .../common/src/main/resources/ozone-default.xml    |   7 ++
 .../apache/hadoop/ozone/client/OzoneBucket.java    |   8 ++
 .../ozone/client/protocol/ClientProtocol.java      |   5 +
 .../apache/hadoop/ozone/client/rpc/RpcClient.java  |  19 ++++
 .../fs/ozone/BasicOzoneClientAdapterImpl.java      |  33 +++++++
 .../hadoop/fs/ozone/BasicOzoneFileSystem.java      |   8 ++
 .../ozone/BasicRootedOzoneClientAdapterImpl.java   |  39 ++++++++
 .../fs/ozone/BasicRootedOzoneFileSystem.java       |   8 ++
 .../apache/hadoop/fs/ozone/OzoneClientAdapter.java |   3 +
 .../hadoop/fs/ozone/OzoneFSDataStreamOutput.java   | 103 +++++++++++++++++++++
 11 files changed, 240 insertions(+)

diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java
index b9365d7e04..1528c2d854 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java
@@ -97,6 +97,13 @@ public final class OzoneConfigKeys {
   public static final int DFS_CONTAINER_RATIS_DATASTREAM_PORT_DEFAULT
       = 9855;
 
+  /**
+   * Flag to enable Ratis streaming for filesystem writes.
+   */
+  public static final String OZONE_FS_DATASTREAM_ENABLE =
+      "ozone.fs.datastream.enable";
+  public static final boolean OZONE_FS_DATASTREAM_ENABLE_DEFAULT = false;
+
   /**
    * When set to true, allocate a random free port for ozone container, so that
    * a mini cluster is able to launch multiple containers on a node.
diff --git a/hadoop-hdds/common/src/main/resources/ozone-default.xml b/hadoop-hdds/common/src/main/resources/ozone-default.xml
index 5f2743bbc5..69eed9cd0f 100644
--- a/hadoop-hdds/common/src/main/resources/ozone-default.xml
+++ b/hadoop-hdds/common/src/main/resources/ozone-default.xml
@@ -3378,5 +3378,13 @@
     </description>
   </property>
 
+  <property>
+    <name>ozone.fs.datastream.enable</name>
+    <value>false</value>
+    <tag>OZONE, DATANODE</tag>
+    <description>
+      To enable/disable filesystem writes via Ratis streaming.
+    </description>
+  </property>
 
 </configuration>
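
Since the flag defaults to false, the streaming write path has to be switched on
explicitly, either through ozone-site.xml or programmatically. A minimal sketch
using the constants added above:

    OzoneConfiguration conf = new OzoneConfiguration();
    // opt o3fs/ofs writes into the Ratis streaming path
    conf.setBoolean(OzoneConfigKeys.OZONE_FS_DATASTREAM_ENABLE, true);
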
diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneBucket.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneBucket.java
index 20d34df15e..d171a43f8a 100644
--- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneBucket.java
+++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneBucket.java
@@ -955,6 +955,14 @@ public class OzoneBucket extends WithMetadata {
             overWrite, recursive);
   }
 
+  public OzoneDataStreamOutput createStreamFile(String keyName, long size,
+      ReplicationConfig replicationConfig, boolean overWrite,
+      boolean recursive) throws IOException {
+    return proxy
+        .createStreamFile(volumeName, name, keyName, size, replicationConfig,
+            overWrite, recursive);
+  }
+
   /**
    * List the status for a file or a directory and its contents.
    *
diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/protocol/ClientProtocol.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/protocol/ClientProtocol.java
index 7660fc0e46..290c8db5ec 100644
--- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/protocol/ClientProtocol.java
+++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/protocol/ClientProtocol.java
@@ -829,6 +829,11 @@ public interface ClientProtocol {
       String keyName, long size, ReplicationConfig replicationConfig,
       boolean overWrite, boolean recursive) throws IOException;
 
+  @SuppressWarnings("checkstyle:parameternumber")
+  OzoneDataStreamOutput createStreamFile(String volumeName, String bucketName,
+      String keyName, long size, ReplicationConfig replicationConfig,
+      boolean overWrite, boolean recursive) throws IOException;
+
 
   /**
    * List the status for a file or a directory and its contents.
diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java
index fcdf2b9107..b37b83f67e 100644
--- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java
+++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java
@@ -1855,6 +1855,25 @@ public class RpcClient implements ClientProtocol {
         .build();
   }
 
+  @Override
+  public OzoneDataStreamOutput createStreamFile(String volumeName,
+      String bucketName, String keyName, long size,
+      ReplicationConfig replicationConfig, boolean overWrite, boolean recursive)
+      throws IOException {
+    OmKeyArgs keyArgs = new OmKeyArgs.Builder()
+        .setVolumeName(volumeName)
+        .setBucketName(bucketName)
+        .setKeyName(keyName)
+        .setDataSize(size)
+        .setReplicationConfig(replicationConfig)
+        .setAcls(getAclList())
+        .setLatestVersionLocation(getLatestVersionLocation)
+        .build();
+    OpenKeySession keySession =
+        ozoneManagerClient.createFile(keyArgs, overWrite, recursive);
+    return createDataStreamOutput(keySession, UUID.randomUUID().toString(),
+        replicationConfig);
+  }
 
   @Override
   public List<OzoneFileStatus> listStatus(String volumeName, String bucketName,
diff --git a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneClientAdapterImpl.java b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneClientAdapterImpl.java
index 25e4ffbb18..3265b80e7b 100644
--- a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneClientAdapterImpl.java
+++ b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneClientAdapterImpl.java
@@ -53,6 +53,7 @@ import org.apache.hadoop.ozone.client.OzoneClient;
 import org.apache.hadoop.ozone.client.OzoneClientFactory;
 import org.apache.hadoop.ozone.client.OzoneKey;
 import org.apache.hadoop.ozone.client.OzoneVolume;
+import org.apache.hadoop.ozone.client.io.OzoneDataStreamOutput;
 import org.apache.hadoop.ozone.client.io.OzoneOutputStream;
 import org.apache.hadoop.ozone.common.MonotonicClock;
 import org.apache.hadoop.ozone.om.exceptions.OMException;
@@ -265,6 +266,38 @@ public class BasicOzoneClientAdapterImpl implements OzoneClientAdapter {
     return this.bucketReplicationConfig;
   }
 
+  @Override
+  public OzoneFSDataStreamOutput createStreamFile(String key, short replication,
+      boolean overWrite, boolean recursive) throws IOException {
+    incrementCounter(Statistic.OBJECTS_CREATED, 1);
+    try {
+      OzoneDataStreamOutput ozoneDataStreamOutput = null;
+      if (replication == ReplicationFactor.ONE.getValue()
+          || replication == ReplicationFactor.THREE.getValue()) {
+
+        ReplicationConfig customReplicationConfig =
+            ReplicationConfig.adjustReplication(bucketReplicationConfig,
+                replication, config);
+        ozoneDataStreamOutput = bucket
+            .createStreamFile(key, 0, customReplicationConfig, overWrite,
+                recursive);
+      } else {
+        ozoneDataStreamOutput = bucket.createStreamFile(
+            key, 0, bucketReplicationConfig, overWrite, recursive);
+      }
+      return new OzoneFSDataStreamOutput(
+          ozoneDataStreamOutput.getByteBufStreamOutput());
+    } catch (OMException ex) {
+      if (ex.getResult() == OMException.ResultCodes.FILE_ALREADY_EXISTS
+          || ex.getResult() == OMException.ResultCodes.NOT_A_FILE) {
+        throw new FileAlreadyExistsException(
+            ex.getResult().name() + ": " + ex.getMessage());
+      } else {
+        throw ex;
+      }
+    }
+  }
+
   @Override
   public void renameKey(String key, String newKeyName) throws IOException {
     incrementCounter(Statistic.OBJECTS_RENAMED, 1);
diff --git a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneFileSystem.java b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneFileSystem.java
index 2549412147..6ca32c4f3c 100644
--- a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneFileSystem.java
+++ b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicOzoneFileSystem.java
@@ -42,6 +42,7 @@ import org.apache.hadoop.hdds.conf.ConfigurationSource;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.conf.StorageUnit;
 import org.apache.hadoop.hdds.utils.LegacyHadoopConfigurationSource;
+import org.apache.hadoop.ozone.OzoneConfigKeys;
 import org.apache.hadoop.ozone.om.exceptions.OMException;
 import org.apache.hadoop.ozone.om.helpers.OzoneFSUtils;
 import org.apache.hadoop.security.UserGroupInformation;
@@ -259,6 +260,13 @@ public class BasicOzoneFileSystem extends FileSystem {
 
   private FSDataOutputStream createOutputStream(String key, short replication,
       boolean overwrite, boolean recursive) throws IOException {
+    boolean isRatisStreamingEnabled = getConf().getBoolean(
+        OzoneConfigKeys.OZONE_FS_DATASTREAM_ENABLE,
+        OzoneConfigKeys.OZONE_FS_DATASTREAM_ENABLE_DEFAULT);
+    if (isRatisStreamingEnabled) {
+      return new FSDataOutputStream(adapter.createStreamFile(key,
+          replication, overwrite, recursive), statistics);
+    }
     return new FSDataOutputStream(adapter.createFile(key,
         replication, overwrite, recursive), statistics);
   }
diff --git a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicRootedOzoneClientAdapterImpl.java b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicRootedOzoneClientAdapterImpl.java
index 724a76bb78..621ce0e5c1 100644
--- a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicRootedOzoneClientAdapterImpl.java
+++ b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicRootedOzoneClientAdapterImpl.java
@@ -61,6 +61,7 @@ import org.apache.hadoop.ozone.client.OzoneClient;
 import org.apache.hadoop.ozone.client.OzoneClientFactory;
 import org.apache.hadoop.ozone.client.OzoneKey;
 import org.apache.hadoop.ozone.client.OzoneVolume;
+import org.apache.hadoop.ozone.client.io.OzoneDataStreamOutput;
 import org.apache.hadoop.ozone.client.io.OzoneOutputStream;
 import org.apache.hadoop.ozone.client.protocol.ClientProtocol;
 import org.apache.hadoop.ozone.client.BucketArgs;
@@ -392,6 +393,44 @@ public class BasicRootedOzoneClientAdapterImpl
     }
   }
 
+  @Override
+  public OzoneFSDataStreamOutput createStreamFile(String pathStr,
+      short replication, boolean overWrite, boolean recursive)
+      throws IOException {
+    incrementCounter(Statistic.OBJECTS_CREATED, 1);
+    OFSPath ofsPath = new OFSPath(pathStr);
+    if (ofsPath.isRoot() || ofsPath.isVolume() || ofsPath.isBucket()) {
+      throw new IOException("Cannot create file under root or volume.");
+    }
+    String key = ofsPath.getKeyName();
+    try {
+      // Hadoop CopyCommands class always sets recursive to true
+      OzoneBucket bucket = getBucket(ofsPath, recursive);
+      OzoneDataStreamOutput ozoneDataStreamOutput = null;
+      if (replication == ReplicationFactor.ONE.getValue()
+          || replication == ReplicationFactor.THREE.getValue()) {
+
+        ozoneDataStreamOutput = bucket.createStreamFile(key, 0,
+            ReplicationConfig.adjustReplication(
+                clientConfiguredReplicationConfig, replication, config),
+            overWrite, recursive);
+      } else {
+        ozoneDataStreamOutput = bucket.createStreamFile(
+            key, 0, clientConfiguredReplicationConfig, overWrite, recursive);
+      }
+      return new OzoneFSDataStreamOutput(
+          ozoneDataStreamOutput.getByteBufStreamOutput());
+    } catch (OMException ex) {
+      if (ex.getResult() == OMException.ResultCodes.FILE_ALREADY_EXISTS
+          || ex.getResult() == OMException.ResultCodes.NOT_A_FILE) {
+        throw new FileAlreadyExistsException(
+            ex.getResult().name() + ": " + ex.getMessage());
+      } else {
+        throw ex;
+      }
+    }
+  }
+
   @Override
   public void renameKey(String key, String newKeyName) throws IOException {
     throw new IOException("OFS doesn't support renameKey, use rename instead.");
diff --git a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicRootedOzoneFileSystem.java b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicRootedOzoneFileSystem.java
index ea0a0020b1..b248abce60 100644
--- a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicRootedOzoneFileSystem.java
+++ b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/BasicRootedOzoneFileSystem.java
@@ -41,6 +41,7 @@ import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.conf.StorageUnit;
 import org.apache.hadoop.hdds.utils.LegacyHadoopConfigurationSource;
 import org.apache.hadoop.ozone.OFSPath;
+import org.apache.hadoop.ozone.OzoneConfigKeys;
 import org.apache.hadoop.ozone.client.OzoneBucket;
 import org.apache.hadoop.ozone.client.OzoneVolume;
 import org.apache.hadoop.ozone.om.exceptions.OMException;
@@ -239,6 +240,13 @@ public class BasicRootedOzoneFileSystem extends FileSystem {
 
   private FSDataOutputStream createOutputStream(String key, short replication,
       boolean overwrite, boolean recursive) throws IOException {
+    boolean isRatisStreamingEnabled = getConf().getBoolean(
+        OzoneConfigKeys.OZONE_FS_DATASTREAM_ENABLE,
+        OzoneConfigKeys.OZONE_FS_DATASTREAM_ENABLE_DEFAULT);
+    if (isRatisStreamingEnabled) {
+      return new FSDataOutputStream(adapter.createStreamFile(key,
+          replication, overwrite, recursive), statistics);
+    }
     return new FSDataOutputStream(adapter.createFile(key,
         replication, overwrite, recursive), statistics);
   }
diff --git a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/OzoneClientAdapter.java b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/OzoneClientAdapter.java
index 31bf351f01..24566cb83f 100644
--- a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/OzoneClientAdapter.java
+++ b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/OzoneClientAdapter.java
@@ -45,6 +45,9 @@ public interface OzoneClientAdapter {
   OzoneFSOutputStream createFile(String key, short replication,
       boolean overWrite, boolean recursive) throws IOException;
 
+  OzoneFSDataStreamOutput createStreamFile(String key, short replication,
+      boolean overWrite, boolean recursive) throws IOException;
+
   void renameKey(String key, String newKeyName) throws IOException;
 
   // Users should use rename instead of renameKey in OFS.
diff --git a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/OzoneFSDataStreamOutput.java b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/OzoneFSDataStreamOutput.java
new file mode 100644
index 0000000000..515dbca92b
--- /dev/null
+++ b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/OzoneFSDataStreamOutput.java
@@ -0,0 +1,103 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ *  with the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ */
+package org.apache.hadoop.fs.ozone;
+
+import org.apache.hadoop.hdds.scm.storage.ByteBufferStreamOutput;
+
+import java.io.IOException;
+import java.io.OutputStream;
+import java.nio.ByteBuffer;
+
+/**
+ * The ByteBuffer output stream for Ozone file system.
+ */
+public class OzoneFSDataStreamOutput extends OutputStream
+    implements ByteBufferStreamOutput {
+
+  private final ByteBufferStreamOutput byteBufferStreamOutput;
+
+  public OzoneFSDataStreamOutput(
+      ByteBufferStreamOutput byteBufferStreamOutput) {
+    this.byteBufferStreamOutput = byteBufferStreamOutput;
+  }
+
+  /**
+   * Try to write the [off:off + len) slice in ByteBuffer b to DataStream.
+   *
+   * @param b   the data.
+   * @param off the start offset in the data.
+   * @param len the number of bytes to write.
+   * @throws IOException if an I/O error occurs.
+   */
+  @Override
+  public void write(ByteBuffer b, int off, int len)
+      throws IOException {
+    byteBufferStreamOutput.write(b, off, len);
+  }
+
+  /**
+   * Writes the specified byte to this output stream. The general
+   * contract for <code>write</code> is that one byte is written
+   * to the output stream. The byte to be written is the eight
+   * low-order bits of the argument <code>b</code>. The 24
+   * high-order bits of <code>b</code> are ignored.
+   * <p>
+   * Subclasses of <code>OutputStream</code> must provide an
+   * implementation for this method.
+   *
+   * @param b the <code>byte</code>.
+   * @throws IOException if an I/O error occurs. In particular,
+   *                     an <code>IOException</code> may be thrown if the
+   *                     output stream has been closed.
+   */
+  @Override
+  public void write(int b) throws IOException {
+    byte[] singleBytes = new byte[1];
+    singleBytes[0] = (byte) b;
+    byteBufferStreamOutput.write(ByteBuffer.wrap(singleBytes));
+  }
+
+  /**
+   * Flushes this DataStream output and forces any buffered output bytes
+   * to be written out.
+   *
+   * @throws IOException if an I/O error occurs.
+   */
+  @Override
+  public void flush() throws IOException {
+    byteBufferStreamOutput.flush();
+  }
+
+  /**
+   * Closes this stream and releases any system resources associated
+   * with it. If the stream is already closed then invoking this
+   * method has no effect.
+   *
+   * <p> As noted in {@link AutoCloseable#close()}, cases where the
+   * close may fail require careful attention. It is strongly advised
+   * to relinquish the underlying resources and to internally
+   * <em>mark</em> the {@code Closeable} as closed, prior to throwing
+   * the {@code IOException}.
+   *
+   * @throws IOException if an I/O error occurs
+   */
+  @Override
+  public void close() throws IOException {
+    byteBufferStreamOutput.close();
+  }
+}
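
One caveat worth keeping in mind: the write(int) path above wraps every single
byte in its own one-byte ByteBuffer, so byte-at-a-time writers pay an allocation
per byte and callers should prefer the bulk ByteBuffer overload. A small usage
sketch (payload is a hypothetical byte array; the adapter wiring is as in the
createStreamFile methods above):

    OzoneFSDataStreamOutput out = new OzoneFSDataStreamOutput(
        ozoneDataStreamOutput.getByteBufStreamOutput());
    out.write(ByteBuffer.wrap(payload)); // preferred: one buffer, one write
    out.write(0x7f);                     // works, but allocates per byte
    out.close();
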




[ozone] 32/38: HDDS-6137. [Ozone-Streaming] Refactor KeyDataStreamOutput. (#3195)


szetszwo pushed a commit to branch HDDS-4454
in repository https://gitbox.apache.org/repos/asf/ozone.git

commit 99b414df25ff29d4ec357e5cb96ce08ee4627611
Author: hao guo <gu...@360.cn>
AuthorDate: Mon Mar 28 16:16:31 2022 +0800

    HDDS-6137. [Ozone-Streaming] Refactor KeyDataStreamOutput. (#3195)
    
    (cherry picked from commit 88c4d59d9b780ad29cbf0bd1f7a3981a3b697c98)
    (cherry picked from commit 0a432aa0eb57bd25d63e656d0ec6e96a05689ac7)
---
 .../hdds/scm/storage/AbstractDataStreamOutput.java | 130 +++++++++++++++++++++
 .../hdds/scm/storage/BlockDataStreamOutput.java    |  36 +-----
 .../org/apache/hadoop/hdds/ratis/RatisHelper.java  |  32 +++++
 .../ozone/container/ContainerTestHelper.java       |  12 ++
 .../server/ratis/ContainerStateMachine.java        |  35 ++++--
 .../ozone/container/keyvalue/KeyValueHandler.java  |  33 +++---
 .../keyvalue/impl/KeyValueStreamDataChannel.java   |   2 +-
 .../client/io/BlockDataStreamOutputEntryPool.java  |  26 -----
 .../ozone/client/io/KeyDataStreamOutput.java       | 121 ++-----------------
 .../client/rpc/TestBlockDataStreamOutput.java      |   4 +-
 .../rpc/TestContainerStateMachineStream.java       |  59 ++++++++--
 11 files changed, 279 insertions(+), 211 deletions(-)

diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/AbstractDataStreamOutput.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/AbstractDataStreamOutput.java
new file mode 100644
index 0000000000..e29670d781
--- /dev/null
+++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/AbstractDataStreamOutput.java
@@ -0,0 +1,130 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ *  with the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ */
+
+package org.apache.hadoop.hdds.scm.storage;
+
+import com.google.common.annotations.VisibleForTesting;
+import com.google.common.base.Preconditions;
+import org.apache.hadoop.hdds.scm.client.HddsClientUtils;
+import org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException;
+import org.apache.hadoop.io.retry.RetryPolicy;
+import org.apache.ratis.protocol.exceptions.AlreadyClosedException;
+import org.apache.ratis.protocol.exceptions.RaftRetryFailureException;
+
+import java.io.IOException;
+import java.io.InterruptedIOException;
+import java.util.Map;
+
+/**
+ * Base class providing common retry and error handling for data stream outputs.
+ */
+public abstract class AbstractDataStreamOutput
+    implements ByteBufferStreamOutput {
+
+  private final Map<Class<? extends Throwable>, RetryPolicy> retryPolicyMap;
+  private int retryCount;
+  private boolean isException;
+
+  protected AbstractDataStreamOutput(
+      Map<Class<? extends Throwable>, RetryPolicy> retryPolicyMap) {
+    this.retryPolicyMap = retryPolicyMap;
+    this.isException = false;
+    this.retryCount = 0;
+  }
+
+  @VisibleForTesting
+  public int getRetryCount() {
+    return retryCount;
+  }
+
+  protected void resetRetryCount() {
+    retryCount = 0;
+  }
+
+  protected boolean isException() {
+    return isException;
+  }
+
+  /**
+   * Checks whether the given exception signifies retry failure in the Ratis
+   * client. On retry failure, the Ratis client throws RaftRetryFailureException
+   * and all subsequent operations fail with AlreadyClosedException.
+   */
+  protected boolean checkForRetryFailure(Throwable t) {
+    return t instanceof RaftRetryFailureException
+        || t instanceof AlreadyClosedException;
+  }
+
+  // Every container-specific exception from the datanode is surfaced as
+  // a StorageContainerException.
+  protected boolean checkIfContainerToExclude(Throwable t) {
+    return t instanceof StorageContainerException;
+  }
+
+  protected void setExceptionAndThrow(IOException ioe) throws IOException {
+    isException = true;
+    throw ioe;
+  }
+
+  protected void handleRetry(IOException exception) throws IOException {
+    RetryPolicy retryPolicy = retryPolicyMap
+        .get(HddsClientUtils.checkForException(exception).getClass());
+    if (retryPolicy == null) {
+      retryPolicy = retryPolicyMap.get(Exception.class);
+    }
+    handleRetry(exception, retryPolicy);
+  }
+
+  protected void handleRetry(IOException exception, RetryPolicy retryPolicy)
+      throws IOException {
+    RetryPolicy.RetryAction action = null;
+    try {
+      action = retryPolicy.shouldRetry(exception, retryCount, 0, true);
+    } catch (Exception e) {
+      setExceptionAndThrow(new IOException(e));
+    }
+    if (action != null &&
+        action.action == RetryPolicy.RetryAction.RetryDecision.FAIL) {
+      String msg = "";
+      if (action.reason != null) {
+        msg = "Retry request failed. " + action.reason;
+      }
+      setExceptionAndThrow(new IOException(msg, exception));
+    }
+
+    // Throw the exception if the thread is interrupted
+    if (Thread.currentThread().isInterrupted()) {
+      setExceptionAndThrow(exception);
+    }
+    Preconditions.checkNotNull(action);
+    Preconditions.checkArgument(
+        action.action == RetryPolicy.RetryAction.RetryDecision.RETRY);
+    if (action.delayMillis > 0) {
+      try {
+        Thread.sleep(action.delayMillis);
+      } catch (InterruptedException e) {
+        Thread.currentThread().interrupt();
+        IOException ioe = (IOException) new InterruptedIOException(
+            "Interrupted: action=" + action + ", retry policy=" + retryPolicy)
+            .initCause(e);
+        setExceptionAndThrow(ioe);
+      }
+    }
+    retryCount++;
+  }
+}
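
To see how the methods above are meant to compose, here is a hedged
sketch (RetryingOutput and attemptWrite are hypothetical, not part of
this patch): every failed attempt is routed through handleRetry, which
either sleeps per the matched RetryPolicy and bumps retryCount, or
aborts via setExceptionAndThrow; resetRetryCount clears the counter once
an attempt finally succeeds.

    import java.io.IOException;
    import java.nio.ByteBuffer;
    import java.util.Map;
    import org.apache.hadoop.hdds.scm.storage.AbstractDataStreamOutput;
    import org.apache.hadoop.io.retry.RetryPolicy;

    abstract class RetryingOutput extends AbstractDataStreamOutput {
      RetryingOutput(Map<Class<? extends Throwable>, RetryPolicy> policies) {
        super(policies);
      }

      // Hypothetical single attempt, supplied by the concrete subclass.
      abstract void attemptWrite(ByteBuffer data) throws IOException;

      final void writeWithRetry(ByteBuffer data) throws IOException {
        for (;;) {
          try {
            attemptWrite(data.duplicate()); // each retry sees a fresh view
            resetRetryCount();              // success: clear the counter
            return;
          } catch (IOException e) {
            handleRetry(e); // sleeps per policy, or rethrows and marks failure
          }
        }
      }
    }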
diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockDataStreamOutput.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockDataStreamOutput.java
index 3df5eb0e12..d5b9dd9d81 100644
--- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockDataStreamOutput.java
+++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockDataStreamOutput.java
@@ -28,6 +28,7 @@ import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ChunkInfo;
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerCommandResponseProto;
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.KeyValue;
 import org.apache.hadoop.hdds.ratis.ContainerCommandRequestMessage;
+import org.apache.hadoop.hdds.ratis.RatisHelper;
 import org.apache.hadoop.hdds.scm.OzoneClientConfig;
 import org.apache.hadoop.hdds.scm.XceiverClientFactory;
 import org.apache.hadoop.hdds.scm.XceiverClientManager;
@@ -44,8 +45,6 @@ import org.apache.hadoop.security.token.TokenIdentifier;
 import org.apache.ratis.client.api.DataStreamOutput;
 import org.apache.ratis.io.StandardWriteOption;
 import org.apache.ratis.protocol.DataStreamReply;
-import org.apache.ratis.protocol.RaftPeerId;
-import org.apache.ratis.protocol.RoutingTable;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -208,44 +207,13 @@ public class BlockDataStreamOutput implements ByteBufferStreamOutput {
     if (isDatastreamPipelineMode) {
       return Preconditions.checkNotNull(xceiverClient.getDataStreamApi())
           .stream(message.getContent().asReadOnlyByteBuffer(),
-              getRoutingTable(pipeline));
+              RatisHelper.getRoutingTable(pipeline));
     } else {
       return Preconditions.checkNotNull(xceiverClient.getDataStreamApi())
           .stream(message.getContent().asReadOnlyByteBuffer());
     }
   }
 
-  public RoutingTable getRoutingTable(Pipeline pipeline) {
-    RaftPeerId primaryId = null;
-    List<RaftPeerId> raftPeers = new ArrayList<>();
-
-    for (DatanodeDetails dn : pipeline.getNodes()) {
-      final RaftPeerId raftPeerId = RaftPeerId.valueOf(dn.getUuidString());
-      try {
-        if (dn == pipeline.getFirstNode()) {
-          primaryId = raftPeerId;
-        }
-      } catch (IOException e) {
-        LOG.error("Can not get FirstNode from the pipeline: {} with " +
-            "exception: {}", pipeline.toString(), e.getLocalizedMessage());
-        return null;
-      }
-      raftPeers.add(raftPeerId);
-    }
-
-    RoutingTable.Builder builder = RoutingTable.newBuilder();
-    RaftPeerId previousId = primaryId;
-    for (RaftPeerId peerId : raftPeers) {
-      if (peerId.equals(primaryId)) {
-        continue;
-      }
-      builder.addSuccessor(previousId, peerId);
-      previousId = peerId;
-    }
-
-    return builder.build();
-  }
-
   public BlockID getBlockID() {
     return blockID.get();
   }
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/ratis/RatisHelper.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/ratis/RatisHelper.java
index c50a184285..e431c67df7 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/ratis/RatisHelper.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/ratis/RatisHelper.java
@@ -55,6 +55,7 @@ import org.apache.ratis.protocol.RaftGroup;
 import org.apache.ratis.protocol.RaftGroupId;
 import org.apache.ratis.protocol.RaftPeer;
 import org.apache.ratis.protocol.RaftPeerId;
+import org.apache.ratis.protocol.RoutingTable;
 import org.apache.ratis.retry.RetryPolicy;
 import org.apache.ratis.rpc.RpcType;
 import org.apache.ratis.rpc.SupportedRpcType;
@@ -425,4 +426,35 @@ public final class RatisHelper {
       throw new RuntimeException(e);
     }
   }
+
+  public static RoutingTable getRoutingTable(Pipeline pipeline) {
+    RaftPeerId primaryId = null;
+    List<RaftPeerId> raftPeers = new ArrayList<>();
+
+    for (DatanodeDetails dn : pipeline.getNodes()) {
+      final RaftPeerId raftPeerId = RaftPeerId.valueOf(dn.getUuidString());
+      try {
+        if (dn == pipeline.getFirstNode()) {
+          primaryId = raftPeerId;
+        }
+      } catch (IOException e) {
+        LOG.error("Can not get FirstNode from the pipeline: {} with " +
+            "exception: {}", pipeline.toString(), e.getLocalizedMessage());
+        return null;
+      }
+      raftPeers.add(raftPeerId);
+    }
+
+    RoutingTable.Builder builder = RoutingTable.newBuilder();
+    RaftPeerId previousId = primaryId;
+    for (RaftPeerId peerId : raftPeers) {
+      if (peerId.equals(primaryId)) {
+        continue;
+      }
+      builder.addSuccessor(previousId, peerId);
+      previousId = peerId;
+    }
+
+    return builder.build();
+  }
 }
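
A worked example for getRoutingTable, assuming a three-node pipeline
whose nodes iterate as n1, n2, n3 with n1 as the first node: the loop
records primaryId = n1, skips n1 in the successor pass, and links the
remaining peers in order. The result is the chain n1 -> n2 -> n3 (not a
star), equivalent to building it by hand:

    // n1, n2, n3 are the RaftPeerIds of the pipeline nodes (illustrative).
    RoutingTable.Builder b = RoutingTable.newBuilder();
    b.addSuccessor(n1, n2);  // the primary streams to the second node
    b.addSuccessor(n2, n3);  // which relays to the third
    RoutingTable chain = b.build();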
diff --git a/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/container/ContainerTestHelper.java b/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/container/ContainerTestHelper.java
index 09fff2371e..14ccad18b9 100644
--- a/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/container/ContainerTestHelper.java
+++ b/hadoop-hdds/common/src/test/java/org/apache/hadoop/ozone/container/ContainerTestHelper.java
@@ -546,6 +546,18 @@ public final class ContainerTestHelper {
     return String.format("%1$" + length + "s", string);
   }
 
+  public static byte[] generateData(int length, boolean random) {
+    final byte[] data = new byte[length];
+    if (random) {
+      ThreadLocalRandom.current().nextBytes(data);
+    } else {
+      for (int i = 0; i < length; i++) {
+        data[i] = (byte) i;
+      }
+    }
+    return data;
+  }
+
   /**
    * Construct fake protobuf messages for various types of requests.
    * This is tedious, however necessary to test. Protobuf classes are final
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/ContainerStateMachine.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/ContainerStateMachine.java
index 3ef9477d97..916d3e7f5b 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/ContainerStateMachine.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/ContainerStateMachine.java
@@ -62,6 +62,7 @@ import org.apache.hadoop.ozone.OzoneConfigKeys;
 import org.apache.hadoop.ozone.common.utils.BufferUtils;
 import org.apache.hadoop.ozone.container.common.interfaces.ContainerDispatcher;
 import org.apache.hadoop.ozone.container.common.statemachine.DatanodeConfiguration;
+import org.apache.hadoop.ozone.container.keyvalue.impl.KeyValueStreamDataChannel;
 import org.apache.hadoop.ozone.container.ozoneimpl.ContainerController;
 import org.apache.hadoop.util.Time;
 
@@ -549,19 +550,29 @@ public class ContainerStateMachine extends BaseStateMachine {
 
   @Override
   public CompletableFuture<?> link(DataStream stream, LogEntryProto entry) {
-    return CompletableFuture.supplyAsync(() -> {
-      if (stream == null) {
-        return JavaUtils.completeExceptionally(
-            new IllegalStateException("DataStream is null"));
-      }
-      if (stream.getDataChannel().isOpen()) {
-        return JavaUtils.completeExceptionally(
-            new IllegalStateException(
-                "DataStream: " + stream + " is not closed properly"));
-      } else {
-        return CompletableFuture.completedFuture(null);
+    if (stream == null) {
+      return JavaUtils.completeExceptionally(new IllegalStateException(
+          "DataStream is null"));
+    }
+    final DataChannel dataChannel = stream.getDataChannel();
+    if (dataChannel.isOpen()) {
+      return JavaUtils.completeExceptionally(new IllegalStateException(
+          "DataStream: " + stream + " is not closed properly"));
+    }
+
+    final CompletableFuture<ContainerCommandResponseProto> f;
+    if (dataChannel instanceof KeyValueStreamDataChannel) {
+      f = CompletableFuture.completedFuture(null);
+    } else {
+      return JavaUtils.completeExceptionally(new IllegalStateException(
+          "Unexpected DataChannel " + dataChannel.getClass()));
+    }
+    return f.whenComplete((res, e) -> {
+      if (LOG.isDebugEnabled()) {
+        LOG.debug("PutBlock {} Term: {} Index: {}",
+            res.getResult(), entry.getTerm(), entry.getIndex());
       }
-    }, executor);
+    });
   }
 
   private ExecutorService getChunkExecutor(WriteChunkRequestProto req) {
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java
index eafea02216..c387eb5af6 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java
@@ -185,13 +185,21 @@ public class KeyValueHandler extends Handler {
 
   @Override
   public StateMachine.DataChannel getStreamDataChannel(
-          Container container, ContainerCommandRequestProto msg)
-          throws StorageContainerException {
+      Container container, ContainerCommandRequestProto msg)
+      throws StorageContainerException {
     KeyValueContainer kvContainer = (KeyValueContainer) container;
     checkContainerOpen(kvContainer);
-    BlockID blockID = BlockID.getFromProtobuf(msg.getWriteChunk().getBlockID());
-    return chunkManager.getStreamDataChannel(kvContainer,
-            blockID, metrics);
+
+    if (msg.hasWriteChunk()) {
+      BlockID blockID =
+          BlockID.getFromProtobuf(msg.getWriteChunk().getBlockID());
+
+      return chunkManager.getStreamDataChannel(kvContainer,
+          blockID, metrics);
+    } else {
+      throw new StorageContainerException("Malformed request.",
+          ContainerProtos.Result.IO_EXCEPTION);
+    }
   }
 
   @Override
@@ -274,10 +282,14 @@ public class KeyValueHandler extends Handler {
   ContainerCommandResponseProto handleStreamInit(
       ContainerCommandRequestProto request, KeyValueContainer kvContainer,
       DispatcherContext dispatcherContext) {
-    if (!request.hasWriteChunk()) {
+    final BlockID blockID;
+    if (request.hasWriteChunk()) {
+      WriteChunkRequestProto writeChunk = request.getWriteChunk();
+      blockID = BlockID.getFromProtobuf(writeChunk.getBlockID());
+    } else {
       if (LOG.isDebugEnabled()) {
-        LOG.debug("Malformed Write Chunk request. trace ID: {}",
-            request.getTraceID());
+        LOG.debug("Malformed {} request. trace ID: {}",
+            request.getCmdType(), request.getTraceID());
       }
       return malformedRequest(request);
     }
@@ -285,13 +297,8 @@ public class KeyValueHandler extends Handler {
     String path = null;
     try {
       checkContainerOpen(kvContainer);
-
-      WriteChunkRequestProto writeChunk = request.getWriteChunk();
-      BlockID blockID = BlockID.getFromProtobuf(writeChunk.getBlockID());
-
       path = chunkManager
           .streamInit(kvContainer, blockID);
-
     } catch (StorageContainerException ex) {
       return ContainerUtils.logAndReturnError(LOG, ex, request);
     }
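
The upshot of the two hunks above is that both getStreamDataChannel and
handleStreamInit now reject requests lacking a WriteChunk header instead
of assuming one is present. A hedged sketch of a well-formed stream-init
request (field values are illustrative, Type.StreamInit is assumed from
the handler name, and the setters are inferred from the getters used
above):

    ContainerCommandRequestProto req =
        ContainerCommandRequestProto.newBuilder()
            .setCmdType(ContainerProtos.Type.StreamInit) // assumed constant
            .setContainerID(containerId)
            .setDatanodeUuid(datanodeUuid)
            .setWriteChunk(ContainerProtos.WriteChunkRequestProto.newBuilder()
                .setBlockID(datanodeBlockId)) // names the block to stream
            .build();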
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/KeyValueStreamDataChannel.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/KeyValueStreamDataChannel.java
index 14ead4ea86..66723031f0 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/KeyValueStreamDataChannel.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/KeyValueStreamDataChannel.java
@@ -28,7 +28,7 @@ import java.io.File;
 /**
  * This class is used to get the DataChannel for streaming.
  */
-class KeyValueStreamDataChannel extends StreamDataChannelBase {
+public class KeyValueStreamDataChannel extends StreamDataChannelBase {
   KeyValueStreamDataChannel(File file, ContainerData containerData,
                             ContainerMetrics metrics)
       throws StorageContainerException {
diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/BlockDataStreamOutputEntryPool.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/BlockDataStreamOutputEntryPool.java
index 00cda7844a..e51242cc10 100644
--- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/BlockDataStreamOutputEntryPool.java
+++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/BlockDataStreamOutputEntryPool.java
@@ -18,10 +18,8 @@
  */
 package org.apache.hadoop.ozone.client.io;
 
-import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Preconditions;
 import org.apache.hadoop.hdds.client.ReplicationConfig;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.scm.OzoneClientConfig;
 import org.apache.hadoop.hdds.scm.XceiverClientFactory;
 import org.apache.hadoop.hdds.scm.container.common.helpers.ExcludeList;
@@ -88,30 +86,6 @@ public class BlockDataStreamOutputEntryPool {
     this.bufferList = new ArrayList<>();
   }
 
-  /**
-   * A constructor for testing purpose only.
-   *
-   * @see KeyDataStreamOutput#KeyDataStreamOutput()
-   */
-  @VisibleForTesting
-  BlockDataStreamOutputEntryPool() {
-    streamEntries = new ArrayList<>();
-    omClient = null;
-    keyArgs = null;
-    xceiverClientFactory = null;
-    config =
-        new OzoneConfiguration().getObject(OzoneClientConfig.class);
-    config.setStreamBufferSize(0);
-    config.setStreamBufferMaxSize(0);
-    config.setStreamBufferFlushSize(0);
-    config.setStreamBufferFlushDelay(false);
-    requestID = null;
-    int chunkSize = 0;
-    currentStreamIndex = 0;
-    openID = -1;
-    excludeList = new ExcludeList();
-  }
-
   /**
    * When a key is opened, it is possible that there are some blocks already
    * allocated to it for this open session. In this case, to make use of these
diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/KeyDataStreamOutput.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/KeyDataStreamOutput.java
index 2540e42e24..dc5c3a016d 100644
--- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/KeyDataStreamOutput.java
+++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/KeyDataStreamOutput.java
@@ -28,31 +28,22 @@ import org.apache.hadoop.hdds.scm.XceiverClientFactory;
 import org.apache.hadoop.hdds.scm.client.HddsClientUtils;
 import org.apache.hadoop.hdds.scm.container.ContainerID;
 import org.apache.hadoop.hdds.scm.container.common.helpers.ExcludeList;
-import org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException;
 import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
 import org.apache.hadoop.hdds.scm.pipeline.PipelineID;
-import org.apache.hadoop.hdds.scm.storage.ByteBufferStreamOutput;
-import org.apache.hadoop.io.retry.RetryPolicies;
-import org.apache.hadoop.io.retry.RetryPolicy;
+import org.apache.hadoop.hdds.scm.storage.AbstractDataStreamOutput;
 import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
 import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo;
 import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup;
 import org.apache.hadoop.ozone.om.helpers.OmMultipartCommitUploadPartInfo;
 import org.apache.hadoop.ozone.om.helpers.OpenKeySession;
 import org.apache.hadoop.ozone.om.protocol.OzoneManagerProtocol;
-import org.apache.ratis.protocol.exceptions.AlreadyClosedException;
-import org.apache.ratis.protocol.exceptions.RaftRetryFailureException;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 import java.io.IOException;
-import java.io.InterruptedIOException;
 import java.nio.ByteBuffer;
 import java.util.Collection;
 import java.util.List;
-import java.util.Map;
-import java.util.function.Function;
-import java.util.stream.Collectors;
 
 /**
  * Maintaining a list of BlockDataStreamOutputEntry. Write based on offset.
@@ -63,7 +54,7 @@ import java.util.stream.Collectors;
  *
  * TODO: multi-thread access is currently not supported.
  */
-public class KeyDataStreamOutput implements ByteBufferStreamOutput {
+public class KeyDataStreamOutput extends AbstractDataStreamOutput {
 
   private OzoneClientConfig config;
 
@@ -79,34 +70,16 @@ public class KeyDataStreamOutput implements ByteBufferStreamOutput {
 
   private boolean closed;
   private FileEncryptionInfo feInfo;
-  private final Map<Class<? extends Throwable>, RetryPolicy> retryPolicyMap;
-  private int retryCount;
+
   // how much data has actually been written to the underlying stream
   private long offset;
   // how much data has been ingested into the stream
   private long writeOffset;
-  // whether an exception is encountered while write and whole write could
-  // not succeed
-  private boolean isException;
+
   private final BlockDataStreamOutputEntryPool blockDataStreamOutputEntryPool;
 
   private long clientID;
 
-  /**
-   * A constructor for testing purpose only.
-   */
-  @VisibleForTesting
-  public KeyDataStreamOutput() {
-    closed = false;
-    this.retryPolicyMap = HddsClientUtils.getExceptionList()
-        .stream()
-        .collect(Collectors.toMap(Function.identity(),
-            e -> RetryPolicies.TRY_ONCE_THEN_FAIL));
-    retryCount = 0;
-    offset = 0;
-    blockDataStreamOutputEntryPool = new BlockDataStreamOutputEntryPool();
-  }
-
   @VisibleForTesting
   public List<BlockDataStreamOutputEntry> getStreamEntries() {
     return blockDataStreamOutputEntryPool.getStreamEntries();
@@ -122,11 +95,6 @@ public class KeyDataStreamOutput implements ByteBufferStreamOutput {
     return blockDataStreamOutputEntryPool.getLocationInfoList();
   }
 
-  @VisibleForTesting
-  public int getRetryCount() {
-    return retryCount;
-  }
-
   @VisibleForTesting
   public long getClientID() {
     return clientID;
@@ -142,6 +110,8 @@ public class KeyDataStreamOutput implements ByteBufferStreamOutput {
       String uploadID, int partNumber, boolean isMultipart,
       boolean unsafeByteBufferConversion
   ) {
+    super(HddsClientUtils.getRetryPolicyByException(
+        config.getMaxRetryCount(), config.getRetryInterval()));
     this.config = config;
     OmKeyInfo info = handler.getKeyInfo();
     blockDataStreamOutputEntryPool =
@@ -158,10 +128,6 @@ public class KeyDataStreamOutput implements ByteBufferStreamOutput {
     // Retrieve the file encryption key info, null if file is not in
     // encrypted bucket.
     this.feInfo = info.getFileEncryptionInfo();
-    this.retryPolicyMap = HddsClientUtils.getRetryPolicyByException(
-        config.getMaxRetryCount(), config.getRetryInterval());
-    this.retryCount = 0;
-    this.isException = false;
     this.writeOffset = 0;
     this.clientID = handler.getId();
   }
@@ -322,9 +288,10 @@ public class KeyDataStreamOutput implements ByteBufferStreamOutput {
     if (bufferedDataLen > 0) {
       // If the data is still cached in the underlying stream, we need to
       // allocate new block and write this data in the datanode.
-      handleRetry(exception, bufferedDataLen);
+      handleRetry(exception);
+      handleWrite(null, 0, bufferedDataLen, true);
       // reset the retryCount after handling the exception
-      retryCount = 0;
+      resetRetryCount();
     }
   }
 
@@ -333,74 +300,6 @@ public class KeyDataStreamOutput implements ByteBufferStreamOutput {
     closed = true;
   }
 
-  private void handleRetry(IOException exception, long len) throws IOException {
-    RetryPolicy retryPolicy = retryPolicyMap
-        .get(HddsClientUtils.checkForException(exception).getClass());
-    if (retryPolicy == null) {
-      retryPolicy = retryPolicyMap.get(Exception.class);
-    }
-    RetryPolicy.RetryAction action = null;
-    try {
-      action = retryPolicy.shouldRetry(exception, retryCount, 0, true);
-    } catch (Exception e) {
-      setExceptionAndThrow(new IOException(e));
-    }
-    if (action.action == RetryPolicy.RetryAction.RetryDecision.FAIL) {
-      String msg = "";
-      if (action.reason != null) {
-        msg = "Retry request failed. " + action.reason;
-        LOG.error(msg, exception);
-      }
-      setExceptionAndThrow(new IOException(msg, exception));
-    }
-
-    // Throw the exception if the thread is interrupted
-    if (Thread.currentThread().isInterrupted()) {
-      LOG.warn("Interrupted while trying for retry");
-      setExceptionAndThrow(exception);
-    }
-    Preconditions.checkArgument(
-        action.action == RetryPolicy.RetryAction.RetryDecision.RETRY);
-    if (action.delayMillis > 0) {
-      try {
-        Thread.sleep(action.delayMillis);
-      } catch (InterruptedException e) {
-        Thread.currentThread().interrupt();
-        IOException ioe =  (IOException) new InterruptedIOException(
-            "Interrupted: action=" + action + ", retry policy=" + retryPolicy)
-            .initCause(e);
-        setExceptionAndThrow(ioe);
-      }
-    }
-    retryCount++;
-    if (LOG.isTraceEnabled()) {
-      LOG.trace("Retrying Write request. Already tried {} time(s); " +
-          "retry policy is {} ", retryCount, retryPolicy);
-    }
-    handleWrite(null, 0, len, true);
-  }
-
-  private void setExceptionAndThrow(IOException ioe) throws IOException {
-    isException = true;
-    throw ioe;
-  }
-
-  /**
-   * Checks if the provided exception signifies retry failure in ratis client.
-   * In case of retry failure, ratis client throws RaftRetryFailureException
-   * and all succeeding operations are failed with AlreadyClosedException.
-   */
-  private boolean checkForRetryFailure(Throwable t) {
-    return t instanceof RaftRetryFailureException
-        || t instanceof AlreadyClosedException;
-  }
-
-  // Every container specific exception from datatnode will be seen as
-  // StorageContainerException
-  private boolean checkIfContainerToExclude(Throwable t) {
-    return t instanceof StorageContainerException;
-  }
-
   @Override
   public void flush() throws IOException {
     checkNotClosed();
@@ -485,7 +384,7 @@ public class KeyDataStreamOutput implements ByteBufferStreamOutput {
     closed = true;
     try {
       handleFlushOrClose(StreamAction.CLOSE);
-      if (!isException) {
+      if (!isException()) {
         Preconditions.checkArgument(writeOffset == offset);
       }
       blockDataStreamOutputEntryPool.commitKey(offset);
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBlockDataStreamOutput.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBlockDataStreamOutput.java
index 6225e25268..65f7348740 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBlockDataStreamOutput.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBlockDataStreamOutput.java
@@ -161,7 +161,7 @@ public class TestBlockDataStreamOutput {
   private void testWrite(int dataLength) throws Exception {
     String keyName = getKeyName();
     OzoneDataStreamOutput key = createKey(
-        keyName, ReplicationType.RATIS, 0);
+        keyName, ReplicationType.RATIS, dataLength);
     byte[] data =
         ContainerTestHelper.getFixedLengthString(keyString, dataLength)
             .getBytes(UTF_8);
@@ -174,7 +174,7 @@ public class TestBlockDataStreamOutput {
   private void testWriteWithFailure(int dataLength) throws Exception {
     String keyName = getKeyName();
     OzoneDataStreamOutput key = createKey(
-        keyName, ReplicationType.RATIS, 0);
+        keyName, ReplicationType.RATIS, dataLength);
     byte[] data =
         ContainerTestHelper.getFixedLengthString(keyString, dataLength)
             .getBytes(UTF_8);
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachineStream.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachineStream.java
index f4c756bccd..ad9eca6af7 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachineStream.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachineStream.java
@@ -17,7 +17,7 @@
 
 package org.apache.hadoop.ozone.client.rpc;
 
-
+import org.apache.hadoop.conf.StorageUnit;
 import org.apache.hadoop.hdds.client.ReplicationType;
 import org.apache.hadoop.hdds.conf.DatanodeRatisServerConfig;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
@@ -46,10 +46,8 @@ import java.io.IOException;
 import java.nio.ByteBuffer;
 import java.time.Duration;
 import java.util.List;
-import java.util.UUID;
 import java.util.concurrent.TimeUnit;
 
-import static java.nio.charset.StandardCharsets.UTF_8;
 import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_COMMAND_STATUS_REPORT_INTERVAL;
 import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_CONTAINER_REPORT_INTERVAL;
 import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_PIPELINE_REPORT_INTERVAL;
@@ -74,6 +72,11 @@ public class TestContainerStateMachineStream {
   private String volumeName;
   private String bucketName;
 
+  private static final int CHUNK_SIZE = 100;
+  private static final int FLUSH_SIZE = 2 * CHUNK_SIZE;
+  private static final int MAX_FLUSH_SIZE = 2 * FLUSH_SIZE;
+  private static final int BLOCK_SIZE = 2 * MAX_FLUSH_SIZE;
+
   /**
    * Create a MiniDFSCluster for testing.
    *
@@ -118,8 +121,15 @@ public class TestContainerStateMachineStream {
     conf.setLong(OzoneConfigKeys.DFS_RATIS_SNAPSHOT_THRESHOLD_KEY, 1);
     conf.setQuietMode(false);
     cluster =
-        MiniOzoneCluster.newBuilder(conf).setNumDatanodes(3).setHbInterval(200)
+        MiniOzoneCluster.newBuilder(conf)
+            .setNumDatanodes(3)
+            .setHbInterval(200)
             .setDataStreamMinPacketSize(1024)
+            .setBlockSize(BLOCK_SIZE)
+            .setChunkSize(CHUNK_SIZE)
+            .setStreamBufferFlushSize(FLUSH_SIZE)
+            .setStreamBufferMaxSize(MAX_FLUSH_SIZE)
+            .setStreamBufferSizeUnit(StorageUnit.BYTES)
             .build();
     cluster.waitForClusterToBeReady();
     cluster.waitForPipelineTobeReady(HddsProtos.ReplicationFactor.ONE, 60000);
@@ -146,20 +156,14 @@ public class TestContainerStateMachineStream {
 
   @Test
   public void testContainerStateMachineForStreaming() throws Exception {
-    long size = 1024 * 8;
+    long size = CHUNK_SIZE + 1;
 
     OzoneDataStreamOutput key = TestHelper.createStreamKey(
         "ozone-stream-test.txt", ReplicationType.RATIS, size, objectStore,
         volumeName, bucketName);
 
-    byte[] data =
-        ContainerTestHelper
-            .getFixedLengthString(UUID.randomUUID().toString(),
-                (int) (size / 2))
-            .getBytes(UTF_8);
+    byte[] data = ContainerTestHelper.generateData((int) size, true);
     key.write(ByteBuffer.wrap(data));
-    key.write(ByteBuffer.wrap(data));
-
     key.flush();
 
     KeyDataStreamOutput streamOutput =
@@ -181,4 +185,35 @@ public class TestContainerStateMachineStream {
     Assert.assertTrue(bytesUsed == size);
   }
 
+
+  @Test
+  public void testContainerStateMachineForStreamingSmallFile()
+      throws Exception {
+    long size = CHUNK_SIZE - 1;
+
+    OzoneDataStreamOutput key = TestHelper.createStreamKey(
+        "ozone-stream-test-small-file.txt", ReplicationType.RATIS, size,
+        objectStore, volumeName, bucketName);
+
+    byte[] data = ContainerTestHelper.generateData((int) size, true);
+    key.write(ByteBuffer.wrap(data));
+    key.flush();
+
+    KeyDataStreamOutput streamOutput =
+        (KeyDataStreamOutput) key.getByteBufStreamOutput();
+    List<OmKeyLocationInfo> locationInfoList =
+        streamOutput.getLocationInfoList();
+    key.close();
+    OmKeyLocationInfo omKeyLocationInfo = locationInfoList.get(0);
+    HddsDatanodeService dn = TestHelper.getDatanodeService(omKeyLocationInfo,
+        cluster);
+
+    long bytesUsed = dn.getDatanodeStateMachine()
+        .getContainer().getContainerSet()
+        .getContainer(omKeyLocationInfo.getContainerID()).
+            getContainerData().getBytesUsed();
+
+    Assert.assertTrue(bytesUsed == size);
+  }
+
 }
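
For reference, the buffer geometry wired in above works out to
CHUNK_SIZE = 100 bytes, FLUSH_SIZE = 2 * 100 = 200, MAX_FLUSH_SIZE =
2 * 200 = 400 and BLOCK_SIZE = 2 * 400 = 800. The first test therefore
writes CHUNK_SIZE + 1 = 101 bytes, spilling into a second chunk, while
the new small-file test writes CHUNK_SIZE - 1 = 99 bytes and stays
within a single chunk; both then assert that the container's bytesUsed
equals exactly the number of bytes written.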




[ozone] 33/38: HDDS-6500. [Ozone-Streaming] Buffer the PutBlockRequest at the end of the stream. (#3229)

Posted by sz...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

szetszwo pushed a commit to branch HDDS-4454
in repository https://gitbox.apache.org/repos/asf/ozone.git

commit f2d574ce3e4a9b64b51558be61b388106020d3df
Author: Tsz-Wo Nicholas Sze <sz...@apache.org>
AuthorDate: Mon Mar 28 20:43:38 2022 +0800

    HDDS-6500. [Ozone-Streaming] Buffer the PutBlockRequest at the end of the stream. (#3229)
    
    (cherry picked from commit d72a8a09d573d63e2069851fd621ea78ad10aedd)
    (cherry picked from commit 1dddbec2524ca3463aa6140f33c6f16f0468fce8)
---
 .../hdds/scm/storage/BlockDataStreamOutput.java    |  48 +++-
 .../org/apache/hadoop/hdds/ratis/RatisHelper.java  |  26 ++
 .../hdds/scm/storage/ContainerProtocolCalls.java   |  14 +-
 .../server/ratis/ContainerStateMachine.java        |  27 +-
 .../keyvalue/impl/KeyValueStreamDataChannel.java   | 235 ++++++++++++++++
 .../keyvalue/impl/StreamDataChannelBase.java       |   3 +-
 .../impl/TestKeyValueStreamDataChannel.java        | 313 +++++++++++++++++++++
 .../client/rpc/TestBlockDataStreamOutput.java      |  16 +-
 8 files changed, 657 insertions(+), 25 deletions(-)

diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockDataStreamOutput.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockDataStreamOutput.java
index d5b9dd9d81..d19f2aea13 100644
--- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockDataStreamOutput.java
+++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockDataStreamOutput.java
@@ -25,6 +25,7 @@ import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.BlockData;
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ChunkInfo;
+import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerCommandRequestProto;
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerCommandResponseProto;
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.KeyValue;
 import org.apache.hadoop.hdds.ratis.ContainerCommandRequestMessage;
@@ -83,6 +84,9 @@ import static org.apache.hadoop.hdds.scm.storage.ContainerProtocolCalls.putBlock
 public class BlockDataStreamOutput implements ByteBufferStreamOutput {
   public static final Logger LOG =
       LoggerFactory.getLogger(BlockDataStreamOutput.class);
+
+  public static final int PUT_BLOCK_REQUEST_LENGTH_MAX = 1 << 20;  // 1MB
+
   public static final String EXCEPTION_MSG =
       "Unexpected Storage Container Exception: ";
   private static final CompletableFuture[] EMPTY_FUTURE_ARRAY = {};
@@ -406,12 +410,26 @@ public class BlockDataStreamOutput implements ByteBufferStreamOutput {
       byteBufferList = null;
     }
     waitFuturesComplete();
+    final BlockData blockData = containerBlockData.build();
     if (close) {
-      dataStreamCloseReply = out.closeAsync();
+      final ContainerCommandRequestProto putBlockRequest
+          = ContainerProtocolCalls.getPutBlockRequest(
+              xceiverClient.getPipeline(), blockData, true, token);
+      dataStreamCloseReply = executePutBlockClose(putBlockRequest,
+          PUT_BLOCK_REQUEST_LENGTH_MAX, out);
+      dataStreamCloseReply.whenComplete((reply, e) -> {
+        if (e != null || reply == null || !reply.isSuccess()) {
+          LOG.warn("Failed executePutBlockClose, reply=" + reply, e);
+          try {
+            executePutBlock(true, false);
+          } catch (IOException ex) {
+            throw new CompletionException(ex);
+          }
+        }
+      });
     }
 
     try {
-      BlockData blockData = containerBlockData.build();
       XceiverClientReply asyncReply =
           putBlockAsync(xceiverClient, blockData, close, token);
       final CompletableFuture<ContainerCommandResponseProto> flushFuture
@@ -459,6 +477,30 @@ public class BlockDataStreamOutput implements ByteBufferStreamOutput {
     }
   }
 
+  public static CompletableFuture<DataStreamReply> executePutBlockClose(
+      ContainerCommandRequestProto putBlockRequest, int max,
+      DataStreamOutput out) {
+    final ByteBuffer putBlock = ContainerCommandRequestMessage.toMessage(
+        putBlockRequest, null).getContent().asReadOnlyByteBuffer();
+    final ByteBuffer protoLength = getProtoLength(putBlock, max);
+    RatisHelper.debug(putBlock, "putBlock", LOG);
+    out.writeAsync(putBlock);
+    RatisHelper.debug(protoLength, "protoLength", LOG);
+    return out.writeAsync(protoLength, StandardWriteOption.CLOSE);
+  }
+
+  public static ByteBuffer getProtoLength(ByteBuffer putBlock, int max) {
+    final int protoLength = putBlock.remaining();
+    Preconditions.checkState(protoLength <= max,
+        "protoLength== %s > max = %s", protoLength, max);
+    final ByteBuffer buffer = ByteBuffer.allocate(4);
+    buffer.putInt(protoLength);
+    buffer.flip();
+    LOG.debug("protoLength = {}", protoLength);
+    Preconditions.checkState(buffer.remaining() == 4);
+    return buffer.asReadOnlyBuffer();
+  }
+
   @Override
   public void flush() throws IOException {
     if (xceiverClientFactory != null && xceiverClient != null
@@ -547,7 +589,7 @@ public class BlockDataStreamOutput implements ByteBufferStreamOutput {
   }
 
 
-  private void setIoException(Exception e) {
+  private void setIoException(Throwable e) {
     IOException ioe = getIoException();
     if (ioe == null) {
       IOException exception =  new IOException(EXCEPTION_MSG + e.toString(), e);
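
The framing introduced above is worth spelling out: executePutBlockClose
appends the serialized PutBlock proto to the stream and then a 4-byte
length suffix before closing, so the datanode can later walk back from
the end of the stream to find the request. A standalone sketch of the
writer-side frame (plain java.nio, not Ozone code):

    import java.nio.ByteBuffer;

    final class LengthSuffix {
      private LengthSuffix() { }

      // Builds the tail frame: [proto bytes][4-byte big-endian length].
      static ByteBuffer[] frame(ByteBuffer proto) {
        ByteBuffer len = ByteBuffer.allocate(4);
        len.putInt(proto.remaining()); // N = serialized proto size
        len.flip();                    // make it readable for writing out
        return new ByteBuffer[] {proto, len};
      }
    }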
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/ratis/RatisHelper.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/ratis/RatisHelper.java
index e431c67df7..5b7ecb0c6b 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/ratis/RatisHelper.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/ratis/RatisHelper.java
@@ -19,6 +19,7 @@
 package org.apache.hadoop.hdds.ratis;
 
 import java.io.IOException;
+import java.nio.ByteBuffer;
 import java.security.cert.X509Certificate;
 import java.util.ArrayList;
 import java.util.Collection;
@@ -60,6 +61,7 @@ import org.apache.ratis.retry.RetryPolicy;
 import org.apache.ratis.rpc.RpcType;
 import org.apache.ratis.rpc.SupportedRpcType;
 import org.apache.ratis.thirdparty.com.google.protobuf.ByteString;
+import org.apache.ratis.thirdparty.io.netty.buffer.ByteBuf;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -457,4 +459,28 @@ public final class RatisHelper {
 
     return builder.build();
   }
+
+  public static void debug(ByteBuffer buffer, String name, Logger log) {
+    if (!log.isDebugEnabled()) {
+      return;
+    }
+    buffer = buffer.duplicate();
+    final StringBuilder builder = new StringBuilder();
+    for (int i = 1; buffer.remaining() > 0; i++) {
+      builder.append(buffer.get()).append(i % 20 == 0 ? "\n  " : ", ");
+    }
+    log.debug("{}: {}\n  {}", name, buffer, builder);
+  }
+
+  public static void debug(ByteBuf buf, String name, Logger log) {
+    if (!log.isDebugEnabled()) {
+      return;
+    }
+    buf = buf.duplicate();
+    final StringBuilder builder = new StringBuilder();
+    for (int i = 1; buf.readableBytes() > 0; i++) {
+      builder.append(buf.readByte()).append(i % 20 == 0 ? "\n  " : ", ");
+    }
+    log.debug("{}: {}\n  {}", name, buf, builder);
+  }
 }
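
Both debug helpers work on a duplicate, so the caller's read position is
never disturbed, and they return immediately unless debug logging is
enabled. Usage matches the call sites elsewhere in this commit, e.g.:

    RatisHelper.debug(putBlock, "putBlock", LOG);   // ByteBuffer variant
    RatisHelper.debug(b, "readProtoLength", LOG);   // Netty ByteBuf variant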
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/storage/ContainerProtocolCalls.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/storage/ContainerProtocolCalls.java
index e024d79b9a..d0999268be 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/storage/ContainerProtocolCalls.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/storage/ContainerProtocolCalls.java
@@ -56,6 +56,7 @@ import org.apache.hadoop.hdds.scm.XceiverClientSpi;
 import org.apache.hadoop.hdds.scm.container.common.helpers.BlockNotCommittedException;
 import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerNotOpenException;
 import org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException;
+import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
 import org.apache.hadoop.hdds.security.token.OzoneBlockTokenIdentifier;
 import org.apache.hadoop.ozone.common.Checksum;
 import org.apache.hadoop.ozone.common.ChecksumData;
@@ -233,11 +234,19 @@ public final class ContainerProtocolCalls  {
       BlockData containerBlockData, boolean eof,
       Token<? extends TokenIdentifier> token)
       throws IOException, InterruptedException, ExecutionException {
+    final ContainerCommandRequestProto request = getPutBlockRequest(
+        xceiverClient.getPipeline(), containerBlockData, eof, token);
+    return xceiverClient.sendCommandAsync(request);
+  }
+
+  public static ContainerCommandRequestProto getPutBlockRequest(
+      Pipeline pipeline, BlockData containerBlockData, boolean eof,
+      Token<? extends TokenIdentifier> token) throws IOException {
     PutBlockRequestProto.Builder createBlockRequest =
         PutBlockRequestProto.newBuilder()
             .setBlockData(containerBlockData)
             .setEof(eof);
-    String id = xceiverClient.getPipeline().getFirstNode().getUuidString();
+    final String id = pipeline.getFirstNode().getUuidString();
     ContainerCommandRequestProto.Builder builder =
         ContainerCommandRequestProto.newBuilder().setCmdType(Type.PutBlock)
             .setContainerID(containerBlockData.getBlockID().getContainerID())
@@ -246,8 +255,7 @@ public final class ContainerProtocolCalls  {
     if (token != null) {
       builder.setEncodedToken(token.encodeToUrlString());
     }
-    ContainerCommandRequestProto request = builder.build();
-    return xceiverClient.sendCommandAsync(request);
+    return builder.build();
   }
 
   /**
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/ContainerStateMachine.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/ContainerStateMachine.java
index 916d3e7f5b..b9707e5af2 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/ContainerStateMachine.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/ContainerStateMachine.java
@@ -427,6 +427,20 @@ public class ContainerStateMachine extends BaseStateMachine {
     return dispatchCommand(requestProto, context);
   }
 
+  private CompletableFuture<ContainerCommandResponseProto> runCommandAsync(
+      ContainerCommandRequestProto requestProto, LogEntryProto entry) {
+    return CompletableFuture.supplyAsync(() -> {
+      final DispatcherContext context = new DispatcherContext.Builder()
+          .setTerm(entry.getTerm())
+          .setLogIndex(entry.getIndex())
+          .setStage(DispatcherContext.WriteChunkStage.COMMIT_DATA)
+          .setContainer2BCSIDMap(container2BCSIDMap)
+          .build();
+
+      return runCommand(requestProto, context);
+    }, executor);
+  }
+
   private CompletableFuture<Message> handleWriteChunk(
       ContainerCommandRequestProto requestProto, long entryIndex, long term,
       long startTime) {
@@ -560,19 +574,16 @@ public class ContainerStateMachine extends BaseStateMachine {
           "DataStream: " + stream + " is not closed properly"));
     }
 
-    final CompletableFuture<ContainerCommandResponseProto> f;
+    final ContainerCommandRequestProto request;
     if (dataChannel instanceof KeyValueStreamDataChannel) {
-      f = CompletableFuture.completedFuture(null);
+      request = ((KeyValueStreamDataChannel) dataChannel).getPutBlockRequest();
     } else {
       return JavaUtils.completeExceptionally(new IllegalStateException(
           "Unexpected DataChannel " + dataChannel.getClass()));
     }
-    return f.whenComplete((res, e) -> {
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("PutBlock {} Term: {} Index: {}",
-            res.getResult(), entry.getTerm(), entry.getIndex());
-      }
-    });
+    return runCommandAsync(request, entry).whenComplete(
+        (res, e) -> LOG.debug("link {}, entry: {}, request: {}",
+            res.getResult(), entry, request));
   }
 
   private ExecutorService getChunkExecutor(WriteChunkRequestProto req) {
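
Net effect of this hunk: link() no longer merely validates the channel.
When Ratis links a closed stream into the Raft log, the datanode now
pulls the PutBlock request that the client buffered at the stream tail
(KeyValueStreamDataChannel.getPutBlockRequest(), added below) and
replays it through runCommandAsync at the COMMIT_DATA stage, stamped
with the log entry's term and index, so the block metadata is committed
on every replica that applies the entry.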
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/KeyValueStreamDataChannel.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/KeyValueStreamDataChannel.java
index 66723031f0..99dc40f5d0 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/KeyValueStreamDataChannel.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/KeyValueStreamDataChannel.java
@@ -18,17 +18,131 @@
 
 package org.apache.hadoop.ozone.container.keyvalue.impl;
 
+import com.google.common.base.Preconditions;
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
+import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerCommandRequestProto;
+import org.apache.hadoop.hdds.ratis.ContainerCommandRequestMessage;
+import org.apache.hadoop.hdds.ratis.RatisHelper;
 import org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException;
+import org.apache.hadoop.hdds.scm.storage.BlockDataStreamOutput;
 import org.apache.hadoop.ozone.container.common.helpers.ContainerMetrics;
 import org.apache.hadoop.ozone.container.common.impl.ContainerData;
+import org.apache.ratis.thirdparty.com.google.protobuf.ByteString;
+import org.apache.ratis.thirdparty.io.netty.buffer.ByteBuf;
+import org.apache.ratis.thirdparty.io.netty.buffer.Unpooled;
+import org.apache.ratis.util.ReferenceCountedObject;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import java.io.File;
+import java.io.IOException;
+import java.nio.ByteBuffer;
+import java.util.ArrayList;
+import java.util.Deque;
+import java.util.Iterator;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.Objects;
+import java.util.Optional;
+import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.concurrent.atomic.AtomicReference;
 
 /**
  * This class is used to get the DataChannel for streaming.
  */
 public class KeyValueStreamDataChannel extends StreamDataChannelBase {
+  public static final Logger LOG =
+      LoggerFactory.getLogger(KeyValueStreamDataChannel.class);
+
+  /**
+   * Keep the last {@link Buffers#max} bytes in the buffer
+   * in order to create putBlockRequest
+   * at {@link #closeBuffers(Buffers, WriteMethod)}.
+   */
+  static class Buffers {
+    private final Deque<ReferenceCountedObject<ByteBuffer>> deque
+        = new LinkedList<>();
+    private final int max;
+    private int length;
+
+    Buffers(int max) {
+      this.max = max;
+    }
+
+    private boolean isExtra(int n) {
+      return length - n >= max;
+    }
+
+    private boolean hasExtraBuffer() {
+      return Optional.ofNullable(deque.peek())
+          .map(ReferenceCountedObject::get)
+          .filter(b -> isExtra(b.remaining()))
+          .isPresent();
+    }
+
+    /**
+     * @return extra buffers which are safe to be written.
+     */
+    Iterable<ReferenceCountedObject<ByteBuffer>> offer(
+        ReferenceCountedObject<ByteBuffer> ref) {
+      final ByteBuffer buffer = ref.retain();
+      LOG.debug("offer {}", buffer);
+      final boolean offered = deque.offer(ref);
+      Preconditions.checkState(offered, "Failed to offer");
+      length += buffer.remaining();
+
+      return () -> new Iterator<ReferenceCountedObject<ByteBuffer>>() {
+        @Override
+        public boolean hasNext() {
+          return hasExtraBuffer();
+        }
+
+        @Override
+        public ReferenceCountedObject<ByteBuffer> next() {
+          final ReferenceCountedObject<ByteBuffer> polled = poll();
+          length -= polled.get().remaining();
+          Preconditions.checkState(length >= max);
+          return polled;
+        }
+      };
+    }
+
+    ReferenceCountedObject<ByteBuffer> poll() {
+      final ReferenceCountedObject<ByteBuffer> polled
+          = Objects.requireNonNull(deque.poll());
+      RatisHelper.debug(polled.get(), "polled", LOG);
+      return polled;
+    }
+
+    ReferenceCountedObject<ByteBuf> pollAll() {
+      Preconditions.checkState(!deque.isEmpty(), "The deque is empty");
+      final ByteBuffer[] array = new ByteBuffer[deque.size()];
+      final List<ReferenceCountedObject<ByteBuffer>> refs
+          = new ArrayList<>(deque.size());
+      for (int i = 0; i < array.length; i++) {
+        final ReferenceCountedObject<ByteBuffer> ref = poll();
+        refs.add(ref);
+        array[i] = ref.get();
+      }
+      final ByteBuf buf = Unpooled.wrappedBuffer(array).asReadOnly();
+      return ReferenceCountedObject.wrap(buf, () -> {
+      }, () -> {
+        buf.release();
+        refs.forEach(ReferenceCountedObject::release);
+      });
+    }
+  }
+
+  interface WriteMethod {
+    int applyAsInt(ByteBuffer src) throws IOException;
+  }
+
+  private final Buffers buffers = new Buffers(
+      BlockDataStreamOutput.PUT_BLOCK_REQUEST_LENGTH_MAX);
+  private final AtomicReference<ContainerCommandRequestProto> putBlockRequest
+      = new AtomicReference<>();
+  private final AtomicBoolean closed = new AtomicBoolean();
+
   KeyValueStreamDataChannel(File file, ContainerData containerData,
                             ContainerMetrics metrics)
       throws StorageContainerException {
@@ -39,4 +153,125 @@ public class KeyValueStreamDataChannel extends StreamDataChannelBase {
   ContainerProtos.Type getType() {
     return ContainerProtos.Type.StreamWrite;
   }
+
+  @Override
+  public int write(ReferenceCountedObject<ByteBuffer> referenceCounted)
+      throws IOException {
+    assertOpen();
+    return writeBuffers(referenceCounted, buffers, super::writeFileChannel);
+  }
+
+  static int writeBuffers(ReferenceCountedObject<ByteBuffer> src,
+      Buffers buffers, WriteMethod writeMethod)
+      throws IOException {
+    for (ReferenceCountedObject<ByteBuffer> b : buffers.offer(src)) {
+      try {
+        writeFully(b.get(), writeMethod);
+      } finally {
+        b.release();
+      }
+    }
+    return src.get().remaining();
+  }
+
+  private static void writeFully(ByteBuffer b, WriteMethod writeMethod)
+      throws IOException {
+    for (; b.remaining() > 0;) {
+      final int written = writeMethod.applyAsInt(b);
+      if (written <= 0) {
+        throw new IOException("Unable to write");
+      }
+    }
+  }
+
+  public ContainerCommandRequestProto getPutBlockRequest() {
+    return Objects.requireNonNull(putBlockRequest.get(),
+        () -> "putBlockRequest == null, " + this);
+  }
+
+  void assertOpen() throws IOException {
+    if (closed.get()) {
+      throw new IOException("Already closed: " + this);
+    }
+  }
+
+  @Override
+  public void close() throws IOException {
+    if (closed.compareAndSet(false, true)) {
+      putBlockRequest.set(closeBuffers(buffers, super::writeFileChannel));
+      super.close();
+    }
+  }
+
+  static ContainerCommandRequestProto closeBuffers(
+      Buffers buffers, WriteMethod writeMethod) throws IOException {
+    final ReferenceCountedObject<ByteBuf> ref = buffers.pollAll();
+    final ByteBuf buf = ref.retain();
+    final ContainerCommandRequestProto putBlockRequest;
+    try {
+      putBlockRequest = readPutBlockRequest(buf);
+      // write the remaining data
+      writeFully(buf.nioBuffer(), writeMethod);
+    } finally {
+      ref.release();
+    }
+    return putBlockRequest;
+  }
+
+  private static int readProtoLength(ByteBuf b, int lengthIndex) {
+    final int readerIndex = b.readerIndex();
+    LOG.debug("{}, lengthIndex = {}, readerIndex = {}",
+        b, lengthIndex, readerIndex);
+    if (lengthIndex > readerIndex) {
+      b.readerIndex(lengthIndex);
+    } else {
+      Preconditions.checkState(lengthIndex == readerIndex);
+    }
+    RatisHelper.debug(b, "readProtoLength", LOG);
+    return b.nioBuffer().getInt();
+  }
+
+  static ContainerCommandRequestProto readPutBlockRequest(ByteBuf b)
+      throws IOException {
+    //   readerIndex   protoIndex   lengthIndex    readerIndex+readableBytes
+    //         V            V             V                              V
+    // format: |--- data ---|--- proto ---|--- proto length (4 bytes) ---|
+    final int readerIndex = b.readerIndex();
+    final int lengthIndex = readerIndex + b.readableBytes() - 4;
+    final int protoLength = readProtoLength(b.duplicate(), lengthIndex);
+    final int protoIndex = lengthIndex - protoLength;
+
+    final ContainerCommandRequestProto proto;
+    try {
+      proto = readPutBlockRequest(b.slice(protoIndex, protoLength).nioBuffer());
+    } catch (Throwable t) {
+      RatisHelper.debug(b, "catch", LOG);
+      throw new IOException("Failed to readPutBlockRequest from " + b
+          + ": readerIndex=" + readerIndex
+          + ", protoIndex=" + protoIndex
+          + ", protoLength=" + protoLength
+          + ", lengthIndex=" + lengthIndex, t);
+    }
+
+    // set index for reading data
+    b.writerIndex(protoIndex);
+
+    return proto;
+  }
+
+  private static ContainerCommandRequestProto readPutBlockRequest(ByteBuffer b)
+      throws IOException {
+    RatisHelper.debug(b, "readPutBlockRequest", LOG);
+    final ByteString byteString = ByteString.copyFrom(b);
+
+    final ContainerCommandRequestProto request =
+        ContainerCommandRequestMessage.toProto(byteString, null);
+
+    if (!request.hasPutBlock()) {
+      throw new StorageContainerException(
+          "Malformed PutBlock request. trace ID: " + request.getTraceID(),
+          ContainerProtos.Result.MALFORMED_REQUEST);
+    }
+    return request;
+  }
 }
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/StreamDataChannelBase.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/StreamDataChannelBase.java
index b31e2ccbf4..9829033248 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/StreamDataChannelBase.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/StreamDataChannelBase.java
@@ -79,8 +79,7 @@ abstract class StreamDataChannelBase implements StateMachine.DataChannel {
     randomAccessFile.close();
   }
 
-  @Override
-  public int write(ByteBuffer src) throws IOException {
+  final int writeFileChannel(ByteBuffer src) throws IOException {
     final int writeBytes = getChannel().write(src);
     metrics.incContainerBytesStats(getType(), writeBytes);
     containerData.updateWriteStats(writeBytes, false);
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/impl/TestKeyValueStreamDataChannel.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/impl/TestKeyValueStreamDataChannel.java
new file mode 100644
index 0000000000..d252b1cb1b
--- /dev/null
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/impl/TestKeyValueStreamDataChannel.java
@@ -0,0 +1,313 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ *  with the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ */
+
+package org.apache.hadoop.ozone.container.keyvalue.impl;
+
+import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.BlockData;
+import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerCommandRequestProto;
+import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.DatanodeBlockID;
+import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.PutBlockRequestProto;
+import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Type;
+import org.apache.hadoop.hdds.ratis.ContainerCommandRequestMessage;
+import org.apache.hadoop.ozone.container.keyvalue.impl.KeyValueStreamDataChannel.Buffers;
+import org.apache.hadoop.ozone.container.keyvalue.impl.KeyValueStreamDataChannel.WriteMethod;
+import org.apache.ratis.client.api.DataStreamOutput;
+import org.apache.ratis.io.FilePositionCount;
+import org.apache.ratis.io.StandardWriteOption;
+import org.apache.ratis.io.WriteOption;
+import org.apache.ratis.proto.RaftProtos.CommitInfoProto;
+import org.apache.ratis.proto.RaftProtos.DataStreamPacketHeaderProto;
+import org.apache.ratis.protocol.ClientId;
+import org.apache.ratis.protocol.DataStreamReply;
+import org.apache.ratis.protocol.RaftClientReply;
+import org.apache.ratis.thirdparty.io.netty.buffer.ByteBuf;
+import org.apache.ratis.thirdparty.io.netty.buffer.Unpooled;
+import org.apache.ratis.util.ReferenceCountedObject;
+import org.junit.Assert;
+import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.IOException;
+import java.nio.ByteBuffer;
+import java.nio.channels.WritableByteChannel;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.List;
+import java.util.Random;
+import java.util.concurrent.CompletableFuture;
+import java.util.concurrent.CompletionException;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.ThreadLocalRandom;
+
+import static org.apache.hadoop.hdds.scm.storage.BlockDataStreamOutput.PUT_BLOCK_REQUEST_LENGTH_MAX;
+import static org.apache.hadoop.hdds.scm.storage.BlockDataStreamOutput.executePutBlockClose;
+import static org.apache.hadoop.hdds.scm.storage.BlockDataStreamOutput.getProtoLength;
+import static org.apache.hadoop.ozone.container.keyvalue.impl.KeyValueStreamDataChannel.closeBuffers;
+import static org.apache.hadoop.ozone.container.keyvalue.impl.KeyValueStreamDataChannel.readPutBlockRequest;
+import static org.apache.hadoop.ozone.container.keyvalue.impl.KeyValueStreamDataChannel.writeBuffers;
+
+/** For testing {@link KeyValueStreamDataChannel}. */
+public class TestKeyValueStreamDataChannel {
+  public static final Logger LOG =
+      LoggerFactory.getLogger(TestKeyValueStreamDataChannel.class);
+
+  static final ContainerCommandRequestProto PUT_BLOCK_PROTO
+      = ContainerCommandRequestProto.newBuilder()
+      .setCmdType(Type.PutBlock)
+      .setPutBlock(PutBlockRequestProto.newBuilder().setBlockData(
+          BlockData.newBuilder().setBlockID(DatanodeBlockID.newBuilder()
+              .setContainerID(222).setLocalID(333).build()).build()))
+      .setDatanodeUuid("datanodeId")
+      .setContainerID(111L)
+      .build();
+  static final int PUT_BLOCK_PROTO_SIZE = PUT_BLOCK_PROTO.toByteString().size();
+  static {
+    LOG.info("PUT_BLOCK_PROTO_SIZE = {}", PUT_BLOCK_PROTO_SIZE);
+  }
+
+  @Test
+  public void testSerialization() throws Exception {
+    final int max = PUT_BLOCK_REQUEST_LENGTH_MAX;
+    final ByteBuffer putBlockBuf = ContainerCommandRequestMessage.toMessage(
+        PUT_BLOCK_PROTO, null).getContent().asReadOnlyByteBuffer();
+    final ByteBuffer protoLengthBuf = getProtoLength(putBlockBuf, max);
+
+    // random data size
+    final int dataSize = ThreadLocalRandom.current().nextInt(1000) + 100;
+    final byte[] data = new byte[dataSize];
+
+    //serialize
+    final ByteBuf buf = Unpooled.buffer(max);
+    buf.writeBytes(data);
+    buf.writeBytes(putBlockBuf);
+    buf.writeBytes(protoLengthBuf);
+
+    final ContainerCommandRequestProto proto = readPutBlockRequest(buf);
+    Assert.assertEquals(PUT_BLOCK_PROTO, proto);
+  }
+
+  @Test
+  public void testBuffers() throws Exception {
+    final ExecutorService executor = Executors.newFixedThreadPool(32);
+    final List<CompletableFuture<String>> futures = new ArrayList<>();
+
+    final int min = PUT_BLOCK_PROTO_SIZE + 4;
+    final int[] maxValues = {min, 2 * min, 10 * min};
+    final int[] dataSizes = {0, 10, 100, 10_000};
+    for (int max : maxValues) {
+      for (int dataSize : dataSizes) {
+        futures.add(CompletableFuture.supplyAsync(
+            () -> runTestBuffers(dataSize, max), executor));
+      }
+    }
+
+    for (CompletableFuture<String> f : futures) {
+      f.get();
+    }
+  }
+
+  static String runTestBuffers(int dataSize, int max) {
+    final int seed = ThreadLocalRandom.current().nextInt();
+    final String name = String.format("[dataSize=%d,max=%d,seed=%H]",
+        dataSize, max, seed);
+    LOG.info(name);
+    try {
+      runTestBuffers(dataSize, max, seed, name);
+    } catch (Throwable t) {
+      throw new CompletionException("Failed " + name, t);
+    }
+    return name;
+  }
+
+  static void runTestBuffers(int dataSize, int max, int seed, String name)
+      throws Exception {
+    Assert.assertTrue(max >= PUT_BLOCK_PROTO_SIZE);
+
+    // random data
+    final byte[] data = new byte[dataSize];
+    final Random random = new Random(seed);
+    random.nextBytes(data);
+
+    // write output
+    final Buffers buffers = new Buffers(max);
+    final Output out = new Output(buffers);
+    for (int offset = 0; offset < dataSize;) {
+      final int randomLength = random.nextInt(4 * max);
+      final int length = Math.min(randomLength, dataSize - offset);
+      LOG.info("{}: offset = {}, length = {}", name, offset, length);
+      final ByteBuffer b = ByteBuffer.wrap(data, offset, length);
+      final DataStreamReply writeReply = out.writeAsync(b).get();
+      assertReply(writeReply, length, null);
+      offset += length;
+    }
+
+    // close
+    final DataStreamReply closeReply = executePutBlockClose(
+        PUT_BLOCK_PROTO, max, out).get();
+    assertReply(closeReply, 0, PUT_BLOCK_PROTO);
+
+    // check output
+    final ByteBuf outBuf = out.getOutBuf();
+    LOG.info("outBuf = {}", outBuf);
+    Assert.assertEquals(dataSize, outBuf.readableBytes());
+    for (int i = 0; i < dataSize; i++) {
+      Assert.assertEquals(data[i], outBuf.readByte());
+    }
+    outBuf.release();
+  }
+
+  static void assertReply(DataStreamReply reply, int byteWritten,
+      ContainerCommandRequestProto proto) {
+    Assert.assertTrue(reply.isSuccess());
+    Assert.assertEquals(byteWritten, reply.getBytesWritten());
+    Assert.assertEquals(proto, ((Reply)reply).getPutBlockRequest());
+  }
+
+  static class Output implements DataStreamOutput {
+    private final Buffers buffers;
+    private final ByteBuf outBuf = Unpooled.buffer();
+    private final WriteMethod writeMethod = src -> {
+      final int remaining = src.remaining();
+      outBuf.writeBytes(src);
+      return remaining;
+    };
+
+    Output(Buffers buffers) {
+      this.buffers = buffers;
+    }
+
+    ByteBuf getOutBuf() {
+      return outBuf;
+    }
+
+    @Override
+    public CompletableFuture<DataStreamReply> writeAsync(
+        ByteBuffer src, WriteOption... writeOptions) {
+      final int written;
+      try {
+        written = writeBuffers(
+            ReferenceCountedObject.wrap(src, () -> { }, () -> { }),
+            buffers, writeMethod);
+      } catch (IOException e) {
+        return completeExceptionally(e);
+      }
+      if (WriteOption.containsOption(writeOptions, StandardWriteOption.CLOSE)) {
+        return closeAsync();
+      }
+      return CompletableFuture.completedFuture(
+          new Reply(true, written));
+    }
+
+    @Override
+    public CompletableFuture<DataStreamReply> closeAsync() {
+      final ContainerCommandRequestProto putBlockRequest;
+      try {
+        putBlockRequest = closeBuffers(buffers, writeMethod);
+      } catch (IOException e) {
+        return completeExceptionally(e);
+      }
+      return CompletableFuture.completedFuture(
+          new Reply(true, 0, putBlockRequest));
+    }
+
+    @Override
+    public CompletableFuture<DataStreamReply> writeAsync(
+        FilePositionCount filePositionCount, WriteOption... writeOptions) {
+      throw new UnsupportedOperationException();
+    }
+
+    @Override
+    public CompletableFuture<RaftClientReply> getRaftClientReplyFuture() {
+      throw new UnsupportedOperationException();
+    }
+
+    @Override
+    public WritableByteChannel getWritableByteChannel() {
+      throw new UnsupportedOperationException();
+    }
+  }
+
+  static class Reply implements DataStreamReply {
+    private final boolean success;
+    private final long bytesWritten;
+    private final ContainerCommandRequestProto putBlockRequest;
+
+    Reply(boolean success, long bytesWritten) {
+      this(success, bytesWritten, null);
+    }
+
+    Reply(boolean success, long bytesWritten,
+        ContainerCommandRequestProto putBlockRequest) {
+      this.success = success;
+      this.bytesWritten = bytesWritten;
+      this.putBlockRequest = putBlockRequest;
+    }
+
+    ContainerCommandRequestProto getPutBlockRequest() {
+      return putBlockRequest;
+    }
+
+    @Override
+    public boolean isSuccess() {
+      return success;
+    }
+
+    @Override
+    public long getBytesWritten() {
+      return bytesWritten;
+    }
+
+    @Override
+    public Collection<CommitInfoProto> getCommitInfos() {
+      throw new UnsupportedOperationException();
+    }
+
+    @Override
+    public ClientId getClientId() {
+      throw new UnsupportedOperationException();
+    }
+
+    @Override
+    public DataStreamPacketHeaderProto.Type getType() {
+      throw new UnsupportedOperationException();
+    }
+
+    @Override
+    public long getStreamId() {
+      throw new UnsupportedOperationException();
+    }
+
+    @Override
+    public long getStreamOffset() {
+      throw new UnsupportedOperationException();
+    }
+
+    @Override
+    public long getDataLength() {
+      throw new UnsupportedOperationException();
+    }
+  }
+
+  static CompletableFuture<DataStreamReply> completeExceptionally(Throwable t) {
+    final CompletableFuture<DataStreamReply> f = new CompletableFuture<>();
+    f.completeExceptionally(t);
+    return f;
+  }
+}
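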
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBlockDataStreamOutput.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBlockDataStreamOutput.java
index 65f7348740..c8a0115a80 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBlockDataStreamOutput.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBlockDataStreamOutput.java
@@ -120,7 +120,7 @@ public class TestBlockDataStreamOutput {
     objectStore.getVolume(volumeName).createBucket(bucketName);
   }
 
-  private String getKeyName() {
+  static String getKeyName() {
     return UUID.randomUUID().toString();
   }
 
@@ -158,13 +158,11 @@ public class TestBlockDataStreamOutput {
     testWriteWithFailure(blockSize + 50);
   }
 
-  private void testWrite(int dataLength) throws Exception {
+  static void testWrite(int dataLength) throws Exception {
     String keyName = getKeyName();
     OzoneDataStreamOutput key = createKey(
         keyName, ReplicationType.RATIS, dataLength);
-    byte[] data =
-        ContainerTestHelper.getFixedLengthString(keyString, dataLength)
-            .getBytes(UTF_8);
+    final byte[] data = ContainerTestHelper.generateData(dataLength, false);
     key.write(ByteBuffer.wrap(data));
     // now close the stream, It will update the key length.
     key.close();
@@ -221,14 +219,14 @@ public class TestBlockDataStreamOutput {
   }
 
 
-  private OzoneDataStreamOutput createKey(String keyName, ReplicationType type,
+  static OzoneDataStreamOutput createKey(String keyName, ReplicationType type,
       long size) throws Exception {
     return TestHelper.createStreamKey(
         keyName, type, size, objectStore, volumeName, bucketName);
   }
-  private void validateData(String keyName, byte[] data) throws Exception {
-    TestHelper
-        .validateData(keyName, data, objectStore, volumeName, bucketName);
+  static void validateData(String keyName, byte[] data) throws Exception {
+    TestHelper.validateData(
+        keyName, data, objectStore, volumeName, bucketName);
   }
 
 




[ozone] 25/38: HDDS-6298. Add XceiverServerRatis stream config (#3070)

Posted by sz...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

szetszwo pushed a commit to branch HDDS-4454
in repository https://gitbox.apache.org/repos/asf/ozone.git

commit 1a31d6beef30bcc6917adb1475e2d4c45b01914c
Author: hao guo <gu...@360.cn>
AuthorDate: Fri Feb 11 11:25:51 2022 +0800

    HDDS-6298. Add XceiverServerRatis stream config (#3070)
    
    (cherry picked from commit 113a56e4ce7be24af9260f9fd9a66bfde2b7012d)
    (cherry picked from commit f59d30da451fe29b0486b4e9507ae74bd184f440)
---
 .../src/main/java/org/apache/hadoop/hdds/ratis/RatisHelper.java    | 7 ++++++-
 1 file changed, 6 insertions(+), 1 deletion(-)

diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/ratis/RatisHelper.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/ratis/RatisHelper.java
index 43b34a1563..4c3343d2fe 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/ratis/RatisHelper.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/ratis/RatisHelper.java
@@ -340,6 +340,10 @@ public final class RatisHelper {
     return key.startsWith(NettyConfigKeys.DataStream.PREFIX);
   }
 
+  private static boolean isStreamClientConfig(String key) {
+    return key.startsWith(RaftClientConfigKeys.DataStream.PREFIX);
+  }
+
   /**
    * Set all server properties matching with prefix
    * {@link RatisHelper#HDDS_DATANODE_RATIS_PREFIX_KEY} in
@@ -354,7 +358,8 @@ public final class RatisHelper {
         getDatanodeRatisPrefixProps(ozoneConf);
     ratisServerConf.forEach((key, val) -> {
       // Exclude ratis client configuration.
-      if (!isClientConfig(key)) {
+      if (isNettyStreamConfig(key) || isStreamClientConfig(key) ||
+          !isClientConfig(key)) {
         raftProperties.set(key, val);
       }
     });
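
    The effect of this filter is easiest to see on concrete keys: everything
    except plain Ratis client settings is copied into the server's
    RaftProperties, and datastream-related keys are now explicitly let
    through even though they sit under the client prefix. A minimal sketch
    (the prefix strings below are illustrative stand-ins for the Ratis
    constants NettyConfigKeys.DataStream.PREFIX,
    RaftClientConfigKeys.DataStream.PREFIX and RaftClientConfigKeys.PREFIX):

    public final class StreamConfigFilterSketch {
      static final String NETTY_STREAM_PREFIX = "raft.netty.datastream";
      static final String CLIENT_STREAM_PREFIX = "raft.client.data-stream";
      static final String CLIENT_PREFIX = "raft.client";

      // Mirrors the condition in the server-side forEach above.
      static boolean acceptForServer(String key) {
        return key.startsWith(NETTY_STREAM_PREFIX)
            || key.startsWith(CLIENT_STREAM_PREFIX)
            || !key.startsWith(CLIENT_PREFIX);
      }

      public static void main(String[] args) {
        System.out.println(
            acceptForServer("raft.server.rpc.request.timeout"));   // true
        System.out.println(
            acceptForServer(CLIENT_STREAM_PREFIX + ".example"));   // true, kept by this patch
        System.out.println(
            acceptForServer("raft.client.rpc.request.timeout"));   // false
      }
    }
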




[ozone] 34/38: HDDS-5666. Add option to createKey via streaming api in Freon (#2574)

Posted by sz...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

szetszwo pushed a commit to branch HDDS-4454
in repository https://gitbox.apache.org/repos/asf/ozone.git

commit 4a5b68370b27a13740dcb9bff7ff5f1641cede2e
Author: Sadanand Shenoy <sa...@gmail.com>
AuthorDate: Fri Apr 8 17:38:09 2022 +0530

    HDDS-5666. Add option to createKey via streaming api in Freon (#2574)
    
    (cherry picked from commit 51b5395786363085d4a5cf3322d1a2d4730c6999)
    (cherry picked from commit a2501c3de3052a6bfc9729a73fbb99453dbfdd72)
---
 .../hadoop/ozone/freon/ContentGenerator.java       | 18 ++++++++++++++
 .../ozone/freon/OzoneClientKeyGenerator.java       | 29 +++++++++++++++++++++-
 2 files changed, 46 insertions(+), 1 deletion(-)

diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/ContentGenerator.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/ContentGenerator.java
index 92f7ae4b2e..b01c12f6b3 100644
--- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/ContentGenerator.java
+++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/ContentGenerator.java
@@ -18,10 +18,12 @@ package org.apache.hadoop.ozone.freon;
 
 import java.io.IOException;
 import java.io.OutputStream;
+import java.nio.ByteBuffer;
 import java.nio.charset.StandardCharsets;
 
 import com.google.common.annotations.VisibleForTesting;
 import org.apache.commons.lang3.RandomStringUtils;
+import org.apache.hadoop.ozone.client.io.OzoneDataStreamOutput;
 
 /**
  * Utility class to write random keys from a limited buffer.
@@ -81,6 +83,22 @@ public class ContentGenerator {
     }
   }
 
+  /**
+   * Write the required bytes to the streaming output stream, then close it.
+   */
+  public void write(OzoneDataStreamOutput out) throws IOException {
+    for (long nrRemaining = keySize;
+         nrRemaining > 0; nrRemaining -= bufferSize) {
+      int curSize = (int) Math.min(bufferSize, nrRemaining);
+      for (int i = 0; i < curSize; i += copyBufferSize) {
+        ByteBuffer bb =
+            ByteBuffer.wrap(buffer, i, Math.min(copyBufferSize, curSize - i));
+        out.write(bb);
+      }
+    }
+    out.close();
+  }
+
   @VisibleForTesting
   byte[] getBuffer() {
     return buffer;
diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/OzoneClientKeyGenerator.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/OzoneClientKeyGenerator.java
index 3e07f3b22e..b0184bff67 100644
--- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/OzoneClientKeyGenerator.java
+++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/OzoneClientKeyGenerator.java
@@ -24,10 +24,12 @@ import java.util.concurrent.Callable;
 import org.apache.hadoop.hdds.cli.HddsVersionProvider;
 import org.apache.hadoop.hdds.client.ReplicationConfig;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
 import org.apache.hadoop.ozone.client.OzoneBucket;
 import org.apache.hadoop.ozone.client.OzoneClient;
 
 import com.codahale.metrics.Timer;
+import org.apache.hadoop.ozone.client.io.OzoneDataStreamOutput;
 import picocli.CommandLine.Command;
 import picocli.CommandLine.Mixin;
 import picocli.CommandLine.Option;
@@ -74,6 +76,12 @@ public class OzoneClientKeyGenerator extends BaseFreonGenerator
   @Mixin
   private FreonReplicationOptions replication;
 
+  @Option(
+      names = {"--enable-streaming", "--stream"},
+      description = "Specify whether the write will be through ratis streaming"
+  )
+  private boolean enableRatisStreaming = false;
+
   private Timer timer;
 
   private OzoneBucket bucket;
@@ -101,7 +109,11 @@ public class OzoneClientKeyGenerator extends BaseFreonGenerator
 
       timer = getMetrics().timer("key-create");
 
-      runTests(this::createKey);
+      if (enableRatisStreaming) {
+        runTests(this::createStreamKey);
+      } else {
+        runTests(this::createKey);
+      }
     }
     return null;
   }
@@ -118,4 +130,19 @@ public class OzoneClientKeyGenerator extends BaseFreonGenerator
       return null;
     });
   }
+
+  private void createStreamKey(long counter) throws Exception {
+    final ReplicationConfig replicationConfig = ReplicationConfig
+        .fromProtoTypeAndFactor(HddsProtos.ReplicationType.RATIS,
+            HddsProtos.ReplicationFactor.THREE);
+    final String key = generateObjectName(counter);
+
+    timer.time(() -> {
+      try (OzoneDataStreamOutput stream = bucket
+          .createStreamKey(key, keySize, replicationConfig, metadata)) {
+        contentGenerator.write(stream);
+      }
+      return null;
+    });
+  }
 }
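
    The streaming path in ContentGenerator reuses one byte[] and wraps
    copyBufferSize-sized slices of it into ByteBuffers. A stand-alone sketch
    of that slicing loop (plain WritableByteChannel in place of
    OzoneDataStreamOutput; the class name and the sizes in main are
    illustrative):

    import java.io.IOException;
    import java.nio.ByteBuffer;
    import java.nio.channels.Channels;
    import java.nio.channels.WritableByteChannel;

    public final class ChunkedWriteSketch {
      // Writes keySize bytes from one shared buffer, at most
      // copyBufferSize bytes per write, like ContentGenerator#write.
      static void write(WritableByteChannel out, byte[] buffer,
          long keySize, int copyBufferSize) throws IOException {
        final int bufferSize = buffer.length;
        for (long remaining = keySize; remaining > 0;
             remaining -= bufferSize) {
          final int curSize = (int) Math.min(bufferSize, remaining);
          for (int i = 0; i < curSize; i += copyBufferSize) {
            out.write(ByteBuffer.wrap(
                buffer, i, Math.min(copyBufferSize, curSize - i)));
          }
        }
      }

      public static void main(String[] args) throws IOException {
        // Emits 25 bytes by cycling a 10-byte buffer in 4-byte slices.
        write(Channels.newChannel(System.out),
            "0123456789".getBytes(), 25, 4);
      }
    }
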




[ozone] 27/38: HDDS-6282. Fix BlockDataStreamOutput#doFlushIfNeeded NPE (#3060)

Posted by sz...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

szetszwo pushed a commit to branch HDDS-4454
in repository https://gitbox.apache.org/repos/asf/ozone.git

commit cca5e3b020001f987698d1c346ea8664eed281d4
Author: hao guo <gu...@360.cn>
AuthorDate: Tue Feb 15 19:22:13 2022 +0800

    HDDS-6282. Fix BlockDataStreamOutput#doFlushIfNeeded NPE (#3060)
    
    (cherry picked from commit 8f8f166615d0df548233690dbf178d4c966d9fe1)
    (cherry picked from commit 9d24f5870443ad3a9f8b6a508bcc1426e3c8bc4f)
---
 .../java/org/apache/hadoop/hdds/scm/storage/BlockDataStreamOutput.java | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockDataStreamOutput.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockDataStreamOutput.java
index 9ac43300f7..8dd9e6b50e 100644
--- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockDataStreamOutput.java
+++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockDataStreamOutput.java
@@ -318,7 +318,8 @@ public class BlockDataStreamOutput implements ByteBufferStreamOutput {
     // the bufferFull condition in async write path.
     long streamWindow = config.getStreamWindowSize() / config
         .getDataStreamMinPacketSize();
-    if (!bufferList.isEmpty() && bufferList.size() % boundary == 0) {
+    if (!bufferList.isEmpty() && bufferList.size() % boundary == 0 &&
+        buffersForPutBlock != null && !buffersForPutBlock.isEmpty()) {
       updateFlushLength();
       executePutBlock(false, false);
     }




[ozone] 31/38: HDDS-5798. [Ozone-Streaming] Setup TlsConf parameters. (#3207)

Posted by sz...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

szetszwo pushed a commit to branch HDDS-4454
in repository https://gitbox.apache.org/repos/asf/ozone.git

commit 8e6be787c972581be690aca1fde1a7787b85af02
Author: Tsz-Wo Nicholas Sze <sz...@apache.org>
AuthorDate: Sun Mar 27 17:07:28 2022 +0800

    HDDS-5798. [Ozone-Streaming] Setup TlsConf parameters. (#3207)
    
    (cherry picked from commit 181a17eab3e704ca8b11bf9602b03bdd9672e033)
    (cherry picked from commit 22e77a7b3d963bae49b93ab26a4eedb615cd1511)
---
 .../common/src/main/java/org/apache/hadoop/hdds/ratis/RatisHelper.java | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/ratis/RatisHelper.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/ratis/RatisHelper.java
index 4c3343d2fe..c50a184285 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/ratis/RatisHelper.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/ratis/RatisHelper.java
@@ -271,6 +271,7 @@ public final class RatisHelper {
       GrpcTlsConfig tlsConfig) {
     if (tlsConfig != null) {
       GrpcConfigKeys.Client.setTlsConf(parameters, tlsConfig);
+      NettyConfigKeys.DataStream.Client.setTlsConf(parameters, tlsConfig);
     }
   }
 
@@ -281,6 +282,8 @@ public final class RatisHelper {
       GrpcConfigKeys.Server.setTlsConf(parameters, serverConf);
       GrpcConfigKeys.TLS.setConf(parameters, serverConf);
       setAdminTlsConf(parameters, serverConf);
+
+      NettyConfigKeys.DataStream.Server.setTlsConf(parameters, serverConf);
     }
     setClientTlsConf(parameters, clientConf);
     return parameters;




[ozone] 28/38: HDDS-6229. [Ozone-Streaming] Data Channel abstraction on datanode (#3023)

Posted by sz...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

szetszwo pushed a commit to branch HDDS-4454
in repository https://gitbox.apache.org/repos/asf/ozone.git

commit 3dde9e27b916daf73c80c5304a61477683ce4d21
Author: hao guo <gu...@360.cn>
AuthorDate: Tue Feb 15 23:35:46 2022 +0800

    HDDS-6229. [Ozone-Streaming] Data Channel abstraction on datanode (#3023)
    
    (cherry picked from commit 3cc2a7e21be12d0a88897a4a95fc5f80f37cd100)
    (cherry picked from commit de690f6c259437bdce4389c7b06b189fa4cd20c0)
---
 .../keyvalue/impl/KeyValueStreamDataChannel.java   | 56 ++--------------------
 ...DataChannel.java => StreamDataChannelBase.java} | 39 ++++++++-------
 2 files changed, 27 insertions(+), 68 deletions(-)

diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/KeyValueStreamDataChannel.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/KeyValueStreamDataChannel.java
index c0570f5d4d..14ead4ea86 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/KeyValueStreamDataChannel.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/KeyValueStreamDataChannel.java
@@ -22,69 +22,21 @@ import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
 import org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException;
 import org.apache.hadoop.ozone.container.common.helpers.ContainerMetrics;
 import org.apache.hadoop.ozone.container.common.impl.ContainerData;
-import org.apache.ratis.statemachine.StateMachine;
 
 import java.io.File;
-import java.io.FileNotFoundException;
-import java.io.IOException;
-import java.io.RandomAccessFile;
-import java.nio.ByteBuffer;
 
 /**
  * This class is used to get the DataChannel for streaming.
  */
-class KeyValueStreamDataChannel implements StateMachine.DataChannel {
-  private final RandomAccessFile randomAccessFile;
-  private final File file;
-
-  private final ContainerData containerData;
-  private final ContainerMetrics metrics;
-
+class KeyValueStreamDataChannel extends StreamDataChannelBase {
   KeyValueStreamDataChannel(File file, ContainerData containerData,
                             ContainerMetrics metrics)
       throws StorageContainerException {
-    try {
-      this.file = file;
-      this.randomAccessFile = new RandomAccessFile(file, "rw");
-    } catch (FileNotFoundException e) {
-      throw new StorageContainerException("BlockFile not exists with " +
-          "container Id " + containerData.getContainerID() +
-          " file " + file.getAbsolutePath(),
-          ContainerProtos.Result.IO_EXCEPTION);
-    }
-    this.containerData = containerData;
-    this.metrics = metrics;
-  }
-
-  @Override
-  public void force(boolean metadata) throws IOException {
-    randomAccessFile.getChannel().force(metadata);
-  }
-
-  @Override
-  public int write(ByteBuffer src) throws IOException {
-    int writeBytes = randomAccessFile.getChannel().write(src);
-    metrics
-        .incContainerBytesStats(ContainerProtos.Type.StreamWrite, writeBytes);
-    containerData.updateWriteStats(writeBytes, false);
-    return writeBytes;
-  }
-
-  @Override
-  public boolean isOpen() {
-    return randomAccessFile.getChannel().isOpen();
-  }
-
-  @Override
-  public void close() throws IOException {
-    randomAccessFile.close();
+    super(file, containerData, metrics);
   }
 
   @Override
-  public String toString() {
-    return "KeyValueStreamDataChannel{" +
-        "File=" + file.getAbsolutePath() +
-        ", containerID=" + containerData.getContainerID() +
-        '}';
+  ContainerProtos.Type getType() {
+    return ContainerProtos.Type.StreamWrite;
   }
 }
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/KeyValueStreamDataChannel.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/StreamDataChannelBase.java
similarity index 77%
copy from hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/KeyValueStreamDataChannel.java
copy to hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/StreamDataChannelBase.java
index c0570f5d4d..b31e2ccbf4 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/KeyValueStreamDataChannel.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/StreamDataChannelBase.java
@@ -29,19 +29,21 @@ import java.io.FileNotFoundException;
 import java.io.IOException;
 import java.io.RandomAccessFile;
 import java.nio.ByteBuffer;
+import java.nio.channels.FileChannel;
 
 /**
- * This class is used to get the DataChannel for streaming.
+ * For write state machine data.
  */
-class KeyValueStreamDataChannel implements StateMachine.DataChannel {
+abstract class StreamDataChannelBase implements StateMachine.DataChannel {
   private final RandomAccessFile randomAccessFile;
+
   private final File file;
 
   private final ContainerData containerData;
   private final ContainerMetrics metrics;
 
-  KeyValueStreamDataChannel(File file, ContainerData containerData,
-                            ContainerMetrics metrics)
+  StreamDataChannelBase(File file, ContainerData containerData,
+                        ContainerMetrics metrics)
       throws StorageContainerException {
     try {
       this.file = file;
@@ -56,23 +58,20 @@ class KeyValueStreamDataChannel implements StateMachine.DataChannel {
     this.metrics = metrics;
   }
 
-  @Override
-  public void force(boolean metadata) throws IOException {
-    randomAccessFile.getChannel().force(metadata);
+  abstract ContainerProtos.Type getType();
+
+  private FileChannel getChannel() {
+    return randomAccessFile.getChannel();
   }
 
   @Override
-  public int write(ByteBuffer src) throws IOException {
-    int writeBytes = randomAccessFile.getChannel().write(src);
-    metrics
-        .incContainerBytesStats(ContainerProtos.Type.StreamWrite, writeBytes);
-    containerData.updateWriteStats(writeBytes, false);
-    return writeBytes;
+  public final void force(boolean metadata) throws IOException {
+    getChannel().force(metadata);
   }
 
   @Override
-  public boolean isOpen() {
-    return randomAccessFile.getChannel().isOpen();
+  public final boolean isOpen() {
+    return getChannel().isOpen();
   }
 
   @Override
@@ -80,9 +79,17 @@ class KeyValueStreamDataChannel implements StateMachine.DataChannel {
     randomAccessFile.close();
   }
 
+  @Override
+  public int write(ByteBuffer src) throws IOException {
+    final int writeBytes = getChannel().write(src);
+    metrics.incContainerBytesStats(getType(), writeBytes);
+    containerData.updateWriteStats(writeBytes, false);
+    return writeBytes;
+  }
+
   @Override
   public String toString() {
-    return "KeyValueStreamDataChannel{" +
+    return getClass().getSimpleName() + "{" +
         "File=" + file.getAbsolutePath() +
         ", containerID=" + containerData.getContainerID() +
         '}';
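
    The split above is a plain template method: StreamDataChannelBase owns
    the RandomAccessFile plus the force/isOpen/write plumbing, and a concrete
    channel only declares which ContainerProtos.Type its bytes are accounted
    under. A shape-only sketch with stand-in types (not the Ozone classes):

    import java.nio.ByteBuffer;

    abstract class ChannelBaseSketch {
      abstract String getType();  // stands in for ContainerProtos.Type

      final int write(ByteBuffer src) {  // shared I/O and bookkeeping
        final int n = src.remaining();
        src.position(src.limit());       // pretend the bytes hit disk
        System.out.println(n + " bytes recorded under " + getType());
        return n;
      }
    }

    final class KeyValueChannelSketch extends ChannelBaseSketch {
      @Override
      String getType() {
        return "StreamWrite";
      }

      public static void main(String[] args) {
        new KeyValueChannelSketch().write(ByteBuffer.allocate(8));
      }
    }
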




[ozone] 10/38: HDDS-5849. [Ozone-Streaming]Write exceptions occur after checksum is enabled (#2729)

Posted by sz...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

szetszwo pushed a commit to branch HDDS-4454
in repository https://gitbox.apache.org/repos/asf/ozone.git

commit b1f06e8480b389d9770b0f261833f8db25112dc2
Author: micah zhao <mi...@tencent.com>
AuthorDate: Tue Oct 12 14:05:07 2021 +0800

    HDDS-5849. [Ozone-Streaming]Write exceptions occur after checksum is enabled (#2729)
    
    (cherry picked from commit aea1d841fb9d0a9e5d7093b6b691586306237a8f)
    (cherry picked from commit 79507fe72f98a0f02a0cad88066437fbb7d63bad)
---
 .../java/org/apache/hadoop/hdds/scm/storage/BlockDataStreamOutput.java | 3 ++-
 .../org/apache/hadoop/ozone/client/rpc/TestBlockDataStreamOutput.java  | 2 --
 2 files changed, 2 insertions(+), 3 deletions(-)

diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockDataStreamOutput.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockDataStreamOutput.java
index 41e2c48bbb..2ae0ba7525 100644
--- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockDataStreamOutput.java
+++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockDataStreamOutput.java
@@ -519,7 +519,8 @@ public class BlockDataStreamOutput implements ByteBufferStreamOutput {
       throws IOException {
     final int effectiveChunkSize = buf.remaining();
     final long offset = chunkOffset.getAndAdd(effectiveChunkSize);
-    ChecksumData checksumData = checksum.computeChecksum(buf);
+    ChecksumData checksumData = checksum.computeChecksum(
+        buf.asReadOnlyBuffer());
     ChunkInfo chunkInfo = ChunkInfo.newBuilder()
         .setChunkName(blockID.get().getLocalID() + "_chunk_" + ++chunkIndex)
         .setOffset(offset)
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBlockDataStreamOutput.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBlockDataStreamOutput.java
index 6d5401d651..d3b2d22577 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBlockDataStreamOutput.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBlockDataStreamOutput.java
@@ -20,7 +20,6 @@ package org.apache.hadoop.ozone.client.rpc;
 import org.apache.hadoop.conf.StorageUnit;
 import org.apache.hadoop.hdds.client.ReplicationType;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ChecksumType;
 import org.apache.hadoop.hdds.scm.OzoneClientConfig;
 import org.apache.hadoop.ozone.MiniOzoneCluster;
 import org.apache.hadoop.ozone.OzoneConfigKeys;
@@ -82,7 +81,6 @@ public class TestBlockDataStreamOutput {
     blockSize = 2 * maxFlushSize;
 
     OzoneClientConfig clientConfig = conf.getObject(OzoneClientConfig.class);
-    clientConfig.setChecksumType(ChecksumType.NONE);
     clientConfig.setStreamBufferFlushDelay(false);
     conf.setFromObject(clientConfig);
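
    The asReadOnlyBuffer() call matters because checksumming consumes the
    buffer it reads: if the original ByteBuffer is passed, its position is
    advanced to the limit and the subsequent chunk write sees no remaining
    bytes. A minimal reproduction of the hazard (plain java.nio with CRC32
    standing in for Ozone's Checksum; the behavior shown is the general
    ByteBuffer contract, not Ozone-specific):

    import java.nio.ByteBuffer;
    import java.util.zip.CRC32;

    public final class ReadOnlyChecksumSketch {
      static long checksum(ByteBuffer buf) {
        final CRC32 crc = new CRC32();
        crc.update(buf);  // reads buf: position advances to limit
        return crc.getValue();
      }

      public static void main(String[] args) {
        final ByteBuffer data = ByteBuffer.wrap("chunk-data".getBytes());

        checksum(data.asReadOnlyBuffer());     // the fix: independent view
        System.out.println(data.remaining());  // 10 -- data still writable

        checksum(data);                        // the bug: original consumed
        System.out.println(data.remaining());  // 0 -- nothing left to send
      }
    }
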
 




[ozone] 23/38: HDDS-6281. Update ratis version to 2.3.0-94db58b-SNAPSHOT version (#3059)

Posted by sz...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

szetszwo pushed a commit to branch HDDS-4454
in repository https://gitbox.apache.org/repos/asf/ozone.git

commit b3ec54827fea100169aaaa4d6487116e20fce705
Author: hao guo <gu...@360.cn>
AuthorDate: Thu Feb 10 09:04:29 2022 +0800

    HDDS-6281. Update ratis version to 2.3.0-94db58b-SNAPSHOT version (#3059)
    
    (cherry picked from commit 84728e0cd0ded5215800d58ecf3724cb772d8adf)
    (cherry picked from commit fdd7b641562377404c2bfe4080d2b56c789b261e)
---
 hadoop-ozone/dist/src/main/license/update-jar-report.sh | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/hadoop-ozone/dist/src/main/license/update-jar-report.sh b/hadoop-ozone/dist/src/main/license/update-jar-report.sh
index dfd68a264d..6cc6238f40 100755
--- a/hadoop-ozone/dist/src/main/license/update-jar-report.sh
+++ b/hadoop-ozone/dist/src/main/license/update-jar-report.sh
@@ -30,4 +30,4 @@ cd "$OZONE_DIST_DIR"
 
 #sed expression removes the version. Usually license is not changed with version bumps
 #jacoco and test dependencies are excluded
-find . -type f -name "*.jar" | cut -c3- | perl -wpl -e 's/-[0-9]+(.[0-9]+)*(-([0-9a-z]+-)?SNAPSHOT)?+//g; s/\.v\d+\.jar/.jar/g;' | grep -v -e jacoco -e hdds-test-utils | sort > "$SCRIPTDIR"/$REPORT_NAME
+find . -type f -name "*.jar" | cut -c3- | perl -wpl -e 's/-[0-9]+(\.[0-9]+)*(-([0-9a-z]+-)?SNAPSHOT)?+//g; s/\.v\d+\.jar/.jar/g;' | grep -v -e jacoco -e hdds-test-utils | sort > "$SCRIPTDIR"/$REPORT_NAME
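
    The escaped dot changes what the version stripper may consume. A quick
    check of the two substitutions, with Java regex standing in for the perl
    one-liner (the jar name is illustrative, and only the first substitution
    of the script is shown):

    public final class VersionStripSketch {
      public static void main(String[] args) {
        final String jar = "ratis-server-2.3.0-94db58b-SNAPSHOT.jar";
        // Unescaped dot acts as a wildcard, so "(.[0-9]+)*" can also eat
        // "-94" and leave a mangled name behind.
        System.out.println(jar.replaceAll(
            "-[0-9]+(.[0-9]+)*(-([0-9a-z]+-)?SNAPSHOT)?+", ""));
        // Escaped dot only strips real ".N" version segments, giving
        // "ratis-server.jar" as intended.
        System.out.println(jar.replaceAll(
            "-[0-9]+(\\.[0-9]+)*(-([0-9a-z]+-)?SNAPSHOT)?+", ""));
      }
    }
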




[ozone] 19/38: HDDS-6039. Define a minimum packet size during streaming writes. (#2883)

Posted by sz...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

szetszwo pushed a commit to branch HDDS-4454
in repository https://gitbox.apache.org/repos/asf/ozone.git

commit 7e61b1f64216639b4cac22125f047fe1c56b5a47
Author: Sadanand Shenoy <sa...@gmail.com>
AuthorDate: Tue Dec 21 12:22:30 2021 +0530

    HDDS-6039. Define a minimum packet size during streaming writes. (#2883)
    
    (cherry picked from commit 1f56a45e7578fdd495e852f0e449e62b1079c640)
    (cherry picked from commit 750c5f475a4a03cdb078699d5f5a2254f1f491b0)
---
 .../apache/hadoop/hdds/scm/OzoneClientConfig.java  | 16 ++++++
 .../hdds/scm/storage/BlockDataStreamOutput.java    | 57 ++++++++++++++++------
 .../hadoop/hdds/scm/storage/StreamBuffer.java      | 15 +++++-
 .../hdds/scm/storage/StreamCommitWatcher.java      |  2 +-
 .../client/io/BlockDataStreamOutputEntryPool.java  |  2 +-
 .../org/apache/hadoop/ozone/MiniOzoneCluster.java  |  6 +++
 .../apache/hadoop/ozone/MiniOzoneClusterImpl.java  |  6 +++
 .../client/rpc/TestBlockDataStreamOutput.java      | 32 ++++++++++--
 .../rpc/TestContainerStateMachineStream.java       |  1 +
 .../client/rpc/TestOzoneRpcClientAbstract.java     |  1 +
 10 files changed, 118 insertions(+), 20 deletions(-)

diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/OzoneClientConfig.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/OzoneClientConfig.java
index 715a19212b..86a725220a 100644
--- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/OzoneClientConfig.java
+++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/OzoneClientConfig.java
@@ -81,6 +81,14 @@ public class OzoneClientConfig {
       tags = ConfigTag.CLIENT)
   private long dataStreamBufferFlushSize = 16 * 1024 * 1024;
 
+  @Config(key = "datastream.min.packet.size",
+      defaultValue = "1MB",
+      type = ConfigType.SIZE,
+      description = "The maximum size of the ByteBuffer "
+          + "(used via ratis streaming)",
+      tags = ConfigTag.CLIENT)
+  private int dataStreamMinPacketSize = 1024 * 1024;
+
   @Config(key = "stream.buffer.increment",
       defaultValue = "0B",
       type = ConfigType.SIZE,
@@ -267,6 +275,14 @@ public class OzoneClientConfig {
     this.streamBufferMaxSize = streamBufferMaxSize;
   }
 
+  public int getDataStreamMinPacketSize() {
+    return dataStreamMinPacketSize;
+  }
+
+  public void setDataStreamMinPacketSize(int dataStreamMinPacketSize) {
+    this.dataStreamMinPacketSize = dataStreamMinPacketSize;
+  }
+
   public int getMaxRetryCount() {
     return maxRetryCount;
   }
diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockDataStreamOutput.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockDataStreamOutput.java
index 6f5a54354a..9fb1340527 100644
--- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockDataStreamOutput.java
+++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockDataStreamOutput.java
@@ -29,6 +29,8 @@ import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.KeyValue;
 import org.apache.hadoop.hdds.ratis.ContainerCommandRequestMessage;
 import org.apache.hadoop.hdds.scm.OzoneClientConfig;
 import org.apache.hadoop.hdds.scm.XceiverClientFactory;
+import org.apache.hadoop.hdds.scm.XceiverClientManager;
+import org.apache.hadoop.hdds.scm.XceiverClientMetrics;
 import org.apache.hadoop.hdds.scm.XceiverClientRatis;
 import org.apache.hadoop.hdds.scm.XceiverClientReply;
 import org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException;
@@ -125,7 +127,8 @@ public class BlockDataStreamOutput implements ByteBufferStreamOutput {
   private List<CompletableFuture<DataStreamReply>> futures = new ArrayList<>();
   private final long syncSize = 0; // TODO: disk sync is disabled for now
   private long syncPosition = 0;
-
+  private StreamBuffer currentBuffer;
+  private XceiverClientMetrics metrics;
   /**
    * Creates a new BlockDataStreamOutput.
    *
@@ -172,6 +175,7 @@ public class BlockDataStreamOutput implements ByteBufferStreamOutput {
     ioException = new AtomicReference<>(null);
     checksum = new Checksum(config.getChecksumType(),
         config.getBytesPerChecksum());
+    metrics = XceiverClientManager.getXceiverClientMetrics();
   }
 
   private DataStreamOutput setupStream(Pipeline pipeline) throws IOException {
@@ -257,27 +261,47 @@ public class BlockDataStreamOutput implements ByteBufferStreamOutput {
     if (len == 0) {
       return;
     }
-    int curLen = len;
-    // set limit on the number of bytes that a ByteBuffer(StreamBuffer) can hold
-    int maxBufferLen = config.getDataStreamMaxBufferSize();
-    while (curLen > 0) {
-      int writeLen = Math.min(curLen, maxBufferLen);
+    while (len > 0) {
+      allocateNewBufferIfNeeded();
+      int writeLen = Math.min(len, currentBuffer.length());
       final StreamBuffer buf = new StreamBuffer(b, off, writeLen);
+      currentBuffer.put(buf);
+      writeChunkIfNeeded();
       off += writeLen;
-      bufferList.add(buf);
-      writeChunkToContainer(buf.duplicate());
-      curLen -= writeLen;
       writtenDataLength += writeLen;
+      len -= writeLen;
       doFlushIfNeeded();
     }
   }
 
+  private void writeChunkIfNeeded() throws IOException {
+    if (currentBuffer.length() == 0) { // buffer is full (no space left)
+      writeChunk(currentBuffer);
+      currentBuffer = null;
+    }
+  }
+
+  private void writeChunk(StreamBuffer sb) throws IOException {
+    bufferList.add(sb);
+    ByteBuffer dup = sb.duplicate();
+    dup.position(0);
+    dup.limit(sb.position());
+    writeChunkToContainer(dup);
+  }
+
+  private void allocateNewBufferIfNeeded() {
+    if (currentBuffer == null) {
+      currentBuffer =
+          StreamBuffer.allocate(config.getDataStreamMinPacketSize());
+    }
+  }
+
   private void doFlushIfNeeded() throws IOException {
     Preconditions.checkArgument(config.getDataStreamBufferFlushSize() > config
         .getDataStreamMaxBufferSize());
     long boundary = config.getDataStreamBufferFlushSize() / config
         .getDataStreamMaxBufferSize();
-    if (bufferList.size() % boundary == 0) {
+    if (!bufferList.isEmpty() && bufferList.size() % boundary == 0) {
       updateFlushLength();
       executePutBlock(false, false);
     }
@@ -308,11 +332,10 @@ public class BlockDataStreamOutput implements ByteBufferStreamOutput {
     int count = 0;
     while (len > 0) {
       final StreamBuffer buf = bufferList.get(count);
-      final long writeLen = Math.min(buf.length(), len);
+      final long writeLen = Math.min(buf.position(), len);
       final ByteBuffer duplicated = buf.duplicate();
-      if (writeLen != buf.length()) {
-        duplicated.limit(Math.toIntExact(len));
-      }
+      duplicated.position(0);
+      duplicated.limit(buf.position());
       writeChunkToContainer(duplicated);
       len -= writeLen;
       count++;
@@ -449,6 +472,11 @@ public class BlockDataStreamOutput implements ByteBufferStreamOutput {
       // This can be a partially filled chunk. Since we are flushing the buffer
       // here, we just limit this buffer to the current position. So that next
       // write will happen in new buffer
+
+      if (currentBuffer != null) {
+        writeChunk(currentBuffer);
+        currentBuffer = null;
+      }
       updateFlushLength();
       executePutBlock(close, false);
     } else if (close) {
@@ -584,6 +612,7 @@ public class BlockDataStreamOutput implements ByteBufferStreamOutput {
         .setLen(effectiveChunkSize)
         .setChecksumData(checksumData.getProtoBufMessage())
         .build();
+    metrics.incrPendingContainerOpsMetrics(ContainerProtos.Type.WriteChunk);
 
     if (LOG.isDebugEnabled()) {
       LOG.debug("Writing chunk {} length {} at offset {}",
diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/StreamBuffer.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/StreamBuffer.java
index f36019e2ae..5118ea5ead 100644
--- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/StreamBuffer.java
+++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/StreamBuffer.java
@@ -27,7 +27,7 @@ public class StreamBuffer {
   private final ByteBuffer buffer;
 
   public StreamBuffer(ByteBuffer buffer) {
-    this.buffer = buffer.asReadOnlyBuffer();
+    this.buffer = buffer;
   }
 
   public StreamBuffer(ByteBuffer buffer, int offset, int length) {
@@ -43,4 +43,17 @@ public class StreamBuffer {
     return buffer.limit() - buffer.position();
   }
 
+  public int position() {
+    return buffer.position();
+  }
+
+
+  public void put(StreamBuffer sb) {
+    buffer.put(sb.buffer);
+  }
+
+  public static StreamBuffer allocate(int size) {
+    return new StreamBuffer(ByteBuffer.allocate(size));
+  }
+
 }
\ No newline at end of file
diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/StreamCommitWatcher.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/StreamCommitWatcher.java
index 3a59d07571..9ae604e951 100644
--- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/StreamCommitWatcher.java
+++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/StreamCommitWatcher.java
@@ -178,7 +178,7 @@ public class StreamCommitWatcher {
       Preconditions.checkState(commitIndexMap.containsKey(index));
       final List<StreamBuffer> buffers = commitIndexMap.remove(index);
       final long length =
-          buffers.stream().mapToLong(StreamBuffer::length).sum();
+          buffers.stream().mapToLong(StreamBuffer::position).sum();
       totalAckDataLength += length;
       // clear the future object from the future Map
       final CompletableFuture<ContainerCommandResponseProto> remove =
diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/BlockDataStreamOutputEntryPool.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/BlockDataStreamOutputEntryPool.java
index e49b0b79ad..24a046f623 100644
--- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/BlockDataStreamOutputEntryPool.java
+++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/BlockDataStreamOutputEntryPool.java
@@ -309,7 +309,7 @@ public class BlockDataStreamOutputEntryPool {
   long computeBufferData() {
     long totalDataLen =0;
     for (StreamBuffer b : bufferList){
-      totalDataLen += b.length();
+      totalDataLen += b.position();
     }
     return totalDataLen;
   }
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneCluster.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneCluster.java
index a97ea318e2..a1783a6cb5 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneCluster.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneCluster.java
@@ -328,6 +328,7 @@ public interface MiniOzoneCluster {
     protected Optional<Long> dataStreamBufferFlushSize= Optional.empty();
     protected OptionalInt dataStreamMaxBufferSize  = OptionalInt.empty();
     protected Optional<Long> streamBufferMaxSize = Optional.empty();
+    protected OptionalInt dataStreamMinPacketSize = OptionalInt.empty();
     protected Optional<Long> blockSize = Optional.empty();
     protected Optional<StorageUnit> streamBufferSizeUnit = Optional.empty();
     protected boolean includeRecon = false;
@@ -578,6 +579,11 @@ public interface MiniOzoneCluster {
       return this;
     }
 
+    public Builder setDataStreamMinPacketSize(int size) {
+      dataStreamMinPacketSize = OptionalInt.of(size);
+      return this;
+    }
+
     /**
      * Sets the block size for stream buffer.
      *
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneClusterImpl.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneClusterImpl.java
index 2c26f9725f..872db06aa5 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneClusterImpl.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneClusterImpl.java
@@ -660,6 +660,9 @@ public class MiniOzoneClusterImpl implements MiniOzoneCluster {
       if (!dataStreamMaxBufferSize.isPresent()) {
         dataStreamMaxBufferSize = OptionalInt.of(chunkSize.get());
       }
+      if (!dataStreamMinPacketSize.isPresent()) {
+        dataStreamMinPacketSize = OptionalInt.of(chunkSize.get() / 4);
+      }
       if (!blockSize.isPresent()) {
         blockSize = Optional.of(2 * streamBufferMaxSize.get());
       }
@@ -681,6 +684,9 @@ public class MiniOzoneClusterImpl implements MiniOzoneCluster {
       clientConfig.setDataStreamMaxBufferSize((int) Math.round(
           streamBufferSizeUnit.get()
               .toBytes(dataStreamMaxBufferSize.getAsInt())));
+      clientConfig.setDataStreamMinPacketSize((int) Math.round(
+          streamBufferSizeUnit.get()
+              .toBytes(dataStreamMinPacketSize.getAsInt())));
       conf.setFromObject(clientConfig);
 
       conf.setStorageSize(ScmConfigKeys.OZONE_SCM_CHUNK_SIZE_KEY,
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBlockDataStreamOutput.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBlockDataStreamOutput.java
index 5eb38a00de..c9242df8b1 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBlockDataStreamOutput.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBlockDataStreamOutput.java
@@ -107,6 +107,7 @@ public class TestBlockDataStreamOutput {
         .setDataStreamBufferFlushize(maxFlushSize)
         .setDataStreamBufferMaxSize(chunkSize)
         .setStreamBufferSizeUnit(StorageUnit.BYTES)
+        .setDataStreamMinPacketSize(2 * chunkSize / 5)
         .build();
     cluster.waitForClusterToBeReady();
     //the easiest way to create an open container is creating a key
@@ -193,7 +194,7 @@ public class TestBlockDataStreamOutput {
 
   @Test
   public void testPutBlockAtBoundary() throws Exception {
-    int dataLength = 500;
+    int dataLength = 200;
     XceiverClientMetrics metrics =
         XceiverClientManager.getXceiverClientMetrics();
     long putBlockCount = metrics.getContainerOpCountMetrics(
@@ -211,8 +212,8 @@ public class TestBlockDataStreamOutput {
         metrics.getPendingContainerOpCountMetrics(ContainerProtos.Type.PutBlock)
             <= pendingPutBlockCount + 1);
     key.close();
-    // Since data length is 500 , first putBlock will be at 400(flush boundary)
-    // and the other at 500
+    // Since data length is 200, the first putBlock will be at 160 (the
+    // flush boundary) and the second at 200.
     Assert.assertTrue(
         metrics.getContainerOpCountMetrics(ContainerProtos.Type.PutBlock)
             == putBlockCount + 2);
@@ -230,4 +231,29 @@ public class TestBlockDataStreamOutput {
         .validateData(keyName, data, objectStore, volumeName, bucketName);
   }
 
+
+  @Test
+  public void testMinPacketSize() throws Exception {
+    String keyName = getKeyName();
+    XceiverClientMetrics metrics =
+        XceiverClientManager.getXceiverClientMetrics();
+    OzoneDataStreamOutput key = createKey(keyName, ReplicationType.RATIS, 0);
+    long writeChunkCount =
+        metrics.getContainerOpCountMetrics(ContainerProtos.Type.WriteChunk);
+    byte[] data =
+        ContainerTestHelper.getFixedLengthString(keyString, chunkSize / 5)
+            .getBytes(UTF_8);
+    key.write(ByteBuffer.wrap(data));
+    // minPacketSize = 40, so the first write of 20 won't trigger a writeChunk
+    Assert.assertEquals(writeChunkCount,
+        metrics.getContainerOpCountMetrics(ContainerProtos.Type.WriteChunk));
+    key.write(ByteBuffer.wrap(data));
+    Assert.assertEquals(writeChunkCount + 1,
+        metrics.getContainerOpCountMetrics(ContainerProtos.Type.WriteChunk));
+    // now close the stream, It will update the key length.
+    key.close();
+    String dataString = new String(data, UTF_8);
+    validateData(keyName, dataString.concat(dataString).getBytes(UTF_8));
+  }
+
 }
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachineStream.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachineStream.java
index 3b17450376..f4c756bccd 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachineStream.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachineStream.java
@@ -119,6 +119,7 @@ public class TestContainerStateMachineStream {
     conf.setQuietMode(false);
     cluster =
         MiniOzoneCluster.newBuilder(conf).setNumDatanodes(3).setHbInterval(200)
+            .setDataStreamMinPacketSize(1024)
             .build();
     cluster.waitForClusterToBeReady();
     cluster.waitForPipelineTobeReady(HddsProtos.ReplicationFactor.ONE, 60000);
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientAbstract.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientAbstract.java
index a6566141d7..13a1f5a76b 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientAbstract.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientAbstract.java
@@ -203,6 +203,7 @@ public abstract class TestOzoneRpcClientAbstract {
         .setTotalPipelineNumLimit(10)
         .setScmId(scmId)
         .setClusterId(clusterId)
+        .setDataStreamMinPacketSize(1024)
         .build();
     cluster.waitForClusterToBeReady();
     ozClient = OzoneClientFactory.getRpcClient(conf);
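
    The buffering rule this patch introduces: writes accumulate into a
    fixed-capacity packet buffer and a chunk is only sent once that buffer
    is full; flush/close sends whatever is pending. A self-contained sketch
    of the accumulation (plain java.nio; the class and method names are
    illustrative, not the patch's API), matching the 20+20-byte behavior
    asserted in testMinPacketSize above:

    import java.nio.ByteBuffer;
    import java.util.ArrayList;
    import java.util.List;

    public final class MinPacketSizeSketch {
      private final int minPacketSize;
      private ByteBuffer current;
      private final List<ByteBuffer> sent = new ArrayList<>();

      MinPacketSizeSketch(int minPacketSize) {
        this.minPacketSize = minPacketSize;
      }

      void write(byte[] b, int off, int len) {
        while (len > 0) {
          if (current == null) {             // allocateNewBufferIfNeeded()
            current = ByteBuffer.allocate(minPacketSize);
          }
          final int n = Math.min(len, current.remaining());
          current.put(b, off, n);
          off += n;
          len -= n;
          if (!current.hasRemaining()) {     // writeChunkIfNeeded(): full
            send();
          }
        }
      }

      void flush() {                         // on close: emit partial packet
        if (current != null && current.position() > 0) {
          send();
        }
      }

      private void send() {
        current.flip();
        sent.add(current);
        current = null;
      }

      public static void main(String[] args) {
        final MinPacketSizeSketch out = new MinPacketSizeSketch(40);
        out.write(new byte[20], 0, 20);      // buffered, below min size
        System.out.println(out.sent.size()); // 0
        out.write(new byte[20], 0, 20);      // reaches 40: packet sent
        System.out.println(out.sent.size()); // 1
      }
    }
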




[ozone] 01/38: HDDS-5366. [Ozone-Streaming] Implement stream method to ContainerStateMachine. (#2358). Contributed by mingchao zhao

Posted by sz...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

szetszwo pushed a commit to branch HDDS-4454
in repository https://gitbox.apache.org/repos/asf/ozone.git

commit e15e987a4132734909bc2e1065ef8ce20004b7dc
Author: micah zhao <mi...@tencent.com>
AuthorDate: Wed Jun 23 23:20:27 2021 +0800

    HDDS-5366.  [Ozone-Streaming] Implement stream method to ContainerStateMachine. (#2358).  Contributed by mingchao zhao
    
    (cherry picked from commit 16bafc62d63759b07a154de46bd925d752407b05)
    (cherry picked from commit b3495e8d39ab6ce09637f9ef05acfcf66e3087b8)
---
 .../server/ratis/ContainerStateMachine.java        | 25 ++++++++++
 .../common/transport/server/ratis/LocalStream.java | 50 +++++++++++++++++++
 .../transport/server/ratis/StreamDataChannel.java  | 57 ++++++++++++++++++++++
 3 files changed, 132 insertions(+)

diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/ContainerStateMachine.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/ContainerStateMachine.java
index 02c0a8d2b1..47e042643a 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/ContainerStateMachine.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/ContainerStateMachine.java
@@ -25,10 +25,12 @@ import java.io.IOException;
 import java.io.OutputStream;
 import java.util.Arrays;
 import java.util.Collection;
+import java.nio.file.Paths;
 import java.util.List;
 import java.util.Map;
 import java.util.concurrent.CompletableFuture;
 import java.util.concurrent.ConcurrentMap;
+import java.util.concurrent.CompletionException;
 import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.ExecutorService;
 import java.util.concurrent.Executors;
@@ -510,6 +512,29 @@ public class ContainerStateMachine extends BaseStateMachine {
     return raftFuture;
   }
 
+  @Override
+  public CompletableFuture<DataStream> stream(RaftClientRequest request) {
+    return CompletableFuture.supplyAsync(() -> {
+      try {
+        ContainerCommandRequestProto requestProto =
+            getContainerCommandRequestProto(gid,
+                request.getMessage().getContent());
+        DispatcherContext context =
+            new DispatcherContext.Builder()
+                .setStage(DispatcherContext.WriteChunkStage.WRITE_DATA)
+                .setContainer2BCSIDMap(container2BCSIDMap)
+                .build();
+
+        ContainerCommandResponseProto response = runCommand(
+            requestProto, context);
+        String path = response.getMessage();
+        return new LocalStream(new StreamDataChannel(Paths.get(path)));
+      } catch (IOException e) {
+        throw new CompletionException("Failed to create data stream", e);
+      }
+    }, executor);
+  }
+
   private ExecutorService getChunkExecutor(WriteChunkRequestProto req) {
     int i = (int)(req.getBlockID().getLocalID() % chunkExecutors.size());
     return chunkExecutors.get(i);
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/LocalStream.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/LocalStream.java
new file mode 100644
index 0000000000..baae013966
--- /dev/null
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/LocalStream.java
@@ -0,0 +1,50 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ *  with the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ */
+
+package org.apache.hadoop.ozone.container.common.transport.server.ratis;
+
+import org.apache.ratis.statemachine.StateMachine;
+
+import java.io.IOException;
+import java.util.concurrent.CompletableFuture;
+import java.util.concurrent.CompletionException;
+
+class LocalStream implements StateMachine.DataStream {
+  private final StateMachine.DataChannel dataChannel;
+
+  LocalStream(StateMachine.DataChannel dataChannel) {
+    this.dataChannel = dataChannel;
+  }
+
+  @Override
+  public StateMachine.DataChannel getDataChannel() {
+    return dataChannel;
+  }
+
+  @Override
+  public CompletableFuture<?> cleanUp() {
+    return CompletableFuture.supplyAsync(() -> {
+      try {
+        dataChannel.close();
+        return true;
+      } catch (IOException e) {
+        throw new CompletionException("Failed to close data channel", e);
+      }
+    });
+  }
+}
\ No newline at end of file
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/StreamDataChannel.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/StreamDataChannel.java
new file mode 100644
index 0000000000..3df66e26dc
--- /dev/null
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/StreamDataChannel.java
@@ -0,0 +1,57 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ *  with the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ */
+
+package org.apache.hadoop.ozone.container.common.transport.server.ratis;
+
+import org.apache.ratis.statemachine.StateMachine;
+
+import java.io.FileNotFoundException;
+import java.io.IOException;
+import java.io.RandomAccessFile;
+import java.nio.ByteBuffer;
+import java.nio.file.Path;
+
+class StreamDataChannel implements StateMachine.DataChannel {
+  private final Path path;
+  private final RandomAccessFile randomAccessFile;
+
+  StreamDataChannel(Path path) throws FileNotFoundException {
+    this.path = path;
+    this.randomAccessFile = new RandomAccessFile(path.toFile(), "rw");
+  }
+
+  @Override
+  public void force(boolean metadata) throws IOException {
+    randomAccessFile.getChannel().force(metadata);
+  }
+
+  @Override
+  public int write(ByteBuffer src) throws IOException {
+    return randomAccessFile.getChannel().write(src);
+  }
+
+  @Override
+  public boolean isOpen() {
+    return randomAccessFile.getChannel().isOpen();
+  }
+
+  @Override
+  public void close() throws IOException {
+    randomAccessFile.close();
+  }
+}
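
To see the data-channel mechanics outside Ratis, here is a self-contained sketch of the same idea as StreamDataChannel above: a file-backed channel whose write/force/close calls map one-to-one onto the methods in the patch. The class name and path are illustrative, not Ozone code:

import java.io.IOException;
import java.io.RandomAccessFile;
import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;
import java.nio.file.Path;
import java.nio.file.Paths;

// Illustrative stand-in for StreamDataChannel: a file-backed channel that
// supports write, force (fsync) and close, like the class added above.
class FileDataChannel implements AutoCloseable {
  private final RandomAccessFile file;

  FileDataChannel(Path path) throws IOException {
    this.file = new RandomAccessFile(path.toFile(), "rw");
  }

  int write(ByteBuffer src) throws IOException {
    return file.getChannel().write(src); // write at the current position
  }

  void force(boolean metadata) throws IOException {
    file.getChannel().force(metadata); // flush data (and optionally metadata)
  }

  @Override
  public void close() throws IOException {
    file.close();
  }

  public static void main(String[] args) throws IOException {
    Path path = Paths.get("/tmp/stream-demo.bin"); // hypothetical path
    try (FileDataChannel channel = new FileDataChannel(path)) {
      channel.write(ByteBuffer.wrap("hello".getBytes(StandardCharsets.UTF_8)));
      channel.force(false); // make the bytes durable before acking
    }
  }
}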




[ozone] 21/38: HDDS-6139. [Ozone-Streaming] Fix incorrect computation of totalAckDataLength. (#2978)

Posted by sz...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

szetszwo pushed a commit to branch HDDS-4454
in repository https://gitbox.apache.org/repos/asf/ozone.git

commit 25c3495e5831955a178b677fd0e4578816ae68e4
Author: Sadanand Shenoy <sa...@gmail.com>
AuthorDate: Thu Jan 13 11:24:12 2022 +0530

    HDDS-6139. [Ozone-Streaming] Fix incorrect computation of totalAckDataLength. (#2978)
    
    (cherry picked from commit 0db32a02233d84481b84513f1a6f7416c55753cf)
    (cherry picked from commit 6bd6cc7f2d605168432930f55138cba5b582f984)
---
 .../hdds/scm/storage/BlockDataStreamOutput.java       |  9 ++++++++-
 .../ozone/client/io/BlockDataStreamOutputEntry.java   |  2 +-
 .../ozone/client/rpc/TestBlockDataStreamOutput.java   | 19 +++++++++++++++++++
 3 files changed, 28 insertions(+), 2 deletions(-)

diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockDataStreamOutput.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockDataStreamOutput.java
index 84968b68d5..6ef59dd6d8 100644
--- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockDataStreamOutput.java
+++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockDataStreamOutput.java
@@ -133,6 +133,8 @@ public class BlockDataStreamOutput implements ByteBufferStreamOutput {
   private long syncPosition = 0;
   private StreamBuffer currentBuffer;
   private XceiverClientMetrics metrics;
+  // buffers for which putBlock is yet to be executed
+  private List<StreamBuffer> buffersForPutBlock;
   /**
    * Creates a new BlockDataStreamOutput.
    *
@@ -287,6 +289,10 @@ public class BlockDataStreamOutput implements ByteBufferStreamOutput {
 
   private void writeChunk(StreamBuffer sb) throws IOException {
     bufferList.add(sb);
+    if (buffersForPutBlock == null) {
+      buffersForPutBlock = new ArrayList<>();
+    }
+    buffersForPutBlock.add(sb);
     ByteBuffer dup = sb.duplicate();
     dup.position(0);
     dup.limit(sb.position());
@@ -392,7 +398,8 @@ public class BlockDataStreamOutput implements ByteBufferStreamOutput {
     final List<StreamBuffer> byteBufferList;
     if (!force) {
       Preconditions.checkNotNull(bufferList);
-      byteBufferList = bufferList;
+      byteBufferList = buffersForPutBlock;
+      buffersForPutBlock = null;
       Preconditions.checkNotNull(byteBufferList);
     } else {
       byteBufferList = null;
diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/BlockDataStreamOutputEntry.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/BlockDataStreamOutputEntry.java
index 2cd5630549..4e5a35a539 100644
--- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/BlockDataStreamOutputEntry.java
+++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/BlockDataStreamOutputEntry.java
@@ -156,7 +156,7 @@ public final class BlockDataStreamOutputEntry
     }
   }
 
-  long getTotalAckDataLength() {
+  public long getTotalAckDataLength() {
     if (byteBufferStreamOutput != null) {
       BlockDataStreamOutput out =
           (BlockDataStreamOutput) this.byteBufferStreamOutput;
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBlockDataStreamOutput.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBlockDataStreamOutput.java
index c9242df8b1..c6a3c32d2b 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBlockDataStreamOutput.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBlockDataStreamOutput.java
@@ -31,6 +31,7 @@ import org.apache.hadoop.ozone.OzoneConfigKeys;
 import org.apache.hadoop.ozone.client.ObjectStore;
 import org.apache.hadoop.ozone.client.OzoneClient;
 import org.apache.hadoop.ozone.client.OzoneClientFactory;
+import org.apache.hadoop.ozone.client.io.BlockDataStreamOutputEntry;
 import org.apache.hadoop.ozone.client.io.KeyDataStreamOutput;
 import org.apache.hadoop.ozone.client.io.OzoneDataStreamOutput;
 import org.apache.hadoop.ozone.container.ContainerTestHelper;
@@ -256,4 +257,22 @@ public class TestBlockDataStreamOutput {
     validateData(keyName, dataString.concat(dataString).getBytes(UTF_8));
   }
 
+  @Test
+  public void testTotalAckDataLength() throws Exception {
+    int dataLength = 400;
+    String keyName = getKeyName();
+    OzoneDataStreamOutput key = createKey(
+        keyName, ReplicationType.RATIS, 0);
+    byte[] data =
+        ContainerTestHelper.getFixedLengthString(keyString, dataLength)
+            .getBytes(UTF_8);
+    KeyDataStreamOutput keyDataStreamOutput =
+        (KeyDataStreamOutput) key.getByteBufStreamOutput();
+    BlockDataStreamOutputEntry stream =
+        keyDataStreamOutput.getStreamEntries().get(0);
+    key.write(ByteBuffer.wrap(data));
+    key.close();
+    Assert.assertEquals(dataLength, stream.getTotalAckDataLength());
+  }
+
 }
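
The essence of the fix, reduced to a standalone sketch (names and the ack timing are simplified; the real accounting lives in BlockDataStreamOutput and its commit watcher): retries must be able to replay everything still held in bufferList, but each putBlock must account only for the chunks written since the previous putBlock, otherwise the acked length is double-counted:

import java.nio.ByteBuffer;
import java.util.ArrayList;
import java.util.List;

// Illustrative model of the fix: keep a second list holding only the
// buffers written since the last putBlock, and hand that list (not the
// full retry list) to the next putBlock.
class AckAccounting {
  private final List<ByteBuffer> retryBuffers = new ArrayList<>();
  private List<ByteBuffer> buffersForPutBlock;
  private long totalAckDataLength;

  void writeChunk(ByteBuffer chunk) {
    retryBuffers.add(chunk);       // kept until acked, for retries
    if (buffersForPutBlock == null) {
      buffersForPutBlock = new ArrayList<>();
    }
    buffersForPutBlock.add(chunk); // pending for the next putBlock
  }

  void putBlock() {
    List<ByteBuffer> toAccount = buffersForPutBlock;
    buffersForPutBlock = null;     // the next putBlock starts fresh
    if (toAccount != null) {
      for (ByteBuffer b : toAccount) {
        totalAckDataLength += b.limit(); // each chunk counted exactly once
      }
    }
  }

  long getTotalAckDataLength() {
    return totalAckDataLength;
  }

  public static void main(String[] args) {
    AckAccounting acct = new AckAccounting();
    acct.writeChunk(ByteBuffer.allocate(100));
    acct.putBlock();
    acct.writeChunk(ByteBuffer.allocate(300));
    acct.putBlock();
    // Prints 400; reusing the full retry list would have reported 500.
    System.out.println(acct.getTotalAckDataLength());
  }
}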




[ozone] 30/38: HDDS-6388. [Ozone-Streaming] Streaming write support both pipeline model and star model (#3145)

Posted by sz...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

szetszwo pushed a commit to branch HDDS-4454
in repository https://gitbox.apache.org/repos/asf/ozone.git

commit 8bbe943d23792b32d7aca6991b144182a059ff25
Author: micah zhao <mi...@tencent.com>
AuthorDate: Wed Mar 2 23:26:48 2022 +0800

    HDDS-6388. [Ozone-Streaming] Streaming write support both pipeline model and star model (#3145)
    
    (cherry picked from commit ab61b71d35b37fef3bd748d03b77aed7b6c6b4ee)
    (cherry picked from commit caa3b30a1e9336dcbae0600180c550a4806bcf3e)
---
 .../org/apache/hadoop/hdds/scm/OzoneClientConfig.java     | 15 +++++++++++++++
 .../hadoop/hdds/scm/storage/BlockDataStreamOutput.java    | 13 ++++++++++---
 2 files changed, 25 insertions(+), 3 deletions(-)

diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/OzoneClientConfig.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/OzoneClientConfig.java
index 80cb14f03e..9f5cd5dab7 100644
--- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/OzoneClientConfig.java
+++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/OzoneClientConfig.java
@@ -89,6 +89,13 @@ public class OzoneClientConfig {
       tags = ConfigTag.CLIENT)
   private long streamWindowSize = 64 * 1024 * 1024;
 
+  @Config(key = "datastream.pipeline.mode",
+      defaultValue = "true",
+      description = "Streaming write support both pipeline mode(datanode1->" +
+          "datanode2->datanode3) and star mode(datanode1->datanode2, " +
+          "datanode1->datanode3). By default we use pipeline mode.",
+      tags = ConfigTag.CLIENT)
+  private boolean datastreamPipelineMode = true;
 
   @Config(key = "stream.buffer.increment",
       defaultValue = "0B",
@@ -373,4 +380,12 @@ public class OzoneClientConfig {
   public String getFsDefaultBucketLayout() {
     return fsDefaultBucketLayout;
   }
+
+  public boolean isDatastreamPipelineMode() {
+    return datastreamPipelineMode;
+  }
+
+  public void setDatastreamPipelineMode(boolean datastreamPipelineMode) {
+    this.datastreamPipelineMode = datastreamPipelineMode;
+  }
 }
diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockDataStreamOutput.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockDataStreamOutput.java
index 8b3e32cf41..3df5eb0e12 100644
--- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockDataStreamOutput.java
+++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockDataStreamOutput.java
@@ -137,6 +137,7 @@ public class BlockDataStreamOutput implements ByteBufferStreamOutput {
   private XceiverClientMetrics metrics;
   // buffers for which putBlock is yet to be executed
   private List<StreamBuffer> buffersForPutBlock;
+  private boolean isDatastreamPipelineMode;
   /**
    * Creates a new BlockDataStreamOutput.
    *
@@ -154,6 +155,7 @@ public class BlockDataStreamOutput implements ByteBufferStreamOutput {
   ) throws IOException {
     this.xceiverClientFactory = xceiverClientManager;
     this.config = config;
+    this.isDatastreamPipelineMode = config.isDatastreamPipelineMode();
     this.blockID = new AtomicReference<>(blockID);
     KeyValue keyValue =
         KeyValue.newBuilder().setKey("TYPE").setValue("KEY").build();
@@ -203,9 +205,14 @@ public class BlockDataStreamOutput implements ByteBufferStreamOutput {
     ContainerCommandRequestMessage message =
         ContainerCommandRequestMessage.toMessage(builder.build(), null);
 
-    return Preconditions.checkNotNull(xceiverClient.getDataStreamApi())
-    .stream(message.getContent().asReadOnlyByteBuffer(),
-        getRoutingTable(pipeline));
+    if (isDatastreamPipelineMode) {
+      return Preconditions.checkNotNull(xceiverClient.getDataStreamApi())
+          .stream(message.getContent().asReadOnlyByteBuffer(),
+              getRoutingTable(pipeline));
+    } else {
+      return Preconditions.checkNotNull(xceiverClient.getDataStreamApi())
+          .stream(message.getContent().asReadOnlyByteBuffer());
+    }
   }
 
   public RoutingTable getRoutingTable(Pipeline pipeline) {
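
For intuition, the two topologies reduced to a toy model (plain Java; not the Ratis RoutingTable API): in the pipeline model each datanode forwards to the next, while in the star model the primary fans out directly to every other replica:

import java.util.ArrayList;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;

// Illustrative only: models each topology as a "who forwards to whom" map.
public class StreamTopology {
  static Map<String, List<String>> routes(List<String> nodes, boolean pipeline) {
    Map<String, List<String>> routing = new LinkedHashMap<>();
    if (pipeline) {
      // chain: each node forwards to its successor
      for (int i = 0; i + 1 < nodes.size(); i++) {
        routing.put(nodes.get(i), List.of(nodes.get(i + 1)));
      }
    } else {
      // star: the primary sends to all the other replicas itself
      routing.put(nodes.get(0), new ArrayList<>(nodes.subList(1, nodes.size())));
    }
    return routing;
  }

  public static void main(String[] args) {
    List<String> dns = List.of("datanode1", "datanode2", "datanode3");
    System.out.println(routes(dns, true));  // {datanode1=[datanode2], datanode2=[datanode3]}
    System.out.println(routes(dns, false)); // {datanode1=[datanode2, datanode3]}
  }
}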




[ozone] 08/38: HDDS-5742. Avoid unnecessary Bytebuffer conversions (#2673)

Posted by sz...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

szetszwo pushed a commit to branch HDDS-4454
in repository https://gitbox.apache.org/repos/asf/ozone.git

commit b2133a5badda998f0e36092708d244efbb722a62
Author: micah zhao <mi...@tencent.com>
AuthorDate: Thu Sep 23 11:26:43 2021 +0800

    HDDS-5742. Avoid unnecessary Bytebuffer conversions (#2673)
    
    (cherry picked from commit bb4511840a93f1276194d243cbf0034061fefcd7)
    (cherry picked from commit b7b16c69778c63ebd08a2e029a1917893c7d2c4f)
---
 .../org/apache/hadoop/hdds/scm/storage/BlockDataStreamOutput.java    | 3 +--
 .../src/main/java/org/apache/hadoop/ozone/common/Checksum.java       | 5 +++++
 2 files changed, 6 insertions(+), 2 deletions(-)

diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockDataStreamOutput.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockDataStreamOutput.java
index d0419fa0c3..c69af90a91 100644
--- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockDataStreamOutput.java
+++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockDataStreamOutput.java
@@ -485,8 +485,7 @@ public class BlockDataStreamOutput implements ByteBufferStreamOutput {
       throws IOException {
     final int effectiveChunkSize = buf.remaining();
     final long offset = chunkOffset.getAndAdd(effectiveChunkSize);
-    ChecksumData checksumData =
-        checksum.computeChecksum(buf.asReadOnlyBuffer());
+    ChecksumData checksumData = checksum.computeChecksum(buf);
     ChunkInfo chunkInfo = ChunkInfo.newBuilder()
         .setChunkName(blockID.get().getLocalID() + "_chunk_" + ++chunkIndex)
         .setOffset(offset)
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/Checksum.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/Checksum.java
index bb4b5e3ced..939527a5e2 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/Checksum.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/Checksum.java
@@ -140,6 +140,11 @@ public class Checksum {
    */
   public ChecksumData computeChecksum(ByteBuffer data)
       throws OzoneChecksumException {
+    // If type is set to NONE, we do not need to compute the checksums. We also
+    // need to avoid unnecessary conversions.
+    if (checksumType == ChecksumType.NONE) {
+      return new ChecksumData(checksumType, bytesPerChecksum);
+    }
     if (!data.isReadOnly()) {
       data = data.asReadOnlyBuffer();
     }
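
The reasoning behind the early return, as a standalone sketch: when the checksum type is NONE there is nothing to compute, so the method can return before touching or converting the buffer at all. CRC32 stands in for Ozone's checksum algorithms and a List<Long> for ChecksumData; both are illustrative:

import java.nio.ByteBuffer;
import java.util.ArrayList;
import java.util.List;
import java.util.zip.CRC32;

public class ChecksumSketch {
  enum Type { NONE, CRC32 }

  static List<Long> computeChecksum(ByteBuffer data, Type type,
      int bytesPerChecksum) {
    List<Long> sums = new ArrayList<>();
    if (type == Type.NONE) {
      return sums; // short-circuit: no work, no read-only buffer copy
    }
    ByteBuffer ro = data.isReadOnly() ? data : data.asReadOnlyBuffer();
    byte[] window = new byte[bytesPerChecksum];
    while (ro.hasRemaining()) {
      int n = Math.min(bytesPerChecksum, ro.remaining());
      ro.get(window, 0, n);
      CRC32 crc = new CRC32();
      crc.update(window, 0, n);
      sums.add(crc.getValue()); // one checksum per bytesPerChecksum window
    }
    return sums;
  }

  public static void main(String[] args) {
    ByteBuffer buf = ByteBuffer.wrap(new byte[1 << 20]); // 1 MB of zeros
    System.out.println(computeChecksum(buf, Type.NONE, 64 * 1024).size());  // 0
    System.out.println(computeChecksum(buf, Type.CRC32, 64 * 1024).size()); // 16
  }
}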




[ozone] 04/38: HDDS-5480. [Ozone-Streaming] Client and server should support stream setup. (#2452)

Posted by sz...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

szetszwo pushed a commit to branch HDDS-4454
in repository https://gitbox.apache.org/repos/asf/ozone.git

commit 0568725ac9e66c46c5918e86b80328c93c7c2bdb
Author: micah zhao <mi...@tencent.com>
AuthorDate: Wed Jul 28 20:22:53 2021 +0800

    HDDS-5480. [Ozone-Streaming] Client and server should support stream setup. (#2452)
    
    (cherry picked from commit 52306308c7eb68e4029c5fb3d316a1db44b9eb7a)
    (cherry picked from commit 0a4bbf5c7b1d7190df818c3828110fcba4261704)
---
 .../hadoop/hdds/protocol/DatanodeDetails.java      |  9 +++--
 .../org/apache/hadoop/hdds/ratis/RatisHelper.java  | 15 ++++++--
 .../org/apache/hadoop/ozone/OzoneConfigKeys.java   | 18 +++++++++
 .../org/apache/hadoop/ozone/audit/DNAction.java    |  3 +-
 .../helpers/ContainerCommandRequestPBHelper.java   |  1 +
 .../common/src/main/resources/ozone-default.xml    | 20 ++++++++++
 .../org/apache/hadoop/hdds/conf/ConfigTag.java     |  3 +-
 .../container/common/impl/HddsDispatcher.java      |  3 +-
 .../transport/server/ratis/XceiverServerRatis.java | 43 +++++++++++++++++++++-
 .../ozone/container/keyvalue/KeyValueHandler.java  | 33 +++++++++++++++++
 .../keyvalue/impl/ChunkManagerDispatcher.java      |  6 +++
 .../keyvalue/impl/FilePerBlockStrategy.java        |  8 ++++
 .../keyvalue/interfaces/ChunkManager.java          |  5 +++
 .../container/common/TestDatanodeStateMachine.java |  6 ++-
 .../hdds/conf/DatanodeRatisServerConfig.java       | 35 ++++++++++++++++++
 .../src/main/proto/DatanodeClientProtocol.proto    |  4 +-
 .../ozone/container/common/TestEndPoint.java       |  4 ++
 .../intellij/runConfigurations/Datanode2.xml       |  2 +-
 .../intellij/runConfigurations/Datanode3.xml       |  2 +-
 .../org/apache/hadoop/ozone/MiniOzoneCluster.java  |  1 +
 .../apache/hadoop/ozone/MiniOzoneClusterImpl.java  |  3 ++
 .../apache/hadoop/ozone/TestMiniOzoneCluster.java  |  2 +
 .../server/TestSecureContainerServer.java          |  2 +
 23 files changed, 213 insertions(+), 15 deletions(-)

diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/protocol/DatanodeDetails.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/protocol/DatanodeDetails.java
index 32358ef40a..04329ab2b0 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/protocol/DatanodeDetails.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/protocol/DatanodeDetails.java
@@ -284,8 +284,10 @@ public class DatanodeDetails extends NodeImpl implements
         return port;
       }
     }
-    // if no separate admin/server port, return single Ratis one for compat
-    if (name == Name.RATIS_ADMIN || name == Name.RATIS_SERVER) {
+    // if no separate admin/server/datastream port, return single Ratis one for
+    // compat
+    if (name == Name.RATIS_ADMIN || name == Name.RATIS_SERVER ||
+        name == Name.RATIS_DATASTREAM) {
       return getPort(Name.RATIS);
     }
     return null;
@@ -795,7 +797,8 @@ public class DatanodeDetails extends NodeImpl implements
      * Ports that are supported in DataNode.
      */
     public enum Name {
-      STANDALONE, RATIS, REST, REPLICATION, RATIS_ADMIN, RATIS_SERVER;
+      STANDALONE, RATIS, REST, REPLICATION, RATIS_ADMIN, RATIS_SERVER,
+      RATIS_DATASTREAM;
 
       public static final Set<Name> ALL_PORTS = ImmutableSet.copyOf(
           Name.values());
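
The compatibility fallback above, in isolation (illustrative types; the real logic is DatanodeDetails.getPort): when a node does not advertise a dedicated datastream port, fall back to its single RATIS port so older datanodes keep working:

import java.util.EnumMap;
import java.util.Map;

public class PortLookup {
  enum Name { RATIS, RATIS_ADMIN, RATIS_SERVER, RATIS_DATASTREAM }

  static Integer getPort(Map<Name, Integer> ports, Name name) {
    Integer port = ports.get(name);
    if (port != null) {
      return port;
    }
    if (name == Name.RATIS_ADMIN || name == Name.RATIS_SERVER
        || name == Name.RATIS_DATASTREAM) {
      return ports.get(Name.RATIS); // compat: one Ratis port for everything
    }
    return null;
  }

  public static void main(String[] args) {
    Map<Name, Integer> ports = new EnumMap<>(Name.class);
    ports.put(Name.RATIS, 9858); // no datastream port advertised
    System.out.println(getPort(ports, Name.RATIS_DATASTREAM)); // 9858
  }
}
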
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/ratis/RatisHelper.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/ratis/RatisHelper.java
index be6076a918..164044ced0 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/ratis/RatisHelper.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/ratis/RatisHelper.java
@@ -46,6 +46,7 @@ import org.apache.ratis.client.RaftClient;
 import org.apache.ratis.client.RaftClientConfigKeys;
 import org.apache.ratis.conf.Parameters;
 import org.apache.ratis.conf.RaftProperties;
+import org.apache.ratis.datastream.SupportedDataStreamType;
 import org.apache.ratis.grpc.GrpcConfigKeys;
 import org.apache.ratis.grpc.GrpcTlsConfig;
 import org.apache.ratis.proto.RaftProtos;
@@ -134,7 +135,9 @@ public final class RatisHelper {
         .setId(toRaftPeerId(dn))
         .setAddress(toRaftPeerAddress(dn, Port.Name.RATIS_SERVER))
         .setAdminAddress(toRaftPeerAddress(dn, Port.Name.RATIS_ADMIN))
-        .setClientAddress(toRaftPeerAddress(dn, Port.Name.RATIS));
+        .setClientAddress(toRaftPeerAddress(dn, Port.Name.RATIS))
+        .setDataStreamAddress(
+            toRaftPeerAddress(dn, Port.Name.RATIS_DATASTREAM));
   }
 
   private static List<RaftPeer> toRaftPeers(Pipeline pipeline) {
@@ -188,6 +191,7 @@ public final class RatisHelper {
       ConfigurationSource ozoneConfiguration) throws IOException {
     return newRaftClient(rpcType,
         toRaftPeerId(pipeline.getLeaderNode()),
+        toRaftPeer(pipeline.getFirstNode()),
         newRaftGroup(RaftGroupId.valueOf(pipeline.getId().getId()),
             pipeline.getNodes()), retryPolicy, tlsConfig, ozoneConfiguration);
   }
@@ -207,7 +211,7 @@ public final class RatisHelper {
   public static RaftClient newRaftClient(RpcType rpcType, RaftPeer leader,
       RetryPolicy retryPolicy, GrpcTlsConfig tlsConfig,
       ConfigurationSource configuration) {
-    return newRaftClient(rpcType, leader.getId(),
+    return newRaftClient(rpcType, leader.getId(), leader,
         newRaftGroup(Collections.singletonList(leader)), retryPolicy,
         tlsConfig, configuration);
   }
@@ -215,14 +219,14 @@ public final class RatisHelper {
   public static RaftClient newRaftClient(RpcType rpcType, RaftPeer leader,
       RetryPolicy retryPolicy,
       ConfigurationSource ozoneConfiguration) {
-    return newRaftClient(rpcType, leader.getId(),
+    return newRaftClient(rpcType, leader.getId(), leader,
         newRaftGroup(Collections.singletonList(leader)), retryPolicy, null,
         ozoneConfiguration);
   }
 
   @SuppressWarnings("checkstyle:ParameterNumber")
   private static RaftClient newRaftClient(RpcType rpcType, RaftPeerId leader,
-      RaftGroup group, RetryPolicy retryPolicy,
+      RaftPeer primary, RaftGroup group, RetryPolicy retryPolicy,
       GrpcTlsConfig tlsConfig, ConfigurationSource ozoneConfiguration) {
     if (LOG.isTraceEnabled()) {
       LOG.trace("newRaftClient: {}, leader={}, group={}",
@@ -236,6 +240,7 @@ public final class RatisHelper {
     return RaftClient.newBuilder()
         .setRaftGroup(group)
         .setLeaderId(leader)
+        .setPrimaryDataStreamServer(primary)
         .setProperties(properties)
         .setParameters(setClientTlsConf(rpcType, tlsConfig))
         .setRetryPolicy(retryPolicy)
@@ -293,6 +298,8 @@ public final class RatisHelper {
   public static RaftProperties setRpcType(RaftProperties properties,
       RpcType rpcType) {
     RaftConfigKeys.Rpc.setType(properties, rpcType);
+    RaftConfigKeys.DataStream.setType(properties,
+        SupportedDataStreamType.NETTY);
     return properties;
   }
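
Putting the client-side pieces of this patch together, a hedged sketch (addresses, peer ids and the single-peer group are placeholders; the builder and config calls mirror the ones added above):

import org.apache.ratis.RaftConfigKeys;
import org.apache.ratis.client.RaftClient;
import org.apache.ratis.conf.RaftProperties;
import org.apache.ratis.datastream.SupportedDataStreamType;
import org.apache.ratis.protocol.RaftGroup;
import org.apache.ratis.protocol.RaftGroupId;
import org.apache.ratis.protocol.RaftPeer;

public class DataStreamClientSetup {
  public static RaftClient newStreamingClient() {
    // Advertise a dedicated datastream address alongside the client address.
    RaftPeer peer = RaftPeer.newBuilder()
        .setId("dn1")
        .setAddress("dn1.example.com:9858")
        .setDataStreamAddress("dn1.example.com:9855")
        .build();
    RaftGroup group = RaftGroup.valueOf(RaftGroupId.randomId(), peer);

    // Enable the Netty-based datastream transport, as setRpcType now does.
    RaftProperties properties = new RaftProperties();
    RaftConfigKeys.DataStream.setType(properties, SupportedDataStreamType.NETTY);

    return RaftClient.newBuilder()
        .setRaftGroup(group)
        .setPrimaryDataStreamServer(peer) // streams are sent to this peer first
        .setProperties(properties)
        .build();
  }
}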
 
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java
index d8c1a984c6..b9365d7e04 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java
@@ -57,6 +57,12 @@ public final class OzoneConfigKeys {
   public static final boolean DFS_CONTAINER_IPC_RANDOM_PORT_DEFAULT =
       false;
 
+  public static final String DFS_CONTAINER_RATIS_DATASTREAM_RANDOM_PORT =
+      "dfs.container.ratis.datastream.random.port";
+  public static final boolean
+      DFS_CONTAINER_RATIS_DATASTREAM_RANDOM_PORT_DEFAULT =
+      false;
+
   public static final String DFS_CONTAINER_CHUNK_WRITE_SYNC_KEY =
       "dfs.container.chunk.write.sync";
   public static final boolean DFS_CONTAINER_CHUNK_WRITE_SYNC_DEFAULT = false;
@@ -79,6 +85,18 @@ public final class OzoneConfigKeys {
       "dfs.container.ratis.server.port";
   public static final int DFS_CONTAINER_RATIS_SERVER_PORT_DEFAULT = 9856;
 
+  /**
+   * Ratis port on which containers listen for datastream requests.
+   */
+  public static final String DFS_CONTAINER_RATIS_DATASTREAM_ENABLE
+      = "dfs.container.ratis.datastream.enable";
+  public static final boolean DFS_CONTAINER_RATIS_DATASTREAM_ENABLE_DEFAULT
+      = true;
+  public static final String DFS_CONTAINER_RATIS_DATASTREAM_PORT
+      = "dfs.container.ratis.datastream.port";
+  public static final int DFS_CONTAINER_RATIS_DATASTREAM_PORT_DEFAULT
+      = 9855;
+
   /**
    * When set to true, allocate a random free port for ozone container, so that
    * a mini cluster is able to launch multiple containers on a node.
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/audit/DNAction.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/audit/DNAction.java
index 1c87f2bdeb..73aff9ac83 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/audit/DNAction.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/audit/DNAction.java
@@ -38,7 +38,8 @@ public enum DNAction implements AuditAction {
   PUT_SMALL_FILE,
   GET_SMALL_FILE,
   CLOSE_CONTAINER,
-  GET_COMMITTED_BLOCK_LENGTH;
+  GET_COMMITTED_BLOCK_LENGTH,
+  STREAM_INIT;
 
   @Override
   public String getAction() {
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerCommandRequestPBHelper.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerCommandRequestPBHelper.java
index a13f164eec..4d7f0f37c4 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerCommandRequestPBHelper.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerCommandRequestPBHelper.java
@@ -187,6 +187,7 @@ public final class ContainerCommandRequestPBHelper {
     case GetSmallFile     : return DNAction.GET_SMALL_FILE;
     case CloseContainer   : return DNAction.CLOSE_CONTAINER;
     case GetCommittedBlockLength : return DNAction.GET_COMMITTED_BLOCK_LENGTH;
+    case StreamInit       : return DNAction.STREAM_INIT;
     default :
       LOG.debug("Invalid command type - {}", cmdType);
       return null;
diff --git a/hadoop-hdds/common/src/main/resources/ozone-default.xml b/hadoop-hdds/common/src/main/resources/ozone-default.xml
index a481ba5e36..5f2743bbc5 100644
--- a/hadoop-hdds/common/src/main/resources/ozone-default.xml
+++ b/hadoop-hdds/common/src/main/resources/ozone-default.xml
@@ -53,6 +53,26 @@
     <tag>OZONE, CONTAINER, MANAGEMENT</tag>
     <description>The ipc port number of container.</description>
   </property>
+  <property>
+    <name>dfs.container.ratis.datastream.enable</name>
+    <value>true</value>
+    <tag>OZONE, CONTAINER, RATIS, DATASTREAM</tag>
+    <description>Whether to enable the datastream IPC of the container.</description>
+  </property>
+  <property>
+    <name>dfs.container.ratis.datastream.port</name>
+    <value>9855</value>
+    <tag>OZONE, CONTAINER, RATIS, DATASTREAM</tag>
+    <description>The datastream port number of container.</description>
+  </property>
+  <property>
+    <name>dfs.container.ratis.datastream.random.port</name>
+    <value>false</value>
+    <tag>OZONE, CONTAINER, RATIS, DATASTREAM</tag>
+    <description>Allocates a random free port for ozone container datastream.
+      This is used only while running unit tests.
+    </description>
+  </property>
   <property>
     <name>dfs.container.ipc.random.port</name>
     <value>false</value>
diff --git a/hadoop-hdds/config/src/main/java/org/apache/hadoop/hdds/conf/ConfigTag.java b/hadoop-hdds/config/src/main/java/org/apache/hadoop/hdds/conf/ConfigTag.java
index 8cf584d75f..3728a0b1f5 100644
--- a/hadoop-hdds/config/src/main/java/org/apache/hadoop/hdds/conf/ConfigTag.java
+++ b/hadoop-hdds/config/src/main/java/org/apache/hadoop/hdds/conf/ConfigTag.java
@@ -46,5 +46,6 @@ public enum ConfigTag {
   DELETION,
   HA,
   BALANCER,
-  UPGRADE
+  UPGRADE,
+  DATASTREAM
 }
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/HddsDispatcher.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/HddsDispatcher.java
index 6dde0181a8..a4693b227e 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/HddsDispatcher.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/HddsDispatcher.java
@@ -200,7 +200,8 @@ public class HddsDispatcher implements ContainerDispatcher, Auditor {
     boolean isWriteStage =
         (cmdType == Type.WriteChunk && dispatcherContext != null
             && dispatcherContext.getStage()
-            == DispatcherContext.WriteChunkStage.WRITE_DATA);
+            == DispatcherContext.WriteChunkStage.WRITE_DATA)
+            || (cmdType == Type.StreamInit);
     boolean isWriteCommitStage =
         (cmdType == Type.WriteChunk && dispatcherContext != null
             && dispatcherContext.getStage()
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/XceiverServerRatis.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/XceiverServerRatis.java
index c8d715cc60..d69b64cce1 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/XceiverServerRatis.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/XceiverServerRatis.java
@@ -98,6 +98,7 @@ import org.apache.ratis.protocol.RaftGroupMemberId;
 import org.apache.ratis.protocol.RaftPeerId;
 import org.apache.ratis.rpc.RpcType;
 import org.apache.ratis.rpc.SupportedRpcType;
+import org.apache.ratis.server.DataStreamServerRpc;
 import org.apache.ratis.server.RaftServer;
 import org.apache.ratis.server.RaftServerConfigKeys;
 import org.apache.ratis.server.RaftServerRpc;
@@ -129,6 +130,7 @@ public final class XceiverServerRatis implements XceiverServerSpi {
   private int serverPort;
   private int adminPort;
   private int clientPort;
+  private int dataStreamPort;
   private final RaftServer server;
   private final List<ThreadPoolExecutor> chunkExecutors;
   private final ContainerDispatcher dispatcher;
@@ -148,6 +150,7 @@ public final class XceiverServerRatis implements XceiverServerSpi {
   // Timeout used while calling submitRequest directly.
   private long requestTimeout;
   private boolean shouldDeleteRatisLogDirectory;
+  private boolean streamEnable;
 
   private XceiverServerRatis(DatanodeDetails dd,
       ContainerDispatcher dispatcher, ContainerController containerController,
@@ -157,6 +160,9 @@ public final class XceiverServerRatis implements XceiverServerSpi {
     Objects.requireNonNull(dd, "id == null");
     datanodeDetails = dd;
     assignPorts();
+    this.streamEnable = conf.getBoolean(
+        OzoneConfigKeys.DFS_CONTAINER_RATIS_DATASTREAM_ENABLE,
+        OzoneConfigKeys.DFS_CONTAINER_RATIS_DATASTREAM_ENABLE_DEFAULT);
     RaftProperties serverProperties = newRaftProperties();
     this.context = context;
     this.dispatcher = dispatcher;
@@ -213,6 +219,32 @@ public final class XceiverServerRatis implements XceiverServerSpi {
         chunkExecutors, this, conf);
   }
 
+  private void setUpRatisStream(RaftProperties properties) {
+    // set the datastream config
+    if (conf.getBoolean(
+        OzoneConfigKeys.DFS_CONTAINER_RATIS_DATASTREAM_RANDOM_PORT,
+        OzoneConfigKeys.
+            DFS_CONTAINER_RATIS_DATASTREAM_RANDOM_PORT_DEFAULT)) {
+      dataStreamPort = 0;
+    } else {
+      dataStreamPort = conf.getInt(
+          OzoneConfigKeys.DFS_CONTAINER_RATIS_DATASTREAM_PORT,
+          OzoneConfigKeys.DFS_CONTAINER_RATIS_DATASTREAM_PORT_DEFAULT);
+    }
+    NettyConfigKeys.DataStream.setPort(properties, dataStreamPort);
+    int dataStreamAsyncRequestThreadPoolSize =
+        conf.getObject(DatanodeRatisServerConfig.class)
+            .getStreamRequestThreads();
+    RaftServerConfigKeys.DataStream.setAsyncRequestThreadPoolSize(properties,
+        dataStreamAsyncRequestThreadPoolSize);
+    int dataStreamWriteRequestThreadPoolSize =
+        conf.getObject(DatanodeRatisServerConfig.class)
+            .getStreamWriteThreads();
+    RaftServerConfigKeys.DataStream.setAsyncWriteThreadPoolSize(properties,
+        dataStreamWriteRequestThreadPoolSize);
+  }
+
+  @SuppressWarnings("checkstyle:methodlength")
   private RaftProperties newRaftProperties() {
     final RaftProperties properties = new RaftProperties();
 
@@ -231,6 +263,10 @@ public final class XceiverServerRatis implements XceiverServerSpi {
 
     // set the configs enable and set the stateMachineData sync timeout
     RaftServerConfigKeys.Log.StateMachineData.setSync(properties, true);
+    if (streamEnable) {
+      setUpRatisStream(properties);
+    }
+
     timeUnit = OzoneConfigKeys.
         DFS_CONTAINER_RATIS_STATEMACHINEDATA_SYNC_TIMEOUT_DEFAULT.getUnit();
     duration = conf.getTimeDuration(
@@ -491,7 +527,12 @@ public final class XceiverServerRatis implements XceiverServerSpi {
           Port.Name.RATIS_ADMIN);
       serverPort = getRealPort(serverRpc.getInetSocketAddress(),
           Port.Name.RATIS_SERVER);
-
+      if (streamEnable) {
+        DataStreamServerRpc dataStreamServerRpc =
+            server.getDataStreamServerRpc();
+        dataStreamPort = getRealPort(dataStreamServerRpc.getInetSocketAddress(),
+            Port.Name.RATIS_DATASTREAM);
+      }
       isStarted = true;
     }
   }
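
The random-port pattern used here, in miniature: binding to port 0 asks the OS for any free port, and the real port is read back once the server is bound. Plain java.net below, standing in for the Ratis datastream server:

import java.io.IOException;
import java.net.ServerSocket;

public class RandomPortDemo {
  public static void main(String[] args) throws IOException {
    int configuredPort = 0; // as when ...datastream.random.port is true
    try (ServerSocket server = new ServerSocket(configuredPort)) {
      int realPort = server.getLocalPort(); // the port actually bound
      System.out.println("datastream bound to port " + realPort);
    }
  }
}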
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java
index 67e9d13c27..9be52d6403 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java
@@ -101,6 +101,7 @@ import static org.apache.hadoop.hdds.scm.protocolPB.ContainerCommandResponseBuil
 import static org.apache.hadoop.hdds.scm.protocolPB.ContainerCommandResponseBuilders.getReadChunkResponse;
 import static org.apache.hadoop.hdds.scm.protocolPB.ContainerCommandResponseBuilders.getReadContainerResponse;
 import static org.apache.hadoop.hdds.scm.protocolPB.ContainerCommandResponseBuilders.getSuccessResponse;
+import static org.apache.hadoop.hdds.scm.protocolPB.ContainerCommandResponseBuilders.getSuccessResponseBuilder;
 import static org.apache.hadoop.hdds.scm.protocolPB.ContainerCommandResponseBuilders.malformedRequest;
 import static org.apache.hadoop.hdds.scm.protocolPB.ContainerCommandResponseBuilders.putBlockResponseSuccess;
 import static org.apache.hadoop.hdds.scm.protocolPB.ContainerCommandResponseBuilders.unsupportedRequest;
@@ -230,6 +231,8 @@ public class KeyValueHandler extends Handler {
       return handler.handleDeleteChunk(request, kvContainer);
     case WriteChunk:
       return handler.handleWriteChunk(request, kvContainer, dispatcherContext);
+    case StreamInit:
+      return handler.handleStreamInit(request, kvContainer, dispatcherContext);
     case ListChunk:
       return handler.handleUnsupportedOp(request);
     case CompactChunk:
@@ -256,6 +259,36 @@ public class KeyValueHandler extends Handler {
     return this.blockManager;
   }
 
+  ContainerCommandResponseProto handleStreamInit(
+      ContainerCommandRequestProto request, KeyValueContainer kvContainer,
+      DispatcherContext dispatcherContext) {
+    if (!request.hasWriteChunk()) {
+      if (LOG.isDebugEnabled()) {
+        LOG.debug("Malformed Write Chunk request. trace ID: {}",
+            request.getTraceID());
+      }
+      return malformedRequest(request);
+    }
+
+    String path = null;
+    try {
+      checkContainerOpen(kvContainer);
+
+      WriteChunkRequestProto writeChunk = request.getWriteChunk();
+      BlockID blockID = BlockID.getFromProtobuf(writeChunk.getBlockID());
+
+      path = chunkManager
+          .streamInit(kvContainer, blockID);
+
+    } catch (StorageContainerException ex) {
+      return ContainerUtils.logAndReturnError(LOG, ex, request);
+    }
+
+    return getSuccessResponseBuilder(request)
+        .setMessage(path)
+        .build();
+  }
+
   /**
    * Handles Create Container Request. If successful, adds the container to
    * ContainerSet and sends an ICR to the SCM.
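
Conceptually, the StreamInit round trip looks like this (an illustrative sketch; the path layout is hypothetical, not Ozone's real chunk layout): the handler resolves where the block's chunk file will live and returns that path in the response message, and the state machine's stream() later opens the same path as the data channel:

import java.nio.file.Path;
import java.nio.file.Paths;

public class StreamInitSketch {
  // Stands in for chunkManager.streamInit(container, blockID).
  static String handleStreamInit(long containerId, long blockId) {
    return "/data/hdds/" + containerId + "/chunks/" + blockId + ".block";
  }

  public static void main(String[] args) {
    String path = handleStreamInit(42L, 7L); // carried as the response message
    Path channelPath = Paths.get(path);      // later opened by StreamDataChannel
    System.out.println("stream will write to " + channelPath);
  }
}
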
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/ChunkManagerDispatcher.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/ChunkManagerDispatcher.java
index 763647313b..3e2ab46470 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/ChunkManagerDispatcher.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/ChunkManagerDispatcher.java
@@ -73,6 +73,12 @@ public class ChunkManagerDispatcher implements ChunkManager {
         .writeChunk(container, blockID, info, data, dispatcherContext);
   }
 
+  public String streamInit(Container container, BlockID blockID)
+      throws StorageContainerException {
+    return selectHandler(container)
+        .streamInit(container, blockID);
+  }
+
   @Override
   public void finishWriteChunks(KeyValueContainer kvContainer,
       BlockData blockData) throws IOException {
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/FilePerBlockStrategy.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/FilePerBlockStrategy.java
index 51cd5708d3..9efc6bc351 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/FilePerBlockStrategy.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/FilePerBlockStrategy.java
@@ -89,6 +89,14 @@ public class FilePerBlockStrategy implements ChunkManager {
         container.getContainerData().getLayoutVersion() == FILE_PER_BLOCK);
   }
 
+  @Override
+  public String streamInit(Container container, BlockID blockID)
+      throws StorageContainerException {
+    checkLayoutVersion(container);
+    File chunkFile = getChunkFile(container, blockID, null);
+    return chunkFile.getAbsolutePath();
+  }
+
   @Override
   public void writeChunk(Container container, BlockID blockID, ChunkInfo info,
       ChunkBuffer data, DispatcherContext dispatcherContext)
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/interfaces/ChunkManager.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/interfaces/ChunkManager.java
index 15ff9d6b9d..ba06eebd69 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/interfaces/ChunkManager.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/interfaces/ChunkManager.java
@@ -104,6 +104,11 @@ public interface ChunkManager {
     // no-op
   }
 
+  default String streamInit(Container container, BlockID blockID)
+      throws StorageContainerException {
+    return null;
+  }
+
   static long getBufferCapacityForChunkRead(ChunkInfo chunkInfo,
       long defaultReadBufferCapacity) {
     long bufferCapacity = 0;
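
The default method above is what keeps other ChunkManager implementations compiling while FilePerBlockStrategy opts in; the pattern in isolation (illustrative interface and a hypothetical path, not the Ozone classes):

// A default method adds a new operation to an interface without breaking
// implementations that do not support it, as ChunkManager.streamInit does.
interface ChunkHandler {
  default String streamInit(long blockId) {
    return null; // unsupported unless overridden
  }
}

class FilePerBlockHandler implements ChunkHandler {
  @Override
  public String streamInit(long blockId) {
    return "/data/chunks/" + blockId + ".block"; // hypothetical chunk path
  }
}

class LegacyHandler implements ChunkHandler {
  // compiles unchanged; inherits the null-returning default
}
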
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestDatanodeStateMachine.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestDatanodeStateMachine.java
index 3107e16c2a..8aa1222233 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestDatanodeStateMachine.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestDatanodeStateMachine.java
@@ -81,6 +81,8 @@ public class TestDatanodeStateMachine {
         TimeUnit.MILLISECONDS);
     conf.setBoolean(OzoneConfigKeys.DFS_CONTAINER_RATIS_IPC_RANDOM_PORT, true);
     conf.setBoolean(OzoneConfigKeys.DFS_CONTAINER_IPC_RANDOM_PORT, true);
+    conf.setBoolean(
+        OzoneConfigKeys.DFS_CONTAINER_RATIS_DATASTREAM_RANDOM_PORT, true);
     serverAddresses = new ArrayList<>();
     scmServers = new ArrayList<>();
     mockServers = new ArrayList<>();
@@ -215,7 +217,6 @@ public class TestDatanodeStateMachine {
         OzoneConfigKeys.DFS_CONTAINER_IPC_PORT_DEFAULT);
     datanodeDetails.setPort(port);
     ContainerUtils.writeDatanodeDetailsTo(datanodeDetails, idPath);
-
     try (DatanodeStateMachine stateMachine =
              new DatanodeStateMachine(datanodeDetails, conf, null, null,
                  null)) {
@@ -424,6 +425,8 @@ public class TestDatanodeStateMachine {
         DatanodeDetails.Port.Name.RATIS, 0);
     DatanodeDetails.Port restPort = DatanodeDetails.newPort(
         DatanodeDetails.Port.Name.REST, 0);
+    DatanodeDetails.Port streamPort = DatanodeDetails.newPort(
+        DatanodeDetails.Port.Name.RATIS_DATASTREAM, 0);
     return DatanodeDetails.newBuilder()
         .setUuid(UUID.randomUUID())
         .setHostName("localhost")
@@ -431,6 +434,7 @@ public class TestDatanodeStateMachine {
         .addPort(containerPort)
         .addPort(ratisPort)
         .addPort(restPort)
+        .addPort(streamPort)
         .build();
   }
 }
diff --git a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/conf/DatanodeRatisServerConfig.java b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/conf/DatanodeRatisServerConfig.java
index 25ed4776b7..205d92e955 100644
--- a/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/conf/DatanodeRatisServerConfig.java
+++ b/hadoop-hdds/framework/src/main/java/org/apache/hadoop/hdds/conf/DatanodeRatisServerConfig.java
@@ -23,6 +23,7 @@ import org.apache.ratis.server.RaftServerConfigKeys;
 import java.time.Duration;
 
 import static org.apache.hadoop.hdds.conf.ConfigTag.DATANODE;
+import static org.apache.hadoop.hdds.conf.ConfigTag.DATASTREAM;
 import static org.apache.hadoop.hdds.conf.ConfigTag.OZONE;
 import static org.apache.hadoop.hdds.conf.ConfigTag.PERFORMANCE;
 import static org.apache.hadoop.hdds.conf.ConfigTag.RATIS;
@@ -123,6 +124,40 @@ public class DatanodeRatisServerConfig {
     this.leaderNumPendingRequests = leaderNumPendingRequests;
   }
 
+  @Config(key = "datastream.request.threads",
+      defaultValue = "20",
+      type = ConfigType.INT,
+      tags = {OZONE, DATANODE, RATIS, DATASTREAM},
+      description = "Maximum number of threads in the thread pool for " +
+          "datastream request."
+  )
+  private int streamRequestThreads;
+
+  public int getStreamRequestThreads() {
+    return streamRequestThreads;
+  }
+
+  public void setStreamRequestThreads(int streamRequestThreads) {
+    this.streamRequestThreads = streamRequestThreads;
+  }
+
+  @Config(key = "datastream.write.threads",
+      defaultValue = "20",
+      type = ConfigType.INT,
+      tags = {OZONE, DATANODE, RATIS, DATASTREAM},
+      description = "Maximum number of threads in the thread pool for " +
+          "datastream write."
+  )
+  private int streamWriteThreads;
+
+  public int getStreamWriteThreads() {
+    return streamWriteThreads;
+  }
+
+  public void setStreamWriteThreads(int streamWriteThreads) {
+    this.streamWriteThreads = streamWriteThreads;
+  }
+
   @Config(key = "delete.ratis.log.directory",
           defaultValue = "true",
           type = ConfigType.BOOLEAN,
diff --git a/hadoop-hdds/interface-client/src/main/proto/DatanodeClientProtocol.proto b/hadoop-hdds/interface-client/src/main/proto/DatanodeClientProtocol.proto
index c16059c5c4..423e63babd 100644
--- a/hadoop-hdds/interface-client/src/main/proto/DatanodeClientProtocol.proto
+++ b/hadoop-hdds/interface-client/src/main/proto/DatanodeClientProtocol.proto
@@ -100,6 +100,8 @@ enum Type {
   GetSmallFile = 16;
   CloseContainer = 17;
   GetCommittedBlockLength = 18;
+
+  StreamInit = 19;
 }
 
 
@@ -400,7 +402,7 @@ enum ChecksumType {
 
 message  WriteChunkRequestProto  {
   required DatanodeBlockID blockID = 1;
-  required ChunkInfo chunkData = 2;
+  optional ChunkInfo chunkData = 2;
   optional bytes data = 3;
 }
 
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/common/TestEndPoint.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/common/TestEndPoint.java
index 0230109fa4..56a04de02c 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/common/TestEndPoint.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/common/TestEndPoint.java
@@ -144,6 +144,8 @@ public class TestEndPoint {
     try (EndpointStateMachine rpcEndPoint = createEndpoint(conf,
         serverAddress, 1000)) {
       DatanodeDetails datanodeDetails = randomDatanodeDetails();
+      conf.setBoolean(
+          OzoneConfigKeys.DFS_CONTAINER_RATIS_DATASTREAM_RANDOM_PORT, true);
       OzoneContainer ozoneContainer = new OzoneContainer(
           datanodeDetails, conf, getContext(datanodeDetails), null);
       rpcEndPoint.setState(EndpointStateMachine.EndPointStates.GETVERSION);
@@ -168,6 +170,8 @@ public class TestEndPoint {
         true);
     conf.setBoolean(OzoneConfigKeys.DFS_CONTAINER_RATIS_IPC_RANDOM_PORT,
         true);
+    conf.setBoolean(
+        OzoneConfigKeys.DFS_CONTAINER_RATIS_DATASTREAM_RANDOM_PORT, true);
     conf.setFromObject(new ReplicationConfig().setPort(0));
     try (EndpointStateMachine rpcEndPoint = createEndpoint(conf,
         serverAddress, 1000)) {
diff --git a/hadoop-ozone/dev-support/intellij/runConfigurations/Datanode2.xml b/hadoop-ozone/dev-support/intellij/runConfigurations/Datanode2.xml
index 3d3302030d..040b515b9f 100644
--- a/hadoop-ozone/dev-support/intellij/runConfigurations/Datanode2.xml
+++ b/hadoop-ozone/dev-support/intellij/runConfigurations/Datanode2.xml
@@ -18,7 +18,7 @@
   <configuration default="false" name="Datanode2" type="Application" factoryName="Application">
     <option name="MAIN_CLASS_NAME" value="org.apache.hadoop.ozone.HddsDatanodeService" />
     <module name="ozone-datanode" />
-    <option name="PROGRAM_PARAMETERS" value="-conf=hadoop-ozone/dev-support/intellij/ozone-site.xml --set ozone.metadata.dirs=/tmp/datanode2 --set hdds.datanode.dir=/tmp/datanode2/storage --set hdds.datanode.http-address=127.0.0.1:10021 --set dfs.container.ratis.ipc=10022 --set dfs.container.ipc=10023 --set dfs.container.ratis.server.port=10024 --set dfs.container.ratis.admin.port=10025 --set hdds.datanode.replication.port=10026" />
+    <option name="PROGRAM_PARAMETERS" value="-conf=hadoop-ozone/dev-support/intellij/ozone-site.xml --set ozone.metadata.dirs=/tmp/datanode2 --set hdds.datanode.dir=/tmp/datanode2/storage --set hdds.datanode.http-address=127.0.0.1:10021 --set dfs.container.ratis.ipc=10022 --set dfs.container.ipc=10023 --set dfs.container.ratis.server.port=10024 --set dfs.container.ratis.admin.port=10025 --set hdds.datanode.replication.port=10026 --set dfs.container.ratis.datastream.port=10027" />
     <option name="VM_PARAMETERS" value="-Dlog4j.configuration=file:hadoop-ozone/dev-support/intellij/log4j.properties" />
     <extension name="coverage">
       <pattern>
diff --git a/hadoop-ozone/dev-support/intellij/runConfigurations/Datanode3.xml b/hadoop-ozone/dev-support/intellij/runConfigurations/Datanode3.xml
index 10b6682a0e..6a3116e0fd 100644
--- a/hadoop-ozone/dev-support/intellij/runConfigurations/Datanode3.xml
+++ b/hadoop-ozone/dev-support/intellij/runConfigurations/Datanode3.xml
@@ -18,7 +18,7 @@
   <configuration default="false" name="Datanode3" type="Application" factoryName="Application">
     <option name="MAIN_CLASS_NAME" value="org.apache.hadoop.ozone.HddsDatanodeService" />
     <module name="ozone-datanode" />
-    <option name="PROGRAM_PARAMETERS" value="-conf=hadoop-ozone/dev-support/intellij/ozone-site.xml --set ozone.metadata.dirs=/tmp/datanode3 --set hdds.datanode.dir=/tmp/datanode3/storage --set hdds.datanode.http-address=127.0.0.1:10031 --set dfs.container.ratis.ipc=10032 --set dfs.container.ipc=10033 --set dfs.container.ratis.server.port=10034 --set dfs.container.ratis.admin.port=10035 --set hdds.datanode.replication.port=10036" />
+    <option name="PROGRAM_PARAMETERS" value="-conf=hadoop-ozone/dev-support/intellij/ozone-site.xml --set ozone.metadata.dirs=/tmp/datanode3 --set hdds.datanode.dir=/tmp/datanode3/storage --set hdds.datanode.http-address=127.0.0.1:10031 --set dfs.container.ratis.ipc=10032 --set dfs.container.ipc=10033 --set dfs.container.ratis.server.port=10034 --set dfs.container.ratis.admin.port=10035 --set hdds.datanode.replication.port=10036 --set dfs.container.ratis.datastream.port=10037" />
     <option name="VM_PARAMETERS" value="-Dlog4j.configuration=file:hadoop-ozone/dev-support/intellij/log4j.properties" />
     <extension name="coverage">
       <pattern>
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneCluster.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneCluster.java
index 0eb2e30f17..fa2d192058 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneCluster.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneCluster.java
@@ -320,6 +320,7 @@ public interface MiniOzoneCluster {
     protected Optional<String> omId = Optional.empty();
     
     protected Boolean randomContainerPort = true;
+    protected Boolean randomContainerStreamPort = true;
     protected Optional<String> datanodeReservedSpace = Optional.empty();
     protected Optional<Integer> chunkSize = Optional.empty();
     protected OptionalInt streamBufferSize = OptionalInt.empty();
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneClusterImpl.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneClusterImpl.java
index 1bdaef3a9b..2421daeaa6 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneClusterImpl.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneClusterImpl.java
@@ -90,6 +90,7 @@ import static org.apache.hadoop.hdds.scm.ScmConfig.ConfigStrings.HDDS_SCM_INIT_D
 import static org.apache.hadoop.ozone.OzoneConfigKeys.DFS_CONTAINER_IPC_PORT;
 import static org.apache.hadoop.ozone.OzoneConfigKeys.DFS_CONTAINER_IPC_RANDOM_PORT;
 import static org.apache.hadoop.ozone.OzoneConfigKeys.DFS_CONTAINER_RATIS_ADMIN_PORT;
+import static org.apache.hadoop.ozone.OzoneConfigKeys.DFS_CONTAINER_RATIS_DATASTREAM_RANDOM_PORT;
 import static org.apache.hadoop.ozone.OzoneConfigKeys.DFS_CONTAINER_RATIS_IPC_PORT;
 import static org.apache.hadoop.ozone.OzoneConfigKeys.DFS_CONTAINER_RATIS_IPC_RANDOM_PORT;
 import static org.apache.hadoop.ozone.OzoneConfigKeys.DFS_CONTAINER_RATIS_SERVER_PORT;
@@ -916,6 +917,8 @@ public class MiniOzoneClusterImpl implements MiniOzoneCluster {
           randomContainerPort);
       conf.setBoolean(OzoneConfigKeys.DFS_CONTAINER_RATIS_IPC_RANDOM_PORT,
           randomContainerPort);
+      conf.setBoolean(DFS_CONTAINER_RATIS_DATASTREAM_RANDOM_PORT,
+          randomContainerStreamPort);
 
       conf.setFromObject(new ReplicationConfig().setPort(0));
     }
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestMiniOzoneCluster.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestMiniOzoneCluster.java
index 09fb5e9ef7..7da2bc8e17 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestMiniOzoneCluster.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestMiniOzoneCluster.java
@@ -216,6 +216,8 @@ public class TestMiniOzoneCluster {
     ozoneConf.setBoolean(OzoneConfigKeys.DFS_CONTAINER_IPC_RANDOM_PORT, true);
     ozoneConf.setBoolean(OzoneConfigKeys.DFS_CONTAINER_RATIS_IPC_RANDOM_PORT,
         true);
+    ozoneConf.setBoolean(
+        OzoneConfigKeys.DFS_CONTAINER_RATIS_DATASTREAM_RANDOM_PORT, true);
     List<DatanodeStateMachine> stateMachines = new ArrayList<>();
     try {
 
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/server/TestSecureContainerServer.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/server/TestSecureContainerServer.java
index 73453acebc..51d617a4e0 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/server/TestSecureContainerServer.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/server/TestSecureContainerServer.java
@@ -216,6 +216,8 @@ public class TestSecureContainerServer {
       DatanodeDetails dn, OzoneConfiguration conf) throws IOException {
     conf.setInt(OzoneConfigKeys.DFS_CONTAINER_RATIS_IPC_PORT,
         dn.getPort(DatanodeDetails.Port.Name.RATIS).getValue());
+    conf.setBoolean(
+        OzoneConfigKeys.DFS_CONTAINER_RATIS_DATASTREAM_RANDOM_PORT, true);
     final String dir = TEST_DIR + dn.getUuid();
     conf.set(OzoneConfigKeys.DFS_CONTAINER_RATIS_DATANODE_STORAGE_DIR, dir);
     final ContainerDispatcher dispatcher = createDispatcher(dn,
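The pattern these hunks add is small: a test obtains an OzoneConfiguration and turns on the random datastream port before starting datanodes, so several datanodes can bind Ratis streaming ports on one host. A minimal sketch of that pattern (constant names as in the imports above; the surrounding test setup is assumed):

    OzoneConfiguration conf = new OzoneConfiguration();
    // Let each datanode pick a free Ratis datastream port instead of the
    // fixed default, avoiding bind conflicts in single-host mini clusters.
    conf.setBoolean(
        OzoneConfigKeys.DFS_CONTAINER_RATIS_DATASTREAM_RANDOM_PORT, true);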


---------------------------------------------------------------------
To unsubscribe, e-mail: commits-unsubscribe@ozone.apache.org
For additional commands, e-mail: commits-help@ozone.apache.org


[ozone] 11/38: HDDS-5674. [Ozone-Streaming] Handle client retries on exception (#2701)

Posted by sz...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

szetszwo pushed a commit to branch HDDS-4454
in repository https://gitbox.apache.org/repos/asf/ozone.git

commit 5b73b5148f088a9203d2740434ced512d2ac7b53
Author: Sadanand Shenoy <sa...@gmail.com>
AuthorDate: Thu Oct 21 12:57:30 2021 +0530

    HDDS-5674. [Ozone-Streaming] Handle client retries on exception (#2701)
    
    (cherry picked from commit 3daf3d7a8fbe08a6ffc095e53a874d4295380cf8)
    (cherry picked from commit 508fda8744a9bd04bb1cb6c4ee12df5581e4dded)
---
 .../hdds/scm/storage/BlockDataStreamOutput.java    | 62 ++++++++++++---
 .../hadoop/hdds/scm/storage/StreamBuffer.java      | 46 +++++++++++
 .../hdds/scm/storage/StreamCommitWatcher.java      | 93 ++++++++++++++++++----
 .../client/io/BlockDataStreamOutputEntry.java      | 33 +++++++-
 .../client/io/BlockDataStreamOutputEntryPool.java  | 14 +++-
 .../ozone/client/io/KeyDataStreamOutput.java       | 12 ++-
 .../client/rpc/TestBlockDataStreamOutput.java      | 30 +++++++
 .../apache/hadoop/ozone/container/TestHelper.java  | 20 +++++
 8 files changed, 279 insertions(+), 31 deletions(-)

diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockDataStreamOutput.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockDataStreamOutput.java
index 2ae0ba7525..aada48e2f5 100644
--- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockDataStreamOutput.java
+++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockDataStreamOutput.java
@@ -92,6 +92,11 @@ public class BlockDataStreamOutput implements ByteBufferStreamOutput {
 
   private int chunkIndex;
   private final AtomicLong chunkOffset = new AtomicLong();
+
+  // Similar to 'BufferPool' but this list maintains only references
+  // to the ByteBuffers.
+  private List<StreamBuffer> bufferList;
+
   // The IOException will be set by response handling thread in case there is an
   // exception received in the response. If the exception is set, the next
   // request will fail upfront.
@@ -133,7 +138,8 @@ public class BlockDataStreamOutput implements ByteBufferStreamOutput {
       XceiverClientFactory xceiverClientManager,
       Pipeline pipeline,
       OzoneClientConfig config,
-      Token<? extends TokenIdentifier> token
+      Token<? extends TokenIdentifier> token,
+      List<StreamBuffer> bufferList
   ) throws IOException {
     this.xceiverClientFactory = xceiverClientManager;
     this.config = config;
@@ -148,7 +154,7 @@ public class BlockDataStreamOutput implements ByteBufferStreamOutput {
     // Alternatively, stream setup can be delayed till the first chunk write.
     this.out = setupStream(pipeline);
     this.token = token;
-
+    this.bufferList = bufferList;
     flushPeriod = (int) (config.getStreamBufferFlushSize() / config
         .getStreamBufferSize());
 
@@ -159,7 +165,7 @@ public class BlockDataStreamOutput implements ByteBufferStreamOutput {
 
     // A single thread executor handle the responses of async requests
     responseExecutor = Executors.newSingleThreadExecutor();
-    commitWatcher = new StreamCommitWatcher(xceiverClient);
+    commitWatcher = new StreamCommitWatcher(xceiverClient, bufferList);
     totalDataFlushedLength = 0;
     writtenDataLength = 0;
     failedServers = new ArrayList<>(0);
@@ -251,8 +257,11 @@ public class BlockDataStreamOutput implements ByteBufferStreamOutput {
     if (len == 0) {
       return;
     }
-    writeChunkToContainer(
-            (ByteBuffer) b.asReadOnlyBuffer().position(off).limit(off + len));
+
+    final StreamBuffer buf = new StreamBuffer(b, off, len);
+    bufferList.add(buf);
+
+    writeChunkToContainer(buf.duplicate());
 
     writtenDataLength += len;
   }
@@ -261,6 +270,10 @@ public class BlockDataStreamOutput implements ByteBufferStreamOutput {
     totalDataFlushedLength = writtenDataLength;
   }
 
+  @VisibleForTesting
+  public long getTotalDataFlushedLength() {
+    return totalDataFlushedLength;
+  }
   /**
    * Will be called on the retryPath in case closedContainerException/
    * TimeoutException.
@@ -268,8 +281,27 @@ public class BlockDataStreamOutput implements ByteBufferStreamOutput {
    * @throws IOException if error occurred
    */
 
-  // TODO: We need add new retry policy without depend on bufferPool.
   public void writeOnRetry(long len) throws IOException {
+    if (len == 0) {
+      return;
+    }
+    if (LOG.isDebugEnabled()) {
+      LOG.debug("Retrying write length {} for blockID {}", len, blockID);
+    }
+    int count = 0;
+    while (len > 0) {
+      final StreamBuffer buf = bufferList.get(count);
+      final long writeLen = Math.min(buf.length(), len);
+      final ByteBuffer duplicated = buf.duplicate();
+      if (writeLen != buf.length()) {
+        duplicated.limit(Math.toIntExact(len));
+      }
+      writeChunkToContainer(duplicated);
+      len -= writeLen;
+      count++;
+      writtenDataLength += writeLen;
+    }
+
 
   }
 
@@ -314,6 +346,14 @@ public class BlockDataStreamOutput implements ByteBufferStreamOutput {
       boolean force) throws IOException {
     checkOpen();
     long flushPos = totalDataFlushedLength;
+    final List<StreamBuffer> byteBufferList;
+    if (!force) {
+      Preconditions.checkNotNull(bufferList);
+      byteBufferList = bufferList;
+      Preconditions.checkNotNull(byteBufferList);
+    } else {
+      byteBufferList = null;
+    }
     flush();
     if (close) {
       dataStreamCloseReply = out.closeAsync();
@@ -344,12 +384,12 @@ public class BlockDataStreamOutput implements ByteBufferStreamOutput {
           if (LOG.isDebugEnabled()) {
             LOG.debug(
                 "Adding index " + asyncReply.getLogIndex() + " commitMap size "
-                    + commitWatcher.getCommitInfoSetSize() + " flushLength "
+                    + commitWatcher.getCommitInfoMapSize() + " flushLength "
                     + flushPos + " blockID " + blockID);
           }
           // for standalone protocol, logIndex will always be 0.
-          commitWatcher.updateCommitInfoSet(
-              asyncReply.getLogIndex());
+          commitWatcher
+              .updateCommitInfoMap(asyncReply.getLogIndex(), byteBufferList);
         }
         return e;
       }, responseExecutor).exceptionally(e -> {
@@ -589,4 +629,8 @@ public class BlockDataStreamOutput implements ByteBufferStreamOutput {
     setIoException(ex);
     throw getIoException();
   }
+
+  public long getTotalAckDataLength() {
+    return commitWatcher.getTotalAckDataLength();
+  }
 }
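Taken together, the new write path retains every StreamBuffer in bufferList for a possible replay and hands only a duplicate to the transport, so the transport can consume its copy without invalidating the retained reference. A condensed sketch of that invariant (names as in the diff above):

    // Retain a read-only view of the caller's data for writeOnRetry().
    final StreamBuffer buf = new StreamBuffer(b, off, len);
    bufferList.add(buf);
    // The transport advances the position of its own duplicate; the retained
    // buffer stays replayable until its log index is committed.
    writeChunkToContainer(buf.duplicate());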
diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/StreamBuffer.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/StreamBuffer.java
new file mode 100644
index 0000000000..f36019e2ae
--- /dev/null
+++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/StreamBuffer.java
@@ -0,0 +1,46 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ *  with the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ */
+
+package org.apache.hadoop.hdds.scm.storage;
+
+import java.nio.ByteBuffer;
+
+/**
+ * Used for streaming write.
+ */
+public class StreamBuffer {
+  private final ByteBuffer buffer;
+
+  public StreamBuffer(ByteBuffer buffer) {
+    this.buffer = buffer.asReadOnlyBuffer();
+  }
+
+  public StreamBuffer(ByteBuffer buffer, int offset, int length) {
+    this((ByteBuffer) buffer.asReadOnlyBuffer().position(offset)
+        .limit(offset + length));
+  }
+
+  public ByteBuffer duplicate() {
+    return buffer.duplicate();
+  }
+
+  public int length() {
+    return buffer.limit() - buffer.position();
+  }
+
+}
\ No newline at end of file
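StreamBuffer leans on java.nio duplicate semantics: duplicates share content but carry an independent position and limit. A self-contained illustration (plain JDK, not part of the commit):

    import java.nio.ByteBuffer;
    import java.nio.charset.StandardCharsets;

    public class DuplicateDemo {
      public static void main(String[] args) {
        ByteBuffer data =
            ByteBuffer.wrap("hello".getBytes(StandardCharsets.UTF_8));
        ByteBuffer copy = data.duplicate(); // same bytes, separate cursor
        copy.get();                         // advances only the duplicate
        System.out.println(data.position() + " vs " + copy.position()); // 0 vs 1
      }
    }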
diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/StreamCommitWatcher.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/StreamCommitWatcher.java
index c187ffe902..3a59d07571 100644
--- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/StreamCommitWatcher.java
+++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/StreamCommitWatcher.java
@@ -24,6 +24,7 @@
  */
 package org.apache.hadoop.hdds.scm.storage;
 
+import com.google.common.base.Preconditions;
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerCommandResponseProto;
 import org.apache.hadoop.hdds.scm.XceiverClientReply;
 import org.apache.hadoop.hdds.scm.XceiverClientSpi;
@@ -31,13 +32,16 @@ import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 import java.io.IOException;
-import java.util.Set;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.Map;
 import java.util.concurrent.CompletableFuture;
 import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.ConcurrentMap;
-import java.util.concurrent.ConcurrentSkipListSet;
+import java.util.concurrent.ConcurrentSkipListMap;
 import java.util.concurrent.ExecutionException;
 import java.util.concurrent.TimeoutException;
+import java.util.stream.Collectors;
 
 /**
  * This class executes watchForCommit on ratis pipeline and releases
@@ -48,7 +52,12 @@ public class StreamCommitWatcher {
   private static final Logger LOG =
       LoggerFactory.getLogger(StreamCommitWatcher.class);
 
-  private Set<Long> commitIndexSet;
+  private Map<Long, List<StreamBuffer>> commitIndexMap;
+  private List<StreamBuffer> bufferList;
+
+  // total data which has been successfully flushed and acknowledged
+  // by all servers
+  private long totalAckDataLength;
 
   // future Map to hold up all putBlock futures
   private ConcurrentHashMap<Long,
@@ -57,18 +66,22 @@ public class StreamCommitWatcher {
 
   private XceiverClientSpi xceiverClient;
 
-  public StreamCommitWatcher(XceiverClientSpi xceiverClient) {
+  public StreamCommitWatcher(XceiverClientSpi xceiverClient,
+      List<StreamBuffer> bufferList) {
     this.xceiverClient = xceiverClient;
-    commitIndexSet = new ConcurrentSkipListSet();
+    commitIndexMap = new ConcurrentSkipListMap<>();
     futureMap = new ConcurrentHashMap<>();
+    this.bufferList = bufferList;
+    totalAckDataLength = 0;
   }
 
-  public void updateCommitInfoSet(long index) {
-    commitIndexSet.add(index);
+  public void updateCommitInfoMap(long index, List<StreamBuffer> buffers) {
+    commitIndexMap.computeIfAbsent(index, k -> new LinkedList<>())
+        .addAll(buffers);
   }
 
-  int getCommitInfoSetSize() {
-    return commitIndexSet.size();
+  int getCommitInfoMapSize() {
+    return commitIndexMap.size();
   }
 
   /**
@@ -78,12 +91,12 @@ public class StreamCommitWatcher {
    * @throws IOException in case watchForCommit fails
    */
   public XceiverClientReply streamWatchOnFirstIndex() throws IOException {
-    if (!commitIndexSet.isEmpty()) {
+    if (!commitIndexMap.isEmpty()) {
       // wait for the  first commit index in the commitIndex2flushedDataMap
       // to get committed to all or majority of nodes in case timeout
       // happens.
       long index =
-          commitIndexSet.stream().mapToLong(v -> v).min()
+          commitIndexMap.keySet().stream().mapToLong(v -> v).min()
               .getAsLong();
       if (LOG.isDebugEnabled()) {
         LOG.debug("waiting for first index {} to catch up", index);
@@ -102,12 +115,12 @@ public class StreamCommitWatcher {
    */
   public XceiverClientReply streamWatchOnLastIndex()
       throws IOException {
-    if (!commitIndexSet.isEmpty()) {
+    if (!commitIndexMap.isEmpty()) {
       // wait for the  commit index in the commitIndex2flushedDataMap
       // to get committed to all or majority of nodes in case timeout
       // happens.
       long index =
-          commitIndexSet.stream().mapToLong(v -> v).max()
+          commitIndexMap.keySet().stream().mapToLong(v -> v).max()
               .getAsLong();
       if (LOG.isDebugEnabled()) {
         LOG.debug("waiting for last flush Index {} to catch up", index);
@@ -127,9 +140,16 @@ public class StreamCommitWatcher {
    */
   public XceiverClientReply streamWatchForCommit(long commitIndex)
       throws IOException {
+    final long index;
     try {
       XceiverClientReply reply =
           xceiverClient.watchForCommit(commitIndex);
+      if (reply == null) {
+        index = 0;
+      } else {
+        index = reply.getLogIndex();
+      }
+      adjustBuffers(index);
       return reply;
     } catch (InterruptedException e) {
       // Re-interrupt the thread while catching InterruptedException
@@ -140,11 +160,52 @@ public class StreamCommitWatcher {
     }
   }
 
+  void releaseBuffersOnException() {
+    adjustBuffers(xceiverClient.getReplicatedMinCommitIndex());
+  }
+
+  private void adjustBuffers(long commitIndex) {
+    List<Long> keyList = commitIndexMap.keySet().stream()
+        .filter(p -> p <= commitIndex).collect(Collectors.toList());
+    if (!keyList.isEmpty()) {
+      releaseBuffers(keyList);
+    }
+  }
+
+  private long releaseBuffers(List<Long> indexes) {
+    Preconditions.checkArgument(!commitIndexMap.isEmpty());
+    for (long index : indexes) {
+      Preconditions.checkState(commitIndexMap.containsKey(index));
+      final List<StreamBuffer> buffers = commitIndexMap.remove(index);
+      final long length =
+          buffers.stream().mapToLong(StreamBuffer::length).sum();
+      totalAckDataLength += length;
+      // clear the future object from the future Map
+      final CompletableFuture<ContainerCommandResponseProto> remove =
+          futureMap.remove(totalAckDataLength);
+      if (remove == null) {
+        LOG.error("Couldn't find required future for " + totalAckDataLength);
+        for (Long key : futureMap.keySet()) {
+          LOG.error("Existing acknowledged data: " + key);
+        }
+      }
+      for (StreamBuffer byteBuffer : buffers) {
+        bufferList.remove(byteBuffer);
+      }
+    }
+    return totalAckDataLength;
+  }
+
+  public long getTotalAckDataLength() {
+    return totalAckDataLength;
+  }
+
   private IOException getIOExceptionForWatchForCommit(long commitIndex,
                                                        Exception e) {
     LOG.warn("watchForCommit failed for index {}", commitIndex, e);
     IOException ioException = new IOException(
         "Unexpected Storage Container Exception: " + e.toString(), e);
+    releaseBuffersOnException();
     return ioException;
   }
 
@@ -155,12 +216,12 @@ public class StreamCommitWatcher {
   }
 
   public void cleanup() {
-    if (commitIndexSet != null) {
-      commitIndexSet.clear();
+    if (commitIndexMap != null) {
+      commitIndexMap.clear();
     }
     if (futureMap != null) {
       futureMap.clear();
     }
-    commitIndexSet = null;
+    commitIndexMap = null;
   }
 }
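The watcher's bookkeeping boils down to a sorted map from Ratis log index to the buffers that index covers: once watchForCommit reports an index, everything at or below it is durable on a majority of nodes and can be released. A condensed sketch of that core (field names follow the diff; the futureMap cleanup is omitted):

    private void adjustBuffers(long commitIndex) {
      // Indexes <= commitIndex are replicated; their buffers are safe to free.
      List<Long> committed = commitIndexMap.keySet().stream()
          .filter(index -> index <= commitIndex)
          .collect(Collectors.toList());
      for (long index : committed) {
        for (StreamBuffer buffer : commitIndexMap.remove(index)) {
          totalAckDataLength += buffer.length();
          bufferList.remove(buffer); // shrinks the client-side retry window
        }
      }
    }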
diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/BlockDataStreamOutputEntry.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/BlockDataStreamOutputEntry.java
index f0c3a43e89..2cd5630549 100644
--- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/BlockDataStreamOutputEntry.java
+++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/BlockDataStreamOutputEntry.java
@@ -25,6 +25,7 @@ import org.apache.hadoop.hdds.scm.XceiverClientFactory;
 import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
 import org.apache.hadoop.hdds.scm.storage.BlockDataStreamOutput;
 import org.apache.hadoop.hdds.scm.storage.ByteBufferStreamOutput;
+import org.apache.hadoop.hdds.scm.storage.StreamBuffer;
 import org.apache.hadoop.hdds.security.token.OzoneBlockTokenIdentifier;
 import org.apache.hadoop.security.token.Token;
 
@@ -32,6 +33,7 @@ import java.io.IOException;
 import java.nio.ByteBuffer;
 import java.util.Collection;
 import java.util.Collections;
+import java.util.List;
 
 /**
  * Helper class used inside {@link BlockDataStreamOutput}.
@@ -50,6 +52,7 @@ public final class BlockDataStreamOutputEntry
   // the current position of this stream 0 <= currentPosition < length
   private long currentPosition;
   private final Token<OzoneBlockTokenIdentifier> token;
+  private List<StreamBuffer> bufferList;
 
   @SuppressWarnings({"parameternumber", "squid:S00107"})
   private BlockDataStreamOutputEntry(
@@ -58,7 +61,8 @@ public final class BlockDataStreamOutputEntry
       Pipeline pipeline,
       long length,
       Token<OzoneBlockTokenIdentifier> token,
-      OzoneClientConfig config
+      OzoneClientConfig config,
+      List<StreamBuffer> bufferList
   ) {
     this.config = config;
     this.byteBufferStreamOutput = null;
@@ -69,6 +73,7 @@ public final class BlockDataStreamOutputEntry
     this.token = token;
     this.length = length;
     this.currentPosition = 0;
+    this.bufferList = bufferList;
   }
 
   long getLength() {
@@ -92,8 +97,8 @@ public final class BlockDataStreamOutputEntry
   private void checkStream() throws IOException {
     if (this.byteBufferStreamOutput == null) {
       this.byteBufferStreamOutput =
-          new BlockDataStreamOutput(blockID, xceiverClientManager,
-              pipeline, config, token);
+          new BlockDataStreamOutput(blockID, xceiverClientManager, pipeline,
+              config, token, bufferList);
     }
   }
 
@@ -151,6 +156,20 @@ public final class BlockDataStreamOutputEntry
     }
   }
 
+  long getTotalAckDataLength() {
+    if (byteBufferStreamOutput != null) {
+      BlockDataStreamOutput out =
+          (BlockDataStreamOutput) this.byteBufferStreamOutput;
+      blockID = out.getBlockID();
+      return out.getTotalAckDataLength();
+    } else {
+      // For a pre-allocated block for which no write has been initiated,
+      // the OutputStream will be null here.
+      // In such cases, the default blockCommitSequenceId will be 0.
+      return 0;
+    }
+  }
+
   void cleanup(boolean invalidateClient) throws IOException {
     checkStream();
     BlockDataStreamOutput out =
@@ -180,6 +199,7 @@ public final class BlockDataStreamOutputEntry
     private long length;
     private Token<OzoneBlockTokenIdentifier> token;
     private OzoneClientConfig config;
+    private List<StreamBuffer> bufferList;
 
     public Builder setBlockID(BlockID bID) {
       this.blockID = bID;
@@ -219,13 +239,18 @@ public final class BlockDataStreamOutputEntry
       return this;
     }
 
+    public Builder setBufferList(List<StreamBuffer> bList) {
+      this.bufferList = bList;
+      return this;
+    }
+
     public BlockDataStreamOutputEntry build() {
       return new BlockDataStreamOutputEntry(blockID,
           key,
           xceiverClientManager,
           pipeline,
           length,
-          token, config);
+          token, config, bufferList);
     }
   }
 
diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/BlockDataStreamOutputEntryPool.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/BlockDataStreamOutputEntryPool.java
index 4bc55de262..e49b0b79ad 100644
--- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/BlockDataStreamOutputEntryPool.java
+++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/BlockDataStreamOutputEntryPool.java
@@ -26,6 +26,7 @@ import org.apache.hadoop.hdds.scm.OzoneClientConfig;
 import org.apache.hadoop.hdds.scm.XceiverClientFactory;
 import org.apache.hadoop.hdds.scm.container.common.helpers.ExcludeList;
 import org.apache.hadoop.hdds.scm.pipeline.PipelineID;
+import org.apache.hadoop.hdds.scm.storage.StreamBuffer;
 import org.apache.hadoop.ozone.om.helpers.OmKeyArgs;
 import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
 import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo;
@@ -59,6 +60,7 @@ public class BlockDataStreamOutputEntryPool {
   private OmMultipartCommitUploadPartInfo commitUploadPartInfo;
   private final long openID;
   private final ExcludeList excludeList;
+  private List<StreamBuffer> bufferList;
 
   @SuppressWarnings({"parameternumber", "squid:S00107"})
   public BlockDataStreamOutputEntryPool(
@@ -83,6 +85,7 @@ public class BlockDataStreamOutputEntryPool {
     this.requestID = requestId;
     this.openID = openID;
     this.excludeList = new ExcludeList();
+    this.bufferList = new ArrayList<>();
   }
 
   /**
@@ -142,7 +145,8 @@ public class BlockDataStreamOutputEntryPool {
             .setPipeline(subKeyInfo.getPipeline())
             .setConfig(config)
             .setLength(subKeyInfo.getLength())
-            .setToken(subKeyInfo.getToken());
+            .setToken(subKeyInfo.getToken())
+            .setBufferList(bufferList);
     streamEntries.add(builder.build());
   }
 
@@ -301,4 +305,12 @@ public class BlockDataStreamOutputEntryPool {
   boolean isEmpty() {
     return streamEntries.isEmpty();
   }
+
+  long computeBufferData() {
+    long totalDataLen = 0;
+    for (StreamBuffer b : bufferList) {
+      totalDataLen += b.length();
+    }
+    return totalDataLen;
+  }
 }
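computeBufferData is a plain loop; an equivalent one-liner in the style StreamCommitWatcher already uses for the same sum would be (a sketch, not part of the commit):

    long totalDataLen = bufferList.stream()
        .mapToLong(StreamBuffer::length)
        .sum();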
diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/KeyDataStreamOutput.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/KeyDataStreamOutput.java
index 9bba89d0a8..2540e42e24 100644
--- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/KeyDataStreamOutput.java
+++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/KeyDataStreamOutput.java
@@ -278,11 +278,14 @@ public class KeyDataStreamOutput implements ByteBufferStreamOutput {
     }
     Pipeline pipeline = streamEntry.getPipeline();
     PipelineID pipelineId = pipeline.getId();
-
+    long totalSuccessfulFlushedData = streamEntry.getTotalAckDataLength();
+    //set the correct length for the current stream
+    streamEntry.setCurrentPosition(totalSuccessfulFlushedData);
     long containerId = streamEntry.getBlockID().getContainerID();
     Collection<DatanodeDetails> failedServers = streamEntry.getFailedServers();
     Preconditions.checkNotNull(failedServers);
     ExcludeList excludeList = blockDataStreamOutputEntryPool.getExcludeList();
+    long bufferedDataLen = blockDataStreamOutputEntryPool.computeBufferData();
     if (!failedServers.isEmpty()) {
       excludeList.addDatanodes(failedServers);
     }
@@ -316,6 +319,13 @@ public class KeyDataStreamOutput implements ByteBufferStreamOutput {
       blockDataStreamOutputEntryPool
           .discardPreallocatedBlocks(-1, pipelineId);
     }
+    if (bufferedDataLen > 0) {
+      // If the data is still cached in the underlying stream, we need to
+      // allocate new block and write this data in the datanode.
+      handleRetry(exception, bufferedDataLen);
+      // reset the retryCount after handling the exception
+      retryCount = 0;
+    }
   }
 
   private void markStreamClosed() {
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBlockDataStreamOutput.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBlockDataStreamOutput.java
index d3b2d22577..05a101951b 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBlockDataStreamOutput.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBlockDataStreamOutput.java
@@ -21,15 +21,19 @@ import org.apache.hadoop.conf.StorageUnit;
 import org.apache.hadoop.hdds.client.ReplicationType;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.scm.OzoneClientConfig;
+import org.apache.hadoop.hdds.scm.storage.BlockDataStreamOutput;
+import org.apache.hadoop.hdds.scm.storage.ByteBufferStreamOutput;
 import org.apache.hadoop.ozone.MiniOzoneCluster;
 import org.apache.hadoop.ozone.OzoneConfigKeys;
 import org.apache.hadoop.ozone.client.ObjectStore;
 import org.apache.hadoop.ozone.client.OzoneClient;
 import org.apache.hadoop.ozone.client.OzoneClientFactory;
+import org.apache.hadoop.ozone.client.io.KeyDataStreamOutput;
 import org.apache.hadoop.ozone.client.io.OzoneDataStreamOutput;
 import org.apache.hadoop.ozone.container.ContainerTestHelper;
 import org.apache.hadoop.ozone.container.TestHelper;
 import org.junit.AfterClass;
+import org.junit.Assert;
 import org.junit.BeforeClass;
 import org.junit.Rule;
 import org.junit.Test;
@@ -127,21 +131,25 @@ public class TestBlockDataStreamOutput {
   @Test
   public void testHalfChunkWrite() throws Exception {
     testWrite(chunkSize / 2);
    testWriteWithFailure(chunkSize / 2);
   }
 
   @Test
   public void testSingleChunkWrite() throws Exception {
     testWrite(chunkSize);
+    testWriteWithFailure(chunkSize);
   }
 
   @Test
   public void testMultiChunkWrite() throws Exception {
     testWrite(chunkSize + 50);
+    testWriteWithFailure(chunkSize + 50);
   }
 
   @Test
   public void testMultiBlockWrite() throws Exception {
     testWrite(blockSize + 50);
+    testWriteWithFailure(blockSize + 50);
   }
 
   private void testWrite(int dataLength) throws Exception {
@@ -156,6 +164,28 @@ public class TestBlockDataStreamOutput {
     key.close();
     validateData(keyName, data);
   }
+
+  private void testWriteWithFailure(int dataLength) throws Exception {
+    String keyName = getKeyName();
+    OzoneDataStreamOutput key = createKey(
+        keyName, ReplicationType.RATIS, 0);
+    byte[] data =
+        ContainerTestHelper.getFixedLengthString(keyString, dataLength)
+            .getBytes(UTF_8);
+    ByteBuffer b = ByteBuffer.wrap(data);
+    key.write(b);
+    KeyDataStreamOutput keyDataStreamOutput =
+        (KeyDataStreamOutput) key.getByteBufStreamOutput();
+    ByteBufferStreamOutput stream =
+        keyDataStreamOutput.getStreamEntries().get(0).getByteBufStreamOutput();
+    Assert.assertTrue(stream instanceof BlockDataStreamOutput);
+    TestHelper.waitForContainerClose(key, cluster);
+    key.write(b);
+    key.close();
+    String dataString = new String(data, UTF_8);
+    validateData(keyName, dataString.concat(dataString).getBytes(UTF_8));
+  }
+
   private OzoneDataStreamOutput createKey(String keyName, ReplicationType type,
       long size) throws Exception {
     return TestHelper.createStreamKey(
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/TestHelper.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/TestHelper.java
index cf3a51241e..14cd1b66f4 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/TestHelper.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/TestHelper.java
@@ -46,7 +46,9 @@ import org.apache.hadoop.ozone.HddsDatanodeService;
 import org.apache.hadoop.ozone.MiniOzoneCluster;
 import org.apache.hadoop.ozone.OzoneConsts;
 import org.apache.hadoop.ozone.client.ObjectStore;
+import org.apache.hadoop.ozone.client.io.BlockDataStreamOutputEntry;
 import org.apache.hadoop.ozone.client.io.BlockOutputStreamEntry;
+import org.apache.hadoop.ozone.client.io.KeyDataStreamOutput;
 import org.apache.hadoop.ozone.client.io.KeyOutputStream;
 import org.apache.hadoop.ozone.client.io.OzoneDataStreamOutput;
 import org.apache.hadoop.ozone.client.io.OzoneInputStream;
@@ -205,6 +207,24 @@ public final class TestHelper {
     waitForContainerClose(cluster, containerIdList.toArray(new Long[0]));
   }
 
+
+  public static void waitForContainerClose(OzoneDataStreamOutput outputStream,
+      MiniOzoneCluster cluster) throws Exception {
+    KeyDataStreamOutput keyOutputStream =
+        (KeyDataStreamOutput) outputStream.getByteBufStreamOutput();
+    List<BlockDataStreamOutputEntry> streamEntryList =
+        keyOutputStream.getStreamEntries();
+    List<Long> containerIdList = new ArrayList<>();
+    for (BlockDataStreamOutputEntry entry : streamEntryList) {
+      long id = entry.getBlockID().getContainerID();
+      if (!containerIdList.contains(id)) {
+        containerIdList.add(id);
+      }
+    }
+    Assert.assertTrue(!containerIdList.isEmpty());
+    waitForContainerClose(cluster, containerIdList.toArray(new Long[0]));
+  }
+
   public static void waitForPipelineClose(OzoneOutputStream outputStream,
       MiniOzoneCluster cluster, boolean waitForContainerCreation)
       throws Exception {




[ozone] 18/38: HDDS-5851. [Ozone-Streaming] Define a PutBlock/maxBuffer fixed boundary for streaming writes. (#2866)

Posted by sz...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

szetszwo pushed a commit to branch HDDS-4454
in repository https://gitbox.apache.org/repos/asf/ozone.git

commit eb441a8e823af85aa9566135e127b3a70c2c92b8
Author: Sadanand Shenoy <sa...@gmail.com>
AuthorDate: Wed Dec 1 11:00:46 2021 +0530

    HDDS-5851. [Ozone-Streaming] Define a PutBlock/maxBuffer fixed boundary for streaming writes. (#2866)
    
    (cherry picked from commit 7a6ea80e59bc3795a3cdcc571c238674967cdd86)
    (cherry picked from commit 3b9141e18d42555ccc62f7c9718b96fd8b88e55f)
---
 .../apache/hadoop/hdds/scm/OzoneClientConfig.java  | 31 ++++++++++++++++++++
 .../hdds/scm/storage/BlockDataStreamOutput.java    | 29 ++++++++++++++----
 .../org/apache/hadoop/ozone/MiniOzoneCluster.java  | 12 ++++++++
 .../apache/hadoop/ozone/MiniOzoneClusterImpl.java  | 11 +++++++
 .../client/rpc/TestBlockDataStreamOutput.java      | 34 ++++++++++++++++++++++
 5 files changed, 111 insertions(+), 6 deletions(-)

diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/OzoneClientConfig.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/OzoneClientConfig.java
index 14a8aacbc0..715a19212b 100644
--- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/OzoneClientConfig.java
+++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/OzoneClientConfig.java
@@ -66,6 +66,21 @@ public class OzoneClientConfig {
       tags = ConfigTag.CLIENT)
   private int streamBufferSize = 4 * 1024 * 1024;
 
+  @Config(key = "datastream.max.buffer.size",
+      defaultValue = "4MB",
+      type = ConfigType.SIZE,
+      description = "The maximum size of the ByteBuffer "
+          + "(used via ratis streaming)",
+      tags = ConfigTag.CLIENT)
+  private int dataStreamMaxBufferSize = 4 * 1024 * 1024;
+
+  @Config(key = "datastream.buffer.flush.size",
+      defaultValue = "16MB",
+      type = ConfigType.SIZE,
+      description = "The boundary at which putBlock is executed",
+      tags = ConfigTag.CLIENT)
+  private long dataStreamBufferFlushSize = 16 * 1024 * 1024;
+
   @Config(key = "stream.buffer.increment",
       defaultValue = "0B",
       type = ConfigType.SIZE,
@@ -228,6 +243,14 @@ public class OzoneClientConfig {
     this.streamBufferSize = streamBufferSize;
   }
 
+  public int getDataStreamMaxBufferSize() {
+    return dataStreamMaxBufferSize;
+  }
+
+  public void setDataStreamMaxBufferSize(int dataStreamMaxBufferSize) {
+    this.dataStreamMaxBufferSize = dataStreamMaxBufferSize;
+  }
+
   public boolean isStreamBufferFlushDelay() {
     return streamBufferFlushDelay;
   }
@@ -296,6 +319,14 @@ public class OzoneClientConfig {
     return bufferIncrement;
   }
 
+  public long getDataStreamBufferFlushSize() {
+    return dataStreamBufferFlushSize;
+  }
+
+  public void setDataStreamBufferFlushSize(long dataStreamBufferFlushSize) {
+    this.dataStreamBufferFlushSize = dataStreamBufferFlushSize;
+  }
+
   public ChecksumCombineMode getChecksumCombineMode() {
     try {
       return ChecksumCombineMode.valueOf(checksumCombineMode);
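The two knobs interact: doFlushIfNeeded (below) derives the PutBlock boundary by integer division, so the flush size must be strictly larger than the max buffer size and works cleanest as an exact multiple of it. A sketch of setting them programmatically, using the getObject/setFromObject pattern the tests in this series follow:

    OzoneConfiguration conf = new OzoneConfiguration();
    OzoneClientConfig clientConfig = conf.getObject(OzoneClientConfig.class);
    clientConfig.setDataStreamMaxBufferSize(4 * 1024 * 1024);     // 4 MB per StreamBuffer
    clientConfig.setDataStreamBufferFlushSize(16 * 1024 * 1024L); // PutBlock every 16 MB
    conf.setFromObject(clientConfig);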
diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockDataStreamOutput.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockDataStreamOutput.java
index aada48e2f5..6f5a54354a 100644
--- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockDataStreamOutput.java
+++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockDataStreamOutput.java
@@ -257,13 +257,30 @@ public class BlockDataStreamOutput implements ByteBufferStreamOutput {
     if (len == 0) {
       return;
     }
+    int curLen = len;
+    // set limit on the number of bytes that a ByteBuffer(StreamBuffer) can hold
+    int maxBufferLen = config.getDataStreamMaxBufferSize();
+    while (curLen > 0) {
+      int writeLen = Math.min(curLen, maxBufferLen);
+      final StreamBuffer buf = new StreamBuffer(b, off, writeLen);
+      off += writeLen;
+      bufferList.add(buf);
+      writeChunkToContainer(buf.duplicate());
+      curLen -= writeLen;
+      writtenDataLength += writeLen;
+      doFlushIfNeeded();
+    }
+  }
 
-    final StreamBuffer buf = new StreamBuffer(b, off, len);
-    bufferList.add(buf);
-
-    writeChunkToContainer(buf.duplicate());
-
-    writtenDataLength += len;
+  private void doFlushIfNeeded() throws IOException {
+    Preconditions.checkArgument(config.getDataStreamBufferFlushSize() > config
+        .getDataStreamMaxBufferSize());
+    long boundary = config.getDataStreamBufferFlushSize() / config
+        .getDataStreamMaxBufferSize();
+    if (bufferList.size() % boundary == 0) {
+      updateFlushLength();
+      executePutBlock(false, false);
+    }
   }
 
   private void updateFlushLength() {
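Worked through with the values the test below implies (a 100-byte max buffer and a 400-byte flush size), the boundary is 400 / 100 = 4, so a PutBlock fires after every fourth buffered chunk; a 500-byte write therefore puts block metadata once at 400 bytes and once more at close. The same check, annotated (a sketch of the hunk above):

    long boundary = config.getDataStreamBufferFlushSize()
        / config.getDataStreamMaxBufferSize();   // e.g. 400 / 100 = 4
    if (bufferList.size() % boundary == 0) {     // every 4th buffer...
      updateFlushLength();
      executePutBlock(false, false);             // ...persist block metadata
    }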
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneCluster.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneCluster.java
index fa2d192058..a97ea318e2 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneCluster.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneCluster.java
@@ -325,6 +325,8 @@ public interface MiniOzoneCluster {
     protected Optional<Integer> chunkSize = Optional.empty();
     protected OptionalInt streamBufferSize = OptionalInt.empty();
     protected Optional<Long> streamBufferFlushSize = Optional.empty();
+    protected Optional<Long> dataStreamBufferFlushSize = Optional.empty();
+    protected OptionalInt dataStreamMaxBufferSize = OptionalInt.empty();
     protected Optional<Long> streamBufferMaxSize = Optional.empty();
     protected Optional<Long> blockSize = Optional.empty();
     protected Optional<StorageUnit> streamBufferSizeUnit = Optional.empty();
@@ -566,6 +568,16 @@ public interface MiniOzoneCluster {
       return this;
     }
 
+    public Builder setDataStreamBufferMaxSize(int size) {
+      dataStreamMaxBufferSize = OptionalInt.of(size);
+      return this;
+    }
+
+    public Builder setDataStreamBufferFlushize(long size) {
+      dataStreamBufferFlushSize = Optional.of(size);
+      return this;
+    }
+
     /**
      * Sets the block size for stream buffer.
      *
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneClusterImpl.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneClusterImpl.java
index 2421daeaa6..2c26f9725f 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneClusterImpl.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneClusterImpl.java
@@ -654,6 +654,12 @@ public class MiniOzoneClusterImpl implements MiniOzoneCluster {
       if (!streamBufferMaxSize.isPresent()) {
         streamBufferMaxSize = Optional.of(2 * streamBufferFlushSize.get());
       }
+      if (!dataStreamBufferFlushSize.isPresent()) {
+        dataStreamBufferFlushSize = Optional.of((long) 4 * chunkSize.get());
+      }
+      if (!dataStreamMaxBufferSize.isPresent()) {
+        dataStreamMaxBufferSize = OptionalInt.of(chunkSize.get());
+      }
       if (!blockSize.isPresent()) {
         blockSize = Optional.of(2 * streamBufferMaxSize.get());
       }
@@ -670,6 +676,11 @@ public class MiniOzoneClusterImpl implements MiniOzoneCluster {
           streamBufferSizeUnit.get().toBytes(streamBufferMaxSize.get())));
       clientConfig.setStreamBufferFlushSize(Math.round(
           streamBufferSizeUnit.get().toBytes(streamBufferFlushSize.get())));
+      clientConfig.setDataStreamBufferFlushSize(Math.round(
+          streamBufferSizeUnit.get().toBytes(dataStreamBufferFlushSize.get())));
+      clientConfig.setDataStreamMaxBufferSize((int) Math.round(
+          streamBufferSizeUnit.get()
+              .toBytes(dataStreamMaxBufferSize.getAsInt())));
       conf.setFromObject(clientConfig);
 
       conf.setStorageSize(ScmConfigKeys.OZONE_SCM_CHUNK_SIZE_KEY,
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBlockDataStreamOutput.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBlockDataStreamOutput.java
index 05a101951b..5eb38a00de 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBlockDataStreamOutput.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBlockDataStreamOutput.java
@@ -20,7 +20,10 @@ package org.apache.hadoop.ozone.client.rpc;
 import org.apache.hadoop.conf.StorageUnit;
 import org.apache.hadoop.hdds.client.ReplicationType;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
 import org.apache.hadoop.hdds.scm.OzoneClientConfig;
+import org.apache.hadoop.hdds.scm.XceiverClientManager;
+import org.apache.hadoop.hdds.scm.XceiverClientMetrics;
 import org.apache.hadoop.hdds.scm.storage.BlockDataStreamOutput;
 import org.apache.hadoop.hdds.scm.storage.ByteBufferStreamOutput;
 import org.apache.hadoop.ozone.MiniOzoneCluster;
@@ -101,6 +104,8 @@ public class TestBlockDataStreamOutput {
         .setChunkSize(chunkSize)
         .setStreamBufferFlushSize(flushSize)
         .setStreamBufferMaxSize(maxFlushSize)
+        .setDataStreamBufferFlushize(maxFlushSize)
+        .setDataStreamBufferMaxSize(chunkSize)
         .setStreamBufferSizeUnit(StorageUnit.BYTES)
         .build();
     cluster.waitForClusterToBeReady();
@@ -186,6 +191,35 @@ public class TestBlockDataStreamOutput {
     validateData(keyName, dataString.concat(dataString).getBytes(UTF_8));
   }
 
+  @Test
+  public void testPutBlockAtBoundary() throws Exception {
+    int dataLength = 500;
+    XceiverClientMetrics metrics =
+        XceiverClientManager.getXceiverClientMetrics();
+    long putBlockCount = metrics.getContainerOpCountMetrics(
+        ContainerProtos.Type.PutBlock);
+    long pendingPutBlockCount = metrics.getPendingContainerOpCountMetrics(
+        ContainerProtos.Type.PutBlock);
+    String keyName = getKeyName();
+    OzoneDataStreamOutput key = createKey(
+        keyName, ReplicationType.RATIS, 0);
+    byte[] data =
+        ContainerTestHelper.getFixedLengthString(keyString, dataLength)
+            .getBytes(UTF_8);
+    key.write(ByteBuffer.wrap(data));
+    Assert.assertTrue(
+        metrics.getPendingContainerOpCountMetrics(ContainerProtos.Type.PutBlock)
+            <= pendingPutBlockCount + 1);
+    key.close();
+    // Since the data length is 500, the first putBlock will be at 400 (the
+    // flush boundary) and the second at 500.
+    Assert.assertTrue(
+        metrics.getContainerOpCountMetrics(ContainerProtos.Type.PutBlock)
+            == putBlockCount + 2);
+    validateData(keyName, data);
+  }
+
+
   private OzoneDataStreamOutput createKey(String keyName, ReplicationType type,
       long size) throws Exception {
     return TestHelper.createStreamKey(




[ozone] 16/38: HDDS-5879. [Ozone-Streaming] OzoneBucket add the createMultipartStreamKey method (#2760)

Posted by sz...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

szetszwo pushed a commit to branch HDDS-4454
in repository https://gitbox.apache.org/repos/asf/ozone.git

commit 8de50af361ed88ef7bf50bc84adffd0790c969c1
Author: hao guo <gu...@360.cn>
AuthorDate: Fri Nov 19 11:21:55 2021 +0800

    HDDS-5879. [Ozone-Streaming] OzoneBucket add the createMultipartStreamKey method (#2760)
    
    (cherry picked from commit be9641f9fa281828621d5137fbbd7a1384096301)
    (cherry picked from commit 9a16e41e1f29373e0e0f2a186237ded0dd0b53b4)
---
 .../apache/hadoop/ozone/client/OzoneBucket.java    | 15 +++++
 .../ozone/client/protocol/ClientProtocol.java      | 18 ++++++
 .../apache/hadoop/ozone/client/rpc/RpcClient.java  | 64 ++++++++++++++++++++++
 .../client/rpc/TestOzoneRpcClientWithRatis.java    | 53 ++++++++++++++++++
 4 files changed, 150 insertions(+)

diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneBucket.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneBucket.java
index 2d27858e16..20d34df15e 100644
--- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneBucket.java
+++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/OzoneBucket.java
@@ -810,6 +810,21 @@ public class OzoneBucket extends WithMetadata {
         uploadID);
   }
 
+  /**
+   * Create a part key for a multipart upload key.
+   * @param key
+   * @param size
+   * @param partNumber
+   * @param uploadID
+   * @return OzoneDataStreamOutput
+   * @throws IOException
+   */
+  public OzoneDataStreamOutput createMultipartStreamKey(String key,
+      long size, int partNumber, String uploadID) throws IOException {
+    return proxy.createMultipartStreamKey(volumeName, name,
+            key, size, partNumber, uploadID);
+  }
+
   /**
    * Complete Multipart upload. This will combine all the parts and make the
    * key visible in ozone.
diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/protocol/ClientProtocol.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/protocol/ClientProtocol.java
index 8f7f385628..7660fc0e46 100644
--- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/protocol/ClientProtocol.java
+++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/protocol/ClientProtocol.java
@@ -494,6 +494,24 @@ public interface ClientProtocol {
                                        int partNumber, String uploadID)
       throws IOException;
 
+  /**
+   * Create a part key for a multipart upload key.
+   * @param volumeName
+   * @param bucketName
+   * @param keyName
+   * @param size
+   * @param partNumber
+   * @param uploadID
+   * @return OzoneDataStreamOutput
+   * @throws IOException
+   */
+  OzoneDataStreamOutput createMultipartStreamKey(String volumeName,
+                                                 String bucketName,
+                                                 String keyName, long size,
+                                                 int partNumber,
+                                                 String uploadID)
+      throws IOException;
+
   /**
    * Complete Multipart upload. This will combine all the parts and make the
    * key visible in ozone.
diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java
index 8c07a58fa4..fcdf2b9107 100644
--- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java
+++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java
@@ -1584,6 +1584,70 @@ public class RpcClient implements ClientProtocol {
     }
   }
 
+  @Override
+  public OzoneDataStreamOutput createMultipartStreamKey(
+      String volumeName,
+      String bucketName,
+      String keyName,
+      long size,
+      int partNumber,
+      String uploadID)
+      throws IOException {
+    verifyVolumeName(volumeName);
+    verifyBucketName(bucketName);
+    if (checkKeyNameEnabled) {
+      HddsClientUtils.verifyKeyName(keyName);
+    }
+    HddsClientUtils.checkNotNull(keyName, uploadID);
+    Preconditions.checkArgument(partNumber > 0 && partNumber <= 10000, "Part " +
+        "number should be greater than zero and less than or equal to 10000");
+    Preconditions.checkArgument(size >= 0, "size should be greater than or " +
+        "equal to zero");
+    String requestId = UUID.randomUUID().toString();
+
+    OmKeyArgs keyArgs = new OmKeyArgs.Builder()
+        .setVolumeName(volumeName)
+        .setBucketName(bucketName)
+        .setKeyName(keyName)
+        .setDataSize(size)
+        .setIsMultipartKey(true)
+        .setMultipartUploadID(uploadID)
+        .setMultipartUploadPartNumber(partNumber)
+        .setAcls(getAclList())
+        .build();
+
+    OpenKeySession openKey = ozoneManagerClient.openKey(keyArgs);
+
+    KeyDataStreamOutput keyOutputStream =
+        new KeyDataStreamOutput.Builder()
+            .setHandler(openKey)
+            .setXceiverClientManager(xceiverClientManager)
+            .setOmClient(ozoneManagerClient)
+            .setRequestID(requestId)
+            .setReplicationConfig(openKey.getKeyInfo().getReplicationConfig())
+            .setMultipartNumber(partNumber)
+            .setMultipartUploadID(uploadID)
+            .setIsMultipartKey(true)
+            .enableUnsafeByteBufferConversion(unsafeByteBufferConversion)
+            .setConfig(clientConfig)
+            .build();
+    keyOutputStream
+        .addPreallocateBlocks(
+            openKey.getKeyInfo().getLatestVersionLocations(),
+            openKey.getOpenVersion());
+
+    FileEncryptionInfo feInfo = openKey.getKeyInfo().getFileEncryptionInfo();
+    if (feInfo != null) {
+      // todo: need to support file encrypt,
+      //  https://issues.apache.org/jira/browse/HDDS-5892
+      throw new UnsupportedOperationException(
+          "FileEncryptionInfo is not yet supported in " +
+              "createMultipartStreamKey");
+    } else {
+      return new OzoneDataStreamOutput(keyOutputStream);
+    }
+  }
+
   @Override
   public OmMultipartUploadCompleteInfo completeMultipartUpload(
       String volumeName, String bucketName, String keyName, String uploadID,
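End to end, the new client path mirrors the blocking multipart API; a condensed sketch distilled from the test below (volume and bucket creation omitted, part number 1 assumed):

    OmMultipartInfo info = bucket.initiateMultipartUpload(keyName,
        ReplicationConfig.fromTypeAndFactor(ReplicationType.RATIS, THREE));
    OzoneDataStreamOutput part = bucket.createMultipartStreamKey(
        keyName, data.length, 1, info.getUploadID());
    part.write(ByteBuffer.wrap(data), 0, data.length);
    part.close();
    // The part is now listed by bucket.listParts(keyName, uploadID, 0, 1).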
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientWithRatis.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientWithRatis.java
index 94bfaefcc0..65551bc4e9 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientWithRatis.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientWithRatis.java
@@ -19,10 +19,12 @@
 package org.apache.hadoop.ozone.client.rpc;
 
 import java.io.IOException;
+import java.nio.ByteBuffer;
 import java.util.Arrays;
 import java.util.HashMap;
 import java.util.UUID;
 
+import org.apache.hadoop.hdds.client.ReplicationConfig;
 import org.apache.hadoop.hdds.client.ReplicationType;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.scm.ScmConfigKeys;
@@ -31,12 +33,15 @@ import org.apache.hadoop.ozone.client.ObjectStore;
 import org.apache.hadoop.ozone.client.OzoneBucket;
 import org.apache.hadoop.ozone.client.OzoneClient;
 import org.apache.hadoop.ozone.client.OzoneClientFactory;
+import org.apache.hadoop.ozone.client.OzoneMultipartUploadPartListParts;
 import org.apache.hadoop.ozone.client.OzoneVolume;
+import org.apache.hadoop.ozone.client.io.OzoneDataStreamOutput;
 import org.apache.hadoop.ozone.client.io.OzoneInputStream;
 import org.apache.hadoop.ozone.client.io.OzoneOutputStream;
 import org.apache.hadoop.ozone.common.OzoneChecksumException;
 import org.apache.hadoop.ozone.om.OMConfigKeys;
 import org.apache.hadoop.ozone.om.helpers.OmKeyArgs;
+import org.apache.hadoop.ozone.om.helpers.OmMultipartInfo;
 import org.junit.jupiter.api.AfterAll;
 import org.junit.Assert;
 import org.junit.jupiter.api.BeforeAll;
@@ -44,6 +49,7 @@ import org.junit.jupiter.api.Test;
 
 import static java.nio.charset.StandardCharsets.UTF_8;
 import static org.apache.hadoop.hdds.client.ReplicationFactor.THREE;
+import static org.junit.Assert.assertNotNull;
 import static org.junit.Assert.fail;
 
 /**
@@ -155,4 +161,51 @@ public class TestOzoneRpcClientWithRatis extends TestOzoneRpcClientAbstract {
       }
     }
   }
+
+  @Test
+  public void testMultiPartUploadWithStream() throws IOException {
+    String volumeName = UUID.randomUUID().toString();
+    String bucketName = UUID.randomUUID().toString();
+    String keyName = UUID.randomUUID().toString();
+
+    byte[] sampleData = new byte[1024 * 8];
+
+    int valueLength = sampleData.length;
+
+    getStore().createVolume(volumeName);
+    OzoneVolume volume = getStore().getVolume(volumeName);
+    volume.createBucket(bucketName);
+    OzoneBucket bucket = volume.getBucket(bucketName);
+
+    ReplicationConfig replicationConfig =
+        ReplicationConfig.fromTypeAndFactor(
+            ReplicationType.RATIS,
+            THREE);
+
+    OmMultipartInfo multipartInfo = bucket.initiateMultipartUpload(keyName,
+        replicationConfig);
+
+    assertNotNull(multipartInfo);
+    String uploadID = multipartInfo.getUploadID();
+    Assert.assertEquals(volumeName, multipartInfo.getVolumeName());
+    Assert.assertEquals(bucketName, multipartInfo.getBucketName());
+    Assert.assertEquals(keyName, multipartInfo.getKeyName());
+    assertNotNull(multipartInfo.getUploadID());
+
+    OzoneDataStreamOutput ozoneStreamOutput = bucket.createMultipartStreamKey(
+        keyName, valueLength, 1, uploadID);
+    ozoneStreamOutput.write(ByteBuffer.wrap(sampleData), 0,
+        valueLength);
+    ozoneStreamOutput.close();
+
+    OzoneMultipartUploadPartListParts parts =
+        bucket.listParts(keyName, uploadID, 0, 1);
+
+    Assert.assertEquals(1, parts.getPartInfoList().size());
+
+    OzoneMultipartUploadPartListParts.PartInfo partInfo =
+        parts.getPartInfoList().get(0);
+    Assert.assertEquals(valueLength, partInfo.getSize());
+
+  }
 }




[ozone] 26/38: HDDS-5487. [Ozone-Streaming] BlockDataStreamOutput support FlushDelay. (#3002)

Posted by sz...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

szetszwo pushed a commit to branch HDDS-4454
in repository https://gitbox.apache.org/repos/asf/ozone.git

commit 434d1fc57d5ff65923b053291bf14fed6d4d58e1
Author: micah zhao <mi...@tencent.com>
AuthorDate: Mon Feb 14 20:58:11 2022 +0800

    HDDS-5487. [Ozone-Streaming] BlockDataStreamOutput support FlushDelay. (#3002)
    
    (cherry picked from commit 366a0d2cefe75bedb3239fee2642eaf415afb80b)
    (cherry picked from commit 576c359114aba78d203520a5edf62ff831cf5919)
---
 .../apache/hadoop/hdds/scm/storage/BlockDataStreamOutput.java  | 10 +++++++++-
 .../hadoop/ozone/client/rpc/TestBlockDataStreamOutput.java     |  1 -
 2 files changed, 9 insertions(+), 2 deletions(-)

diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockDataStreamOutput.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockDataStreamOutput.java
index a3fe1c2479..9ac43300f7 100644
--- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockDataStreamOutput.java
+++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockDataStreamOutput.java
@@ -429,7 +429,7 @@ public class BlockDataStreamOutput implements ByteBufferStreamOutput {
     } else {
       byteBufferList = null;
     }
-    flush();
+    waitFuturesComplete();
     if (close) {
       dataStreamCloseReply = out.closeAsync();
     }
@@ -485,8 +485,16 @@ public class BlockDataStreamOutput implements ByteBufferStreamOutput {
 
   @Override
   public void flush() throws IOException {
+    if (xceiverClientFactory != null && xceiverClient != null
+        && !config.isStreamBufferFlushDelay()) {
+      waitFuturesComplete();
+    }
+  }
+
+  public void waitFuturesComplete() throws IOException {
     try {
       CompletableFuture.allOf(futures.toArray(EMPTY_FUTURE_ARRAY)).get();
+      futures.clear();
     } catch (Exception e) {
       LOG.warn("Failed to write all chunks through stream: " + e);
       throw new IOException(e);
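The effect of the change: when streamBufferFlushDelay is enabled (the OzoneClientConfig default, which is why the test override below is removed), flush() becomes a no-op and outstanding writeChunk futures are only awaited inside executePutBlock/close; disabling the delay restores the eager wait. A sketch of the resulting control flow (names as in the diff):

    @Override
    public void flush() throws IOException {
      if (xceiverClientFactory != null && xceiverClient != null
          && !config.isStreamBufferFlushDelay()) {
        // Eager mode only: block until every pending writeChunk completes,
        // then clear the future list for the next batch.
        waitFuturesComplete();
      }
      // Delayed mode: completion is enforced later, at PutBlock/close time.
    }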
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBlockDataStreamOutput.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBlockDataStreamOutput.java
index 696ab92ab7..21003374d7 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBlockDataStreamOutput.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBlockDataStreamOutput.java
@@ -89,7 +89,6 @@ public class TestBlockDataStreamOutput {
     blockSize = 2 * maxFlushSize;
 
     OzoneClientConfig clientConfig = conf.getObject(OzoneClientConfig.class);
-    clientConfig.setStreamBufferFlushDelay(false);
     conf.setFromObject(clientConfig);
 
     conf.setTimeDuration(HDDS_SCM_WATCHER_TIMEOUT, 1000, TimeUnit.MILLISECONDS);




[ozone] 03/38: HDDS-5481. Fix stream() and link() method in ContainerStateMachine. (#2451)

Posted by sz...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

szetszwo pushed a commit to branch HDDS-4454
in repository https://gitbox.apache.org/repos/asf/ozone.git

commit 01beaf8f43d3e900cfa5f0c2a876fe6d67600c53
Author: Kaijie Chen <ch...@kaijie.org>
AuthorDate: Thu Jul 22 19:46:31 2021 +0800

    HDDS-5481. Fix stream() and link() method in ContainerStateMachine. (#2451)
    
    (cherry picked from commit bda70049ff1c608fb549676ddd099cb5b2b1206c)
    (cherry picked from commit 444b7b38406fbcc4c6ec957a10273ff1ee58cdfb)
---
 .../common/transport/server/ratis/ContainerStateMachine.java          | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/ContainerStateMachine.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/ContainerStateMachine.java
index f8d066757a..121a6d6bdd 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/ContainerStateMachine.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/ContainerStateMachine.java
@@ -518,8 +518,7 @@ public class ContainerStateMachine extends BaseStateMachine {
     return CompletableFuture.supplyAsync(() -> {
       try {
         ContainerCommandRequestProto requestProto =
-            getContainerCommandRequestProto(gid,
-                request.getMessage().getContent());
+            message2ContainerCommandRequestProto(request.getMessage());
         DispatcherContext context =
             new DispatcherContext.Builder()
                 .setStage(DispatcherContext.WriteChunkStage.WRITE_DATA)
@@ -536,6 +535,7 @@ public class ContainerStateMachine extends BaseStateMachine {
     }, executor);
   }
 
+  @Override
   public CompletableFuture<?> link(DataStream stream, LogEntryProto entry) {
     return CompletableFuture.supplyAsync(() -> {
       if (stream == null) {
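The added @Override is more than style: it makes the compiler verify that link still matches the signature Ratis dispatches to through StateMachine.DataApi, so silent signature drift becomes a compile error. A tiny self-contained illustration of that guarantee (generic Java stand-ins, not the actual Ratis types):

    import java.util.concurrent.CompletableFuture;

    interface DataApi {
      CompletableFuture<?> link(Object stream, Object entry);
    }

    class MyStateMachine implements DataApi {
      @Override // compile error if this ever stops matching the interface
      public CompletableFuture<?> link(Object stream, Object entry) {
        return CompletableFuture.completedFuture(null);
      }
    }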

