You are viewing a plain text version of this content. The canonical link for it is here.
Posted to common-commits@hadoop.apache.org by st...@apache.org on 2022/11/29 14:53:00 UTC
[hadoop] branch branch-3.3 updated: HADOOP-18530. ChecksumFileSystem::readVectored might return byte buffers not positioned at 0 (#5168)
This is an automated email from the ASF dual-hosted git repository.
stevel pushed a commit to branch branch-3.3
in repository https://gitbox.apache.org/repos/asf/hadoop.git
The following commit(s) were added to refs/heads/branch-3.3 by this push:
new f29d9a11bca HADOOP-18530. ChecksumFileSystem::readVectored might return byte buffers not positioned at 0 (#5168)
f29d9a11bca is described below
commit f29d9a11bca67759cc6adc9afd834a62426248cb
Author: HarshitGupta11 <50...@users.noreply.github.com>
AuthorDate: Tue Nov 29 20:21:22 2022 +0530
HADOOP-18530. ChecksumFileSystem::readVectored might return byte buffers not positioned at 0 (#5168)
Contributed by Harshit Gupta
---
.../src/main/java/org/apache/hadoop/fs/VectoredReadUtils.java | 7 +++++++
.../src/test/java/org/apache/hadoop/fs/TestVectoredReadUtils.java | 6 ++++++
2 files changed, 13 insertions(+)
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/VectoredReadUtils.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/VectoredReadUtils.java
index 50cab7dc4cc..cf1b1ef9698 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/VectoredReadUtils.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/VectoredReadUtils.java
@@ -307,9 +307,16 @@ public final class VectoredReadUtils {
FileRange request) {
int offsetChange = (int) (request.getOffset() - readOffset);
int requestLength = request.getLength();
+ // Create a new buffer that is backed by the original contents.
+ // The slice will have position 0 and a limit equal to the number of bytes remaining in the original
readData = readData.slice();
+ // Adjust the position and the limit of the buffer so that the reader
+ // sees only the relevant data
readData.position(offsetChange);
readData.limit(offsetChange + requestLength);
+ // Slice again after the position/limit change so that only the requested
+ // portion of the data is returned to the reader, starting at position 0.
+ readData = readData.slice();
return readData;
}
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestVectoredReadUtils.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestVectoredReadUtils.java
index fdfa8f6eb6f..e964d23f4b7 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestVectoredReadUtils.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestVectoredReadUtils.java
@@ -61,6 +61,9 @@ public class TestVectoredReadUtils extends HadoopTestBase {
.describedAs("Slicing on the same offset shouldn't " +
"create a new buffer")
.isEqualTo(slice);
+ Assertions.assertThat(slice.position())
+ .describedAs("Slicing should return buffers starting from position 0")
+ .isEqualTo(0);
// try slicing a range
final int offset = 100;
@@ -77,6 +80,9 @@ public class TestVectoredReadUtils extends HadoopTestBase {
.describedAs("Slicing should use the same underlying " +
"data")
.isEqualTo(slice.array());
+ Assertions.assertThat(slice.position())
+ .describedAs("Slicing should return buffers starting from position 0")
+ .isEqualTo(0);
// test the contents of the slice
intBuffer = slice.asIntBuffer();
for(int i=0; i < sliceLength / Integer.BYTES; ++i) {
---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org