Posted to commits@ozone.apache.org by ad...@apache.org on 2021/05/31 05:53:00 UTC

[ozone] branch master updated: HDDS-4993. Add guardrail for reserved buffer size when DN reads a chunk (#2058)

This is an automated email from the ASF dual-hosted git repository.

adoroszlai pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/ozone.git


The following commit(s) were added to refs/heads/master by this push:
     new 3ffbdd0  HDDS-4993. Add guardrail for reserved buffer size when DN reads a chunk (#2058)
3ffbdd0 is described below

commit 3ffbdd0a67f0ec41034abbf04f7d8327ae6d1357
Author: Doroszlai, Attila <64...@users.noreply.github.com>
AuthorDate: Mon May 31 07:52:42 2021 +0200

    HDDS-4993. Add guardrail for reserved buffer size when DN reads a chunk (#2058)
---
 .../container/keyvalue/helpers/ChunkUtils.java     | 12 ++++++++
 .../keyvalue/impl/ChunkManagerDummyImpl.java       |  6 +++-
 .../keyvalue/impl/FilePerBlockStrategy.java        |  5 +++-
 .../keyvalue/impl/FilePerChunkStrategy.java        |  2 ++
 .../keyvalue/impl/CommonChunkManagerTestCases.java | 33 ++++++++++++++++++++++
 5 files changed, 56 insertions(+), 2 deletions(-)

diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/ChunkUtils.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/ChunkUtils.java
index 68d2899..1178127 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/ChunkUtils.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/ChunkUtils.java
@@ -54,6 +54,7 @@ import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Res
 import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Result.IO_EXCEPTION;
 import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Result.NO_SUCH_ALGORITHM;
 import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Result.UNABLE_TO_FIND_CHUNK;
+import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Result.UNSUPPORTED_REQUEST;
 import static org.apache.hadoop.ozone.container.common.utils.HddsVolumeUtil.onFailure;
 
 import org.slf4j.Logger;
@@ -339,6 +340,17 @@ public final class ChunkUtils {
     }
   }
 
+  public static void limitReadSize(long len)
+      throws StorageContainerException {
+    if (len > OzoneConsts.OZONE_SCM_CHUNK_MAX_SIZE) {
+      String err = String.format(
+          "Oversize read. max: %d, actual: %d",
+          OzoneConsts.OZONE_SCM_CHUNK_MAX_SIZE, len);
+      LOG.error(err);
+      throw new StorageContainerException(err, UNSUPPORTED_REQUEST);
+    }
+  }
+
   private static StorageContainerException wrapInStorageContainerException(
       IOException e) {
     ContainerProtos.Result result = translate(e);
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/ChunkManagerDummyImpl.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/ChunkManagerDummyImpl.java
index 4cf0834..8ea9899 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/ChunkManagerDummyImpl.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/ChunkManagerDummyImpl.java
@@ -34,6 +34,8 @@ import org.apache.hadoop.ozone.container.keyvalue.interfaces.ChunkManager;
 
 import java.nio.ByteBuffer;
 
+import static org.apache.hadoop.ozone.container.keyvalue.helpers.ChunkUtils.limitReadSize;
+
 /**
  * Implementation of ChunkManager built for running performance tests.
  * Chunks are not written to disk, Reads are returned with zero-filled buffers
@@ -72,8 +74,10 @@ public class ChunkManagerDummyImpl implements ChunkManager {
    */
   @Override
   public ChunkBuffer readChunk(Container container, BlockID blockID,
-      ChunkInfo info, DispatcherContext dispatcherContext) {
+      ChunkInfo info, DispatcherContext dispatcherContext)
+      throws StorageContainerException {
 
+    limitReadSize(info.getLen());
     // stats are handled in ChunkManagerImpl
     return ChunkBuffer.wrap(ByteBuffer.allocate((int) info.getLen()));
   }
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/FilePerBlockStrategy.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/FilePerBlockStrategy.java
index 338c597..715ce4c 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/FilePerBlockStrategy.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/FilePerBlockStrategy.java
@@ -59,6 +59,7 @@ import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Res
 import static org.apache.hadoop.ozone.container.common.impl.ChunkLayOutVersion.FILE_PER_BLOCK;
 import static org.apache.hadoop.ozone.container.common.transport.server.ratis.DispatcherContext.WriteChunkStage.COMMIT_DATA;
 import static org.apache.hadoop.ozone.container.common.utils.HddsVolumeUtil.onFailure;
+import static org.apache.hadoop.ozone.container.keyvalue.helpers.ChunkUtils.limitReadSize;
 import static org.apache.hadoop.ozone.container.keyvalue.helpers.ChunkUtils.validateChunkForOverwrite;
 import static org.apache.hadoop.ozone.container.keyvalue.helpers.ChunkUtils.verifyChunkFileExists;
 
@@ -152,6 +153,8 @@ public class FilePerBlockStrategy implements ChunkManager {
       return ChunkBuffer.wrap(ByteBuffer.wrap(new byte[0]));
     }
 
+    limitReadSize(info.getLen());
+
     KeyValueContainerData containerData = (KeyValueContainerData) container
         .getContainerData();
 
@@ -159,7 +162,7 @@ public class FilePerBlockStrategy implements ChunkManager {
 
     File chunkFile = getChunkFile(container, blockID, info);
 
-    long len = info.getLen();
+    int len = (int) info.getLen();
     long offset = info.getOffset();
     long bufferCapacity =  ChunkManager.getBufferCapacityForChunkRead(info,
         defaultReadBufferCapacity);
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/FilePerChunkStrategy.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/FilePerChunkStrategy.java
index 5eefbda..f2109cb 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/FilePerChunkStrategy.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/impl/FilePerChunkStrategy.java
@@ -56,6 +56,7 @@ import java.util.List;
 
 import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Result.UNSUPPORTED_REQUEST;
 import static org.apache.hadoop.ozone.container.common.impl.ChunkLayOutVersion.FILE_PER_CHUNK;
+import static org.apache.hadoop.ozone.container.keyvalue.helpers.ChunkUtils.limitReadSize;
 
 /**
  * This class is for performing chunk related operations.
@@ -208,6 +209,7 @@ public class FilePerChunkStrategy implements ChunkManager {
       throws StorageContainerException {
 
     checkLayoutVersion(container);
+    limitReadSize(info.getLen());
 
     KeyValueContainer kvContainer = (KeyValueContainer) container;
     KeyValueContainerData containerData = kvContainer.getContainerData();
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/impl/CommonChunkManagerTestCases.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/impl/CommonChunkManagerTestCases.java
index ca9c7b8..eb2e63c 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/impl/CommonChunkManagerTestCases.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/impl/CommonChunkManagerTestCases.java
@@ -17,6 +17,8 @@
  */
 package org.apache.hadoop.ozone.container.keyvalue.impl;
 
+import org.apache.commons.io.FileUtils;
+import org.apache.commons.lang3.RandomStringUtils;
 import org.apache.hadoop.hdds.client.BlockID;
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
 import org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException;
@@ -28,9 +30,14 @@ import org.apache.hadoop.ozone.container.keyvalue.interfaces.ChunkManager;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.junit.Test;
 
+import java.io.File;
+import java.io.IOException;
 import java.nio.ByteBuffer;
 
+import static java.nio.charset.StandardCharsets.UTF_8;
+import static org.apache.hadoop.ozone.OzoneConsts.OZONE_SCM_CHUNK_MAX_SIZE;
 import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertThrows;
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
 
@@ -66,6 +73,32 @@ public abstract class CommonChunkManagerTestCases
   }
 
   @Test
+  public void testReadOversizeChunk() throws IOException {
+    // GIVEN
+    ChunkManager chunkManager = createTestSubject();
+    DispatcherContext dispatcherContext = getDispatcherContext();
+    KeyValueContainer container = getKeyValueContainer();
+    int tooLarge = OZONE_SCM_CHUNK_MAX_SIZE + 1;
+    byte[] array = RandomStringUtils.randomAscii(tooLarge).getBytes(UTF_8);
+    assertTrue(array.length >= tooLarge);
+
+    BlockID blockID = getBlockID();
+    ChunkInfo chunkInfo = new ChunkInfo(
+        String.format("%d.data.%d", blockID.getLocalID(), 0),
+        0, array.length);
+
+    // write chunk bypassing size limit
+    File chunkFile = getStrategy().getLayout()
+        .getChunkFile(getKeyValueContainerData(), blockID, chunkInfo);
+    FileUtils.writeByteArrayToFile(chunkFile, array);
+
+    // WHEN+THEN
+    assertThrows(StorageContainerException.class, () ->
+        chunkManager.readChunk(container, blockID, chunkInfo, dispatcherContext)
+    );
+  }
+
+  @Test
   public void testWriteChunkStageCombinedData() throws Exception {
     // GIVEN
     ChunkManager chunkManager = createTestSubject();

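For readers skimming the diff: the guardrail pattern introduced here can be illustrated with a minimal, self-contained Java sketch. Everything below is a simplified stand-in, not Ozone code: a hard-coded maximum in place of OzoneConsts.OZONE_SCM_CHUNK_MAX_SIZE, and IllegalArgumentException in place of StorageContainerException. Only the shape of the check mirrors the new ChunkUtils.limitReadSize: validate the requested read length before the datanode reserves a buffer, so an oversized ChunkInfo is rejected up front instead of triggering a huge allocation.

import java.nio.ByteBuffer;

public class ReadSizeGuardrailSketch {

  // Stand-in for OzoneConsts.OZONE_SCM_CHUNK_MAX_SIZE (32 MB).
  static final long MAX_CHUNK_SIZE = 32L * 1024 * 1024;

  // Mirrors the shape of ChunkUtils.limitReadSize: reject oversize requests
  // with a clear error rather than attempting the allocation.
  static void limitReadSize(long len) {
    if (len > MAX_CHUNK_SIZE) {
      throw new IllegalArgumentException(String.format(
          "Oversize read. max: %d, actual: %d", MAX_CHUNK_SIZE, len));
    }
  }

  // A chunk read path calls the check before reserving the read buffer,
  // as the patched readChunk implementations do.
  static ByteBuffer readChunk(long requestedLen) {
    limitReadSize(requestedLen);
    return ByteBuffer.allocate((int) requestedLen);
  }

  public static void main(String[] args) {
    System.out.println(readChunk(4096).capacity());   // fine: prints 4096
    try {
      readChunk(MAX_CHUNK_SIZE + 1);                  // rejected by the guardrail
    } catch (IllegalArgumentException e) {
      System.out.println("rejected: " + e.getMessage());
    }
  }
}

This matches what the new test exercises: it writes an oversized chunk file while bypassing the limit, then asserts that readChunk throws rather than allocating a buffer larger than the configured chunk maximum.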
---------------------------------------------------------------------
To unsubscribe, e-mail: commits-unsubscribe@ozone.apache.org
For additional commands, e-mail: commits-help@ozone.apache.org