Posted to common-commits@hadoop.apache.org by ta...@apache.org on 2022/04/20 06:07:58 UTC

[hadoop] branch branch-3.3 updated: HDFS-16544. EC decoding failed due to invalid buffer (#4179)

This is an automated email from the ASF dual-hosted git repository.

tasanuma pushed a commit to branch branch-3.3
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.3 by this push:
     new 2ff91232bc1 HDFS-16544. EC decoding failed due to invalid buffer (#4179)
2ff91232bc1 is described below

commit 2ff91232bc1a20db21812a76763918b74cbc1a2c
Author: qinyuren <14...@qq.com>
AuthorDate: Wed Apr 20 14:04:27 2022 +0800

    HDFS-16544. EC decoding failed due to invalid buffer (#4179)
    
    (cherry picked from commit 76bbd173749f2af4f17946fb37c4c72e2de26764)
---
 .../apache/hadoop/hdfs/StatefulStripeReader.java   |  5 ---
 .../hdfs/TestReadStripedFileWithDecoding.java      | 38 ++++++++++++++++++++++
 2 files changed, 38 insertions(+), 5 deletions(-)

diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/StatefulStripeReader.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/StatefulStripeReader.java
index 32ea5f80199..bff13bfdc89 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/StatefulStripeReader.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/StatefulStripeReader.java
@@ -74,11 +74,6 @@ class StatefulStripeReader extends StripeReader {
   boolean prepareParityChunk(int index) {
     Preconditions.checkState(index >= dataBlkNum
         && alignedStripe.chunks[index] == null);
-    if (readerInfos[index] != null && readerInfos[index].shouldSkip) {
-      alignedStripe.chunks[index] = new StripingChunk(StripingChunk.MISSING);
-      // we have failed the block reader before
-      return false;
-    }
     final int parityIndex = index - dataBlkNum;
     ByteBuffer buf = dfsStripedInputStream.getParityBuffer().duplicate();
     buf.position(cellSize * parityIndex);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReadStripedFileWithDecoding.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReadStripedFileWithDecoding.java
index 093138beb69..f80cb01bab8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReadStripedFileWithDecoding.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReadStripedFileWithDecoding.java
@@ -19,7 +19,9 @@ package org.apache.hadoop.hdfs;
 
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.protocol.Block;
+import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
+import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
 import org.apache.hadoop.hdfs.protocol.LocatedStripedBlock;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
@@ -183,4 +185,40 @@ public class TestReadStripedFileWithDecoding {
           buffer);
     }
   }
+
+  @Test
+  public void testReadWithCorruptedDataBlockAndParityBlock() throws IOException {
+    final Path file = new Path("/corruptedDataBlockAndParityBlock");
+    final int length = BLOCK_SIZE * NUM_DATA_UNITS;
+    final byte[] bytes = StripedFileTestUtil.generateBytes(length);
+    DFSTestUtil.writeFile(dfs, file, bytes);
+
+    // corrupt one data block and the first parity block
+    int dataBlkDelNum = 1;
+    int parityBlkDelNum = 1;
+    int recoverBlkNum = dataBlkDelNum + parityBlkDelNum;
+    int[] dataBlkIndices = {0};
+    int[] parityBlkIndices = {6};
+
+    LocatedBlocks locatedBlocks = ReadStripedFileWithDecodingHelper.getLocatedBlocks(dfs, file);
+    LocatedStripedBlock lastBlock =
+        (LocatedStripedBlock)locatedBlocks.getLastLocatedBlock();
+
+    int[] delBlkIndices = new int[recoverBlkNum];
+    System.arraycopy(dataBlkIndices, 0,
+        delBlkIndices, 0, dataBlkIndices.length);
+    System.arraycopy(parityBlkIndices, 0,
+        delBlkIndices, dataBlkIndices.length, parityBlkIndices.length);
+    ExtendedBlock[] delBlocks = new ExtendedBlock[recoverBlkNum];
+    for (int i = 0; i < recoverBlkNum; i++) {
+      delBlocks[i] = StripedBlockUtil
+          .constructInternalBlock(lastBlock.getBlock(),
+              CELL_SIZE, NUM_DATA_UNITS, delBlkIndices[i]);
+      cluster.corruptBlockOnDataNodes(delBlocks[i]);
+    }
+
+    byte[] buffer = new byte[length + 100];
+    StripedFileTestUtil.verifyStatefulRead(dfs, file, length, bytes,
+        buffer);
+  }
 }
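
Context on the retained code in StatefulStripeReader.prepareParityChunk: each
parity chunk is carved out of a single shared parity buffer using
ByteBuffer.duplicate() and position(), as visible in the hunk above. The
following is a minimal standalone sketch of that NIO slicing pattern; the
limit() call and the concrete sizes are illustrative assumptions and are not
taken from the diff itself.

    import java.nio.ByteBuffer;

    public class ParityBufferSliceSketch {
      public static void main(String[] args) {
        // Hypothetical sizes for illustration only; real values come from the
        // erasure coding policy in use (e.g. RS-6-3-1024k).
        final int cellSize = 4;
        final int parityBlkNum = 3;

        // Stand-in for dfsStripedInputStream.getParityBuffer():
        // one cell per parity block, held in a single shared buffer.
        ByteBuffer parityBuffer = ByteBuffer.allocate(cellSize * parityBlkNum);

        int parityIndex = 1; // second parity block
        // duplicate() shares the backing bytes but keeps an independent
        // position/limit, so each chunk gets its own window into the buffer.
        ByteBuffer buf = parityBuffer.duplicate();
        buf.position(cellSize * parityIndex);
        buf.limit(cellSize * (parityIndex + 1)); // assumed; not shown in the hunk above

        System.out.println("parity slice covers bytes [" + buf.position()
            + ", " + buf.limit() + ") of the shared buffer");
      }
    }

Because duplicate() shares the backing storage while keeping position and
limit independent, several chunks can point at disjoint windows of the same
parity buffer without copying any data.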


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org