Posted to common-commits@hadoop.apache.org by ta...@apache.org on 2022/04/19 04:40:36 UTC

[hadoop] branch branch-3.2 updated: HDFS-16538. EC decoding failed due to not enough valid inputs (#4167)

This is an automated email from the ASF dual-hosted git repository.

tasanuma pushed a commit to branch branch-3.2
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.2 by this push:
     new d993d220384 HDFS-16538. EC decoding failed due to not enough valid inputs (#4167)
d993d220384 is described below

commit d993d2203840bf98f773fa1dd6e19ec2931dedbd
Author: qinyuren <14...@qq.com>
AuthorDate: Tue Apr 19 12:37:28 2022 +0800

    HDFS-16538. EC decoding failed due to not enough valid inputs (#4167)
    
    Co-authored-by: liubingxing <li...@bigo.sg>
    (cherry picked from commit 52e152f8b0d5f522f3b799ea72c6c887d5d2c42d)
---
 .../org/apache/hadoop/hdfs/StatefulStripeReader.java   |  4 +++-
 .../hadoop/hdfs/TestReadStripedFileWithDecoding.java   | 18 ++++++++++++++++++
 2 files changed, 21 insertions(+), 1 deletion(-)
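
For context on the one-line guard in the hunk below: the failure reported in
HDFS-16538 is that the method re-allocated decodeInputs on every invocation,
discarding chunk state that an earlier pass over the same stripe had already
recorded in the array; with more than one corrupted data block, the erasure
decoder then saw fewer than dataBlkNum valid inputs and failed. Making the
allocation lazy lets staged inputs survive across calls. The following is a
minimal standalone sketch of that pattern; the names (DecodeInputHolder,
inputs, stage) are hypothetical, and only the null check mirrors the actual
change.

    import java.util.Arrays;

    // Illustrative sketch only: shows why re-allocating a partially
    // populated input array loses state across calls.
    public class DecodeInputHolder {
      private String[] inputs;

      // Mirrors the fixed pattern: allocate once, keep staged inputs.
      void prepare(int dataUnits, int parityUnits) {
        // Before the fix, the allocation here was unconditional, wiping
        // anything staged by a previous prepare/stage cycle.
        if (inputs == null) {
          inputs = new String[dataUnits + parityUnits];
        }
      }

      void stage(int index, String chunk) {
        inputs[index] = chunk;
      }

      long validInputs() {
        return Arrays.stream(inputs).filter(c -> c != null).count();
      }

      public static void main(String[] args) {
        DecodeInputHolder holder = new DecodeInputHolder();
        holder.prepare(6, 3);
        holder.stage(6, "parity-0");   // staged during a first decode round
        holder.prepare(6, 3);          // second round: must not wipe index 6
        holder.stage(0, "data-0");
        System.out.println(holder.validInputs()); // 2 with the fix, 1 without
      }
    }
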

diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/StatefulStripeReader.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/StatefulStripeReader.java
index b37501d2e2b..98a6ba13b02 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/StatefulStripeReader.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/StatefulStripeReader.java
@@ -52,7 +52,9 @@ class StatefulStripeReader extends StripeReader {
       cur = dfsStripedInputStream.getCurStripeBuf().duplicate();
     }
 
-    this.decodeInputs = new ECChunk[dataBlkNum + parityBlkNum];
+    if (this.decodeInputs == null) {
+      this.decodeInputs = new ECChunk[dataBlkNum + parityBlkNum];
+    }
     int bufLen = (int) alignedStripe.getSpanInBlock();
     int bufOff = (int) alignedStripe.getOffsetInBlock();
     for (int i = 0; i < dataBlkNum; i++) {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReadStripedFileWithDecoding.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReadStripedFileWithDecoding.java
index 2fb9212f354..99ea6b687eb 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReadStripedFileWithDecoding.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReadStripedFileWithDecoding.java
@@ -43,6 +43,7 @@ import java.io.FileOutputStream;
 import java.io.IOException;
 import java.nio.ByteBuffer;
 
+import static org.apache.hadoop.hdfs.ReadStripedFileWithDecodingHelper.BLOCK_SIZE;
 import static org.apache.hadoop.hdfs.ReadStripedFileWithDecodingHelper.CELL_SIZE;
 import static org.apache.hadoop.hdfs.ReadStripedFileWithDecodingHelper.NUM_DATA_UNITS;
 import static org.apache.hadoop.hdfs.ReadStripedFileWithDecodingHelper.NUM_PARITY_UNITS;
@@ -162,4 +163,21 @@ public class TestReadStripedFileWithDecoding {
       DataNodeTestUtils.setHeartbeatsDisabledForTests(dn, false);
     }
   }
+
+  @Test
+  public void testMoreThanOneCorruptedBlock() throws IOException {
+    final Path file = new Path("/corrupted");
+    final int length = BLOCK_SIZE * NUM_DATA_UNITS;
+    final byte[] bytes = StripedFileTestUtil.generateBytes(length);
+    DFSTestUtil.writeFile(dfs, file, bytes);
+
+    // read the file with more than one corrupted data block
+    byte[] buffer = new byte[length + 100];
+    for (int count = 2; count < NUM_PARITY_UNITS; ++count) {
+      ReadStripedFileWithDecodingHelper.corruptBlocks(cluster, dfs, file, count, 0,
+          false);
+      StripedFileTestUtil.verifyStatefulRead(dfs, file, length, bytes,
+          buffer);
+    }
+  }
 }
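
A note on the bounds in the new test, assuming the usual RS-6-3 layout
(NUM_DATA_UNITS = 6, NUM_PARITY_UNITS = 3; an assumption, since the constants
live in ReadStripedFileWithDecodingHelper): an RS(k, m) codec reconstructs a
stripe from any k of its k + m units, so a read stays correct as long as at
most m units are corrupted. The loop therefore starts at two corrupted blocks,
the smallest case that reproduces this bug, and stays strictly below
NUM_PARITY_UNITS so every iteration must be recoverable. A quick sketch of
that bound, under the same assumed policy:

    // Hedged sketch of the recoverability bound the test exercises.
    // RS(k, m): any k of the k + m units reconstruct the stripe.
    public class RecoverabilityCheck {
      static boolean recoverable(int corrupted, int k, int m) {
        // surviving units must still number at least k
        return (k + m) - corrupted >= k;   // i.e. corrupted <= m
      }

      public static void main(String[] args) {
        int k = 6, m = 3;                  // assumed RS-6-3 policy
        for (int corrupted = 2; corrupted < m; ++corrupted) {
          // mirrors the test loop: every count tried must be recoverable
          System.out.println(corrupted + " corrupted -> "
              + recoverable(corrupted, k, m));
        }
      }
    }
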


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org