Posted to common-commits@hadoop.apache.org by ay...@apache.org on 2020/06/10 06:41:24 UTC

[hadoop] branch branch-3.3 updated: HDFS-15398. EC: hdfs client hangs due to exception during addBlock. Contributed by Hongbing Wang.

This is an automated email from the ASF dual-hosted git repository.

ayushsaxena pushed a commit to branch branch-3.3
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.3 by this push:
     new 043628d  HDFS-15398. EC: hdfs client hangs due to exception during addBlock. Contributed by Hongbing Wang.
043628d is described below

commit 043628dcf157e4005319558f66acaf56ba4bf9fc
Author: Ayush Saxena <ay...@apache.org>
AuthorDate: Wed Jun 10 12:06:16 2020 +0530

    HDFS-15398. EC: hdfs client hangs due to exception during addBlock. Contributed by Hongbing Wang.
---
 .../apache/hadoop/hdfs/DFSStripedOutputStream.java | 10 ++++--
 .../TestDFSStripedOutputStreamUpdatePipeline.java  | 36 ++++++++++++++++++++++
 2 files changed, 44 insertions(+), 2 deletions(-)

diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSStripedOutputStream.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSStripedOutputStream.java
index 8d651d8..bd1a6ec 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSStripedOutputStream.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSStripedOutputStream.java
@@ -501,8 +501,14 @@ public class DFSStripedOutputStream extends DFSOutputStream
 
     LOG.debug("Allocating new block group. The previous block group: "
         + prevBlockGroup);
-    final LocatedBlock lb = addBlock(excludedNodes, dfsClient, src,
-         prevBlockGroup, fileId, favoredNodes, getAddBlockFlags());
+    final LocatedBlock lb;
+    try {
+      lb = addBlock(excludedNodes, dfsClient, src,
+          prevBlockGroup, fileId, favoredNodes, getAddBlockFlags());
+    } catch (IOException ioe) {
+      closeAllStreamers();
+      throw ioe;
+    }
     assert lb.isStriped();
     // assign the new block to the current block group
     currentBlockGroup = lb.getBlock();
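
Note on the fix: closeAllStreamers() itself is not shown in this diff. A minimal sketch of the idea, assuming the streamers list that DFSStripedOutputStream keeps for its per-cell data streamers, would look like the following; force-closing every streamer before rethrowing means a later close() on the output stream does not get stuck waiting on streamers that will never be handed a block, which is the client hang reported in HDFS-15398 and exercised by the new test below.

    // Sketch only: the real method is defined elsewhere in
    // DFSStripedOutputStream and is not part of this diff.
    private void closeAllStreamers() {
      // The write has already failed; force-close every striped streamer so
      // none of them keeps waiting for a block allocation that will never
      // happen.
      for (StripedDataStreamer streamer : streamers) {
        streamer.close(true);
      }
    }
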
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamUpdatePipeline.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamUpdatePipeline.java
index 8e50b79..ae29da0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamUpdatePipeline.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamUpdatePipeline.java
@@ -61,4 +61,40 @@ public class TestDFSStripedOutputStreamUpdatePipeline {
       }
     }
   }
+
+  /**
+   * Test that writing an EC file does not hang when allocating the second
+   * block group fails with an addBlock exception (e.g. a quota exception).
+   */
+  @Test(timeout = 90000)
+  public void testECWriteHangWhenAddBlockWithException() throws Exception {
+    Configuration conf = new HdfsConfiguration();
+    conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 1 * 1024 * 1024);
+    try (MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
+        .numDataNodes(3).build()) {
+      cluster.waitActive();
+      final DistributedFileSystem dfs = cluster.getFileSystem();
+      // Create a file with EC policy
+      Path dir = new Path("/test");
+      dfs.mkdirs(dir);
+      dfs.enableErasureCodingPolicy("XOR-2-1-1024k");
+      dfs.setErasureCodingPolicy(dir, "XOR-2-1-1024k");
+      Path filePath = new Path("/test/file");
+      FSDataOutputStream out = dfs.create(filePath);
+      for (int i = 0; i < 1024 * 1024 * 2; i++) {
+        out.write(i);
+      }
+      dfs.setQuota(dir, 5, 0);
+      try {
+        for (int i = 0; i < 1024 * 1024 * 2; i++) {
+          out.write(i);
+        }
+      } catch (Exception e) {
+        dfs.delete(filePath, true);
+      } finally {
+        // The close should succeed and shouldn't get stuck.
+        IOUtils.closeStream(out);
+      }
+    }
+  }
 }
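
From the client's point of view, the scenario covered by the new test looks roughly like the sketch below (the directory, EC policy and quota values mirror the test; the 2 MB buffers are illustrative and assume the 1 MB block size configured above). Before this change the failed block allocation could leave the stream stuck so that the final close never returned; with the change the quota exception surfaces from the write and the stream closes cleanly.

    // Illustrative only: assumes the same MiniDFSCluster and
    // DistributedFileSystem setup as the test above.
    Path dir = new Path("/test");
    dfs.mkdirs(dir);
    dfs.enableErasureCodingPolicy("XOR-2-1-1024k");
    dfs.setErasureCodingPolicy(dir, "XOR-2-1-1024k");
    FSDataOutputStream out = dfs.create(new Path("/test/file"));
    out.write(new byte[2 * 1024 * 1024]);    // fills the first block group
    dfs.setQuota(dir, 5, 0);                 // make the next allocation fail
    try {
      out.write(new byte[2 * 1024 * 1024]);  // needs a second block group
    } catch (IOException e) {
      // Expected: a quota exception raised while addBlock() allocates the
      // new block group.
    } finally {
      IOUtils.closeStream(out);              // must return instead of hanging
    }
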


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org