Posted to common-commits@hadoop.apache.org by le...@apache.org on 2017/09/11 17:06:15 UTC

hadoop git commit: Revert "HDFS-12349. Improve log message when it could not alloc enough blocks for EC. (lei)"

Repository: hadoop
Updated Branches:
  refs/heads/trunk 5ab21dfe9 -> de9994bd8


Revert "HDFS-12349. Improve log message when it could not alloc enough blocks for EC. (lei)"

This reverts commit 3e6d0ca2b2f79bfa87faa7bbd46d814a48334fbd.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/de9994bd
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/de9994bd
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/de9994bd

Branch: refs/heads/trunk
Commit: de9994bd893af70fffdd68af6252fc45020e0e69
Parents: 5ab21df
Author: Lei Xu <le...@apache.org>
Authored: Mon Sep 11 10:06:05 2017 -0700
Committer: Lei Xu <le...@apache.org>
Committed: Mon Sep 11 10:06:05 2017 -0700

----------------------------------------------------------------------
 .../hadoop/hdfs/DFSStripedOutputStream.java     | 17 ++++++------
 .../server/blockmanagement/BlockManager.java    | 26 ++++++------------
 .../hdfs/server/namenode/FSDirWriteFileOp.java  | 14 ++--------
 .../TestDFSStripedOutputStreamWithFailure.java  | 29 ++++++++++----------
 .../hdfs/server/namenode/TestDeadDatanode.java  |  4 +--
 5 files changed, 36 insertions(+), 54 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/de9994bd/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSStripedOutputStream.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSStripedOutputStream.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSStripedOutputStream.java
index 09dc181..7f05338 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSStripedOutputStream.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSStripedOutputStream.java
@@ -260,7 +260,6 @@ public class DFSStripedOutputStream extends DFSOutputStream
 
   private final Coordinator coordinator;
   private final CellBuffers cellBuffers;
-  private final ErasureCodingPolicy ecPolicy;
   private final RawErasureEncoder encoder;
   private final List<StripedDataStreamer> streamers;
   private final DFSPacket[] currentPackets; // current Packet of each streamer
@@ -287,7 +286,7 @@ public class DFSStripedOutputStream extends DFSOutputStream
       LOG.debug("Creating DFSStripedOutputStream for " + src);
     }
 
-    ecPolicy = stat.getErasureCodingPolicy();
+    final ErasureCodingPolicy ecPolicy = stat.getErasureCodingPolicy();
     final int numParityBlocks = ecPolicy.getNumParityUnits();
     cellSize = ecPolicy.getCellSize();
     numDataBlocks = ecPolicy.getNumDataUnits();
@@ -479,6 +478,11 @@ public class DFSStripedOutputStream extends DFSOutputStream
     final LocatedBlock lb = addBlock(excludedNodes, dfsClient, src,
         currentBlockGroup, fileId, favoredNodes, getAddBlockFlags());
     assert lb.isStriped();
+    if (lb.getLocations().length < numDataBlocks) {
+      throw new IOException("Failed to get " + numDataBlocks
+          + " nodes from namenode: blockGroupSize= " + numAllBlocks
+          + ", blocks.length= " + lb.getLocations().length);
+    }
     // assign the new block to the current block group
     currentBlockGroup = lb.getBlock();
     blockGroupIndex++;
@@ -490,16 +494,11 @@ public class DFSStripedOutputStream extends DFSOutputStream
       StripedDataStreamer si = getStripedDataStreamer(i);
       assert si.isHealthy();
       if (blocks[i] == null) {
-        // allocBlock() should guarantee that all data blocks are successfully
-        // allocated.
-        assert i >= numDataBlocks;
         // Set exception and close streamer as there is no block locations
         // found for the parity block.
-        LOG.warn("Cannot allocate parity block(index={}, policy={}). " +
-            "Not enough datanodes? Excluded nodes={}", i,  ecPolicy.getName(),
-            excludedNodes);
+        LOG.warn("Failed to get block location for parity block, index=" + i);
         si.getLastException().set(
-            new IOException("Failed to get parity block, index=" + i));
+            new IOException("Failed to get following block, i=" + i));
         si.getErrorState().setInternalError();
         si.close(true);
       } else {
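
The hunk above moves the under-allocation check back into the client. After this
revert, the block-allocation path in DFSStripedOutputStream fails fast when the
NameNode returns fewer locations than there are data units, instead of relying
on the NameNode-side validation that HDFS-12349 had added. A minimal sketch of
the restored guard, using only names that appear in the diff (numDataBlocks,
numAllBlocks); everything else in the allocation path is elided:

  final LocatedBlock lb = addBlock(excludedNodes, dfsClient, src,
      currentBlockGroup, fileId, favoredNodes, getAddBlockFlags());
  assert lb.isStriped();
  if (lb.getLocations().length < numDataBlocks) {
    // Fewer locations than data units: the striped group cannot be
    // written at all, so fail the stream up front.
    throw new IOException("Failed to get " + numDataBlocks
        + " nodes from namenode: blockGroupSize= " + numAllBlocks
        + ", blocks.length= " + lb.getLocations().length);
  }

Missing parity locations stay non-fatal: as the second hunk shows, the affected
StripedDataStreamer is closed with an internal error while the write continues
on the remaining streamers.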

http://git-wip-us.apache.org/repos/asf/hadoop/blob/de9994bd/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index 40c249d..f4e5cb4 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -2057,7 +2057,6 @@ public class BlockManager implements BlockStatsMXBean {
       final List<String> favoredNodes,
       final byte storagePolicyID,
       final BlockType blockType,
-      final ErasureCodingPolicy ecPolicy,
       final EnumSet<AddBlockFlag> flags) throws IOException {
     List<DatanodeDescriptor> favoredDatanodeDescriptors = 
         getDatanodeDescriptors(favoredNodes);
@@ -2068,23 +2067,14 @@ public class BlockManager implements BlockStatsMXBean {
     final DatanodeStorageInfo[] targets = blockplacement.chooseTarget(src,
         numOfReplicas, client, excludedNodes, blocksize, 
         favoredDatanodeDescriptors, storagePolicy, flags);
-
-    final String ERROR_MESSAGE = "File %s could only be written to %d of " +
-        "the %d %s. There are %d datanode(s) running and %s "
-        + "node(s) are excluded in this operation.";
-    if (blockType == BlockType.CONTIGUOUS && targets.length < minReplication) {
-      throw new IOException(String.format(ERROR_MESSAGE, src,
-          targets.length, minReplication, "minReplication", minReplication,
-          getDatanodeManager().getNetworkTopology().getNumOfLeaves(),
-          (excludedNodes == null? "no": excludedNodes.size())));
-    } else if (blockType == BlockType.STRIPED &&
-        targets.length < ecPolicy.getNumDataUnits()) {
-      throw new IOException(
-          String.format(ERROR_MESSAGE, src, targets.length,
-              ecPolicy.getNumDataUnits(),
-              String.format("required nodes for %s", ecPolicy.getName()),
-              getDatanodeManager().getNetworkTopology().getNumOfLeaves(),
-              (excludedNodes == null ? "no" : excludedNodes.size())));
+    if (targets.length < minReplication) {
+      throw new IOException("File " + src + " could only be replicated to "
+          + targets.length + " nodes instead of minReplication (="
+          + minReplication + ").  There are "
+          + getDatanodeManager().getNetworkTopology().getNumOfLeaves()
+          + " datanode(s) running and "
+          + (excludedNodes == null? "no": excludedNodes.size())
+          + " node(s) are excluded in this operation.");
     }
     return targets;
   }
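
With the EC-aware message gone, contiguous and striped allocations alike fall
through to the single minReplication check restored above. The practical
consequence, sketched as a worked example under stated assumptions (the default
RS-6-3-1024k erasure coding policy, dfs.namenode.replication.min left at its
default of 1, three live datanodes, no excluded nodes):

  // NameNode:  targets.length (3) >= minReplication (1)   -> no exception
  // Client:    lb.getLocations().length (3) < numDataBlocks (6)
  //            -> IOException("Failed to get 6 nodes from namenode: "
  //                   + "blockGroupSize= 9, blocks.length= 3")

An under-provisioned striped write therefore passes NameNode validation and
fails on the client side with the message shown in the DFSStripedOutputStream
hunk above.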

http://git-wip-us.apache.org/repos/asf/hadoop/blob/de9994bd/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirWriteFileOp.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirWriteFileOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirWriteFileOp.java
index fc5ac9e..012e916 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirWriteFileOp.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirWriteFileOp.java
@@ -201,7 +201,7 @@ class FSDirWriteFileOp {
     }
     storagePolicyID = pendingFile.getStoragePolicyID();
     return new ValidateAddBlockResult(blockSize, numTargets, storagePolicyID,
-                                      clientMachine, blockType, ecPolicy);
+                                      clientMachine, blockType);
   }
 
   static LocatedBlock makeLocatedBlock(FSNamesystem fsn, BlockInfo blk,
@@ -286,7 +286,7 @@ class FSDirWriteFileOp {
     return bm.chooseTarget4NewBlock(src, r.numTargets, clientNode,
                                     excludedNodesSet, r.blockSize,
                                     favoredNodesList, r.storagePolicyID,
-                                    r.blockType, r.ecPolicy, flags);
+                                    r.blockType, flags);
   }
 
   /**
@@ -836,23 +836,15 @@ class FSDirWriteFileOp {
     final byte storagePolicyID;
     final String clientMachine;
     final BlockType blockType;
-    final ErasureCodingPolicy ecPolicy;
 
     ValidateAddBlockResult(
         long blockSize, int numTargets, byte storagePolicyID,
-        String clientMachine, BlockType blockType,
-        ErasureCodingPolicy ecPolicy) {
+        String clientMachine, BlockType blockType) {
       this.blockSize = blockSize;
       this.numTargets = numTargets;
       this.storagePolicyID = storagePolicyID;
       this.clientMachine = clientMachine;
       this.blockType = blockType;
-      this.ecPolicy = ecPolicy;
-
-      if (blockType == BlockType.STRIPED) {
-        Preconditions.checkArgument(ecPolicy != null,
-            "ecPolicy is not specified for striped block");
-      }
     }
   }
 }
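
The hunks above return ValidateAddBlockResult to its pre-HDFS-12349 shape: the
ecPolicy field, its constructor argument, and the Preconditions check for
striped blocks are all removed. A sketch of the restored call flow, using only
names that appear in the diff:

  // validateAddBlock() builds the result without an EC policy...
  return new ValidateAddBlockResult(blockSize, numTargets, storagePolicyID,
      clientMachine, blockType);

  // ...and target selection forwards it to the BlockManager without the
  // extra parameter:
  return bm.chooseTarget4NewBlock(src, r.numTargets, clientNode,
      excludedNodesSet, r.blockSize, favoredNodesList, r.storagePolicyID,
      r.blockType, flags);

This also removes the only use of the policy name on this path, which is why
the restored BlockManager message above can no longer say which erasure coding
policy the file was being written with.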

http://git-wip-us.apache.org/repos/asf/hadoop/blob/de9994bd/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure.java
index f770673..f63a353 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedOutputStreamWithFailure.java
@@ -42,7 +42,6 @@ import org.apache.hadoop.io.erasurecode.ErasureCodeNative;
 import org.apache.hadoop.io.erasurecode.rawcoder.NativeRSRawErasureCoderFactory;
 import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.test.GenericTestUtils;
-import org.apache.hadoop.test.LambdaTestUtils;
 import org.apache.hadoop.util.StringUtils;
 import org.apache.log4j.Level;
 import org.junit.Assert;
@@ -283,7 +282,7 @@ public class TestDFSStripedOutputStreamWithFailure {
 
   @Test(timeout = 90000)
   public void testAddBlockWhenNoSufficientDataBlockNumOfNodes()
-      throws Exception {
+      throws IOException {
     HdfsConfiguration conf = new HdfsConfiguration();
     conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, blockSize);
     try {
@@ -302,18 +301,20 @@ public class TestDFSStripedOutputStreamWithFailure {
           DatanodeReportType.LIVE);
       assertEquals("Mismatches number of live Dns ", numDatanodes, info.length);
       final Path dirFile = new Path(dir, "ecfile");
-      LambdaTestUtils.intercept(
-          IOException.class,
-          "File " + dirFile + " could only be written to " +
-              numDatanodes + " of the " + dataBlocks + " required nodes for " +
-              getEcPolicy().getName(),
-          () -> {
-            try (FSDataOutputStream out = dfs.create(dirFile, true)) {
-              out.write("something".getBytes());
-              out.flush();
-            }
-            return 0;
-          });
+      FSDataOutputStream out;
+      try {
+        out = dfs.create(dirFile, true);
+        out.write("something".getBytes());
+        out.flush();
+        out.close();
+        Assert.fail("Failed to validate available dns against blkGroupSize");
+      } catch (IOException ioe) {
+        // expected
+        GenericTestUtils.assertExceptionContains("Failed to get " +
+            dataBlocks + " nodes from namenode: blockGroupSize= " +
+            (dataBlocks + parityBlocks) + ", blocks.length= " +
+            numDatanodes, ioe);
+      }
     } finally {
       tearDown();
     }
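
The test reverts to a plain try/catch with
GenericTestUtils.assertExceptionContains(), since the LambdaTestUtils.intercept()
call matched the NameNode-side message that no longer exists; dropping the
lambda also lets the throws clause narrow from Exception back to IOException.
For comparison only, a sketch of the same expectation written with intercept()
and retargeted at the restored client-side message (illustrative, not what the
commit does):

  LambdaTestUtils.intercept(IOException.class,
      "Failed to get " + dataBlocks + " nodes from namenode: blockGroupSize= "
          + (dataBlocks + parityBlocks),
      () -> {
        try (FSDataOutputStream out = dfs.create(dirFile, true)) {
          out.write("something".getBytes());
          out.flush();
        }
        return 0;
      });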

http://git-wip-us.apache.org/repos/asf/hadoop/blob/de9994bd/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDeadDatanode.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDeadDatanode.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDeadDatanode.java
index b6c1318..74be90c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDeadDatanode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDeadDatanode.java
@@ -175,8 +175,8 @@ public class TestDeadDatanode {
     // choose the targets, but local node should not get selected as this is not
     // part of the cluster anymore
     DatanodeStorageInfo[] results = bm.chooseTarget4NewBlock("/hello", 3,
-        clientNode, new HashSet<>(), 256 * 1024 * 1024L, null, (byte) 7,
-        BlockType.CONTIGUOUS, null, null);
+        clientNode, new HashSet<Node>(), 256 * 1024 * 1024L, null, (byte) 7,
+        BlockType.CONTIGUOUS, null);
     for (DatanodeStorageInfo datanodeStorageInfo : results) {
       assertFalse("Dead node should not be choosen", datanodeStorageInfo
           .getDatanodeDescriptor().equals(clientNode));

