You are viewing a plain text version of this content. The canonical link for it is here.
Posted to common-commits@hadoop.apache.org by ji...@apache.org on 2015/07/14 19:56:12 UTC
hadoop git commit: HDFS-8702. Erasure coding: update
BlockManager.blockHasEnoughRacks(..) logic for striped block. Contributed by
Kai Sasaki.
Repository: hadoop
Updated Branches:
refs/heads/HDFS-7285 b1e6429a6 -> 6ff957be8
HDFS-8702. Erasure coding: update BlockManager.blockHasEnoughRacks(..) logic for striped block. Contributed by Kai Sasaki.
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6ff957be
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6ff957be
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6ff957be
Branch: refs/heads/HDFS-7285
Commit: 6ff957be88d48a8b41e9fcbe4cf466d672cd7bc1
Parents: b1e6429
Author: Jing Zhao <ji...@apache.org>
Authored: Tue Jul 14 10:55:58 2015 -0700
Committer: Jing Zhao <ji...@apache.org>
Committed: Tue Jul 14 10:55:58 2015 -0700
----------------------------------------------------------------------
.../hadoop-hdfs/CHANGES-HDFS-EC-7285.txt | 3 ++
.../server/blockmanagement/BlockManager.java | 43 +++++++++++++++++++-
2 files changed, 44 insertions(+), 2 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/6ff957be/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
index cd9e19d..2b91295 100755
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
@@ -347,3 +347,6 @@
HDFS-8669. Erasure Coding: handle missing internal block locations in
DFSStripedInputStream. (jing9)
+
+ HDFS-8702. Erasure coding: update BlockManager.blockHasEnoughRacks(..) logic
+ for striped block. (Kai Sasaki via jing9)
http://git-wip-us.apache.org/repos/asf/hadoop/blob/6ff957be/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index 968dc0c..1aaf225 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -3819,14 +3819,53 @@ public class BlockManager {
return toInvalidate.size();
}
- // TODO: update the enough rack logic for striped blocks
boolean blockHasEnoughRacks(BlockInfo storedBlock, int expectedStorageNum) {
if (!this.shouldCheckForEnoughRacks) {
return true;
}
- boolean enoughRacks = false;
Collection<DatanodeDescriptor> corruptNodes =
corruptReplicas.getNodes(storedBlock);
+
+ if (storedBlock.isStriped()) {
+ return blockHasEnoughRacksStriped(storedBlock, corruptNodes);
+ } else {
+ return blockHashEnoughRacksContiguous(storedBlock, expectedStorageNum,
+ corruptNodes);
+ }
+ }
+
+ /**
+ * Verify whether the given striped block is distributed across enough racks.
+ * As discussed in HDFS-7613, an EC file requires at least as many racks as
+ * the number of data blocks.
+ */
+ boolean blockHasEnoughRacksStriped(BlockInfo storedBlock,
+ Collection<DatanodeDescriptor> corruptNodes) {
+ if (!datanodeManager.hasClusterEverBeenMultiRack()) {
+ return true;
+ }
+ boolean enoughRacks = false;
+ Set<String> rackNameSet = new HashSet<>();
+ int dataBlockNum = ((BlockInfoStriped)storedBlock).getRealDataBlockNum();
+ for (DatanodeStorageInfo storage : blocksMap.getStorages(storedBlock)) {
+ final DatanodeDescriptor cur = storage.getDatanodeDescriptor();
+ if (!cur.isDecommissionInProgress() && !cur.isDecommissioned()) {
+ if ((corruptNodes == null) || !corruptNodes.contains(cur)) {
+ String rackNameNew = cur.getNetworkLocation();
+ rackNameSet.add(rackNameNew);
+ if (rackNameSet.size() >= dataBlockNum) {
+ enoughRacks = true;
+ break;
+ }
+ }
+ }
+ }
+ return enoughRacks;
+ }
+
+ boolean blockHashEnoughRacksContiguous(BlockInfo storedBlock,
+ int expectedStorageNum, Collection<DatanodeDescriptor> corruptNodes) {
+ boolean enoughRacks = false;
String rackName = null;
for(DatanodeStorageInfo storage : blocksMap.getStorages(storedBlock)) {
final DatanodeDescriptor cur = storage.getDatanodeDescriptor();