You are viewing a plain text version of this content. The canonical link for it is here.
Posted to common-commits@hadoop.apache.org by vi...@apache.org on 2015/12/18 07:23:39 UTC
[2/3] hadoop git commit: HDFS-9393. After choosing favored nodes,
choosing nodes for remaining replicas should go through
BlockPlacementPolicy (Contributed by J.Andreina)
HDFS-9393. After choosing favored nodes, choosing nodes for remaining replicas should go through BlockPlacementPolicy (Contributed by J.Andreina)
(cherry picked from commit bfadf11b36e9d97e03d6ed1e71829907c2301412)
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c887bcd1
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c887bcd1
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c887bcd1
Branch: refs/heads/branch-2
Commit: c887bcd1f03244300a45ff70df9a9c9f2ad5a431
Parents: af49823
Author: Vinayakumar B <vi...@apache.org>
Authored: Fri Dec 18 11:38:12 2015 +0530
Committer: Vinayakumar B <vi...@apache.org>
Committed: Fri Dec 18 11:39:46 2015 +0530
----------------------------------------------------------------------
hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 4 ++
.../BlockPlacementPolicyDefault.java | 15 ++++++--
.../blockmanagement/TestReplicationPolicy.java | 40 ++++++++++++++++++++
.../TestReplicationPolicyWithNodeGroup.java | 32 ++++++++++++++++
4 files changed, 87 insertions(+), 4 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/c887bcd1/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 5402344..3b9950e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -1654,6 +1654,10 @@ Release 2.8.0 - UNRELEASED
HDFS-9571. Fix ASF Licence warnings in Jenkins reports
(Brahma Reddy Battula via cnauroth)
+ HDFS-9393. After choosing favored nodes, choosing nodes for remaining
+ replicas should go through BlockPlacementPolicy
+ (J.Andreina via vinayakumarb)
+
Release 2.7.3 - UNRELEASED
INCOMPATIBLE CHANGES
http://git-wip-us.apache.org/repos/asf/hadoop/blob/c887bcd1/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
index 08e7851..14439e7 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
@@ -147,11 +147,18 @@ public class BlockPlacementPolicyDefault extends BlockPlacementPolicy {
avoidStaleNodes, storageTypes);
if (results.size() < numOfReplicas) {
- // Not enough favored nodes, choose other nodes.
+ // Not enough favored nodes, choose other nodes, based on block
+ // placement policy (HDFS-9393).
numOfReplicas -= results.size();
- DatanodeStorageInfo[] remainingTargets =
- chooseTarget(src, numOfReplicas, writer, results,
- false, favoriteAndExcludedNodes, blocksize, storagePolicy);
+ for (DatanodeStorageInfo storage : results) {
+ // add localMachine and related nodes to favoriteAndExcludedNodes
+ addToExcludedNodes(storage.getDatanodeDescriptor(),
+ favoriteAndExcludedNodes);
+ }
+ DatanodeStorageInfo[] remainingTargets =
+ chooseTarget(src, numOfReplicas, writer,
+ new ArrayList<DatanodeStorageInfo>(numOfReplicas), false,
+ favoriteAndExcludedNodes, blocksize, storagePolicy);
for (int i = 0; i < remainingTargets.length; i++) {
results.add(remainingTargets[i]);
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/c887bcd1/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java
index 6c2d00b..5fa9851 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java
@@ -1454,4 +1454,44 @@ public class TestReplicationPolicy extends BaseReplicationPolicyTest {
chosenBlocks = underReplicatedBlocks.chooseUnderReplicatedBlocks(1);
assertTheChosenBlocks(chosenBlocks, 1, 0, 0, 0, 0);
}
+
+ /**
+ * In this test case, 2 favored nodes, dataNodes[0] and dataNodes[1], are passed.
+ *
+ * Both favored nodes should be chosen as targets for placing replicas, and
+ * the remaining targets should then be chosen through the
+ * BlockPlacementPolicy, i.e. the third target on the local writer's rack,
+ * the fourth target on a remote rack, and the fifth on the same rack as the
+ * second.
+ *
+ * @throws Exception
+ */
+ @Test
+ public void testChooseExcessReplicaApartFromFavoredNodes() throws Exception {
+ DatanodeStorageInfo[] targets;
+ List<DatanodeDescriptor> expectedTargets =
+ new ArrayList<DatanodeDescriptor>();
+ expectedTargets.add(dataNodes[0]);
+ expectedTargets.add(dataNodes[1]);
+ expectedTargets.add(dataNodes[2]);
+ expectedTargets.add(dataNodes[4]);
+ expectedTargets.add(dataNodes[5]);
+ List<DatanodeDescriptor> favouredNodes =
+ new ArrayList<DatanodeDescriptor>();
+ favouredNodes.add(dataNodes[0]);
+ favouredNodes.add(dataNodes[1]);
+ targets = chooseTarget(5, dataNodes[2], null, favouredNodes);
+ assertEquals(targets.length, 5);
+ for (int i = 0; i < targets.length; i++) {
+ assertTrue("Target should be a part of Expected Targets",
+ expectedTargets.contains(targets[i].getDatanodeDescriptor()));
+ }
+ }
+
+ private DatanodeStorageInfo[] chooseTarget(int numOfReplicas,
+ DatanodeDescriptor writer, Set<Node> excludedNodes,
+ List<DatanodeDescriptor> favoredNodes) {
+ return replicator.chooseTarget(filename, numOfReplicas, writer,
+ excludedNodes, BLOCK_SIZE, favoredNodes,
+ TestBlockStoragePolicy.DEFAULT_STORAGE_POLICY);
+ }
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/c887bcd1/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicyWithNodeGroup.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicyWithNodeGroup.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicyWithNodeGroup.java
index b46983c..8ba9fb6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicyWithNodeGroup.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicyWithNodeGroup.java
@@ -781,4 +781,36 @@ public class TestReplicationPolicyWithNodeGroup extends BaseReplicationPolicyTes
assertTrue("2nd Replica is incorrect",
expectedTargets.contains(targets[1].getDatanodeDescriptor()));
}
+
+ /**
+ * In this test case, 3 favored nodes, dataNodes[0], dataNodes[1] and
+ * dataNodes[2], are passed.
+ *
+ * Favored nodes on different node groups should be selected. The remaining
+ * replica should be chosen through the BlockPlacementPolicy.
+ *
+ * @throws Exception
+ */
+ @Test
+ public void testChooseRemainingReplicasApartFromFavoredNodes()
+ throws Exception {
+ DatanodeStorageInfo[] targets;
+ List<DatanodeDescriptor> expectedTargets =
+ new ArrayList<DatanodeDescriptor>();
+ expectedTargets.add(dataNodes[0]);
+ expectedTargets.add(dataNodes[2]);
+ expectedTargets.add(dataNodes[3]);
+ expectedTargets.add(dataNodes[6]);
+ expectedTargets.add(dataNodes[7]);
+ List<DatanodeDescriptor> favouredNodes =
+ new ArrayList<DatanodeDescriptor>();
+ favouredNodes.add(dataNodes[0]);
+ favouredNodes.add(dataNodes[1]);
+ favouredNodes.add(dataNodes[2]);
+ targets = chooseTarget(3, dataNodes[3], null, favouredNodes);
+ for (int i = 0; i < targets.length; i++) {
+ assertTrue("Target should be a part of Expected Targets",
+ expectedTargets.contains(targets[i].getDatanodeDescriptor()));
+ }
+ }
}