You are viewing a plain text version of this content. The canonical link for it is here.
Posted to common-commits@hadoop.apache.org by ay...@apache.org on 2020/05/18 17:00:52 UTC
[hadoop] branch trunk updated: HDFS-14999. Avoid Potential Infinite
Loop in DFSNetworkTopology. Contributed by Ayush Saxena.
This is an automated email from the ASF dual-hosted git repository.
ayushsaxena pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git
The following commit(s) were added to refs/heads/trunk by this push:
new c84e6be HDFS-14999. Avoid Potential Infinite Loop in DFSNetworkTopology. Contributed by Ayush Saxena.
c84e6be is described below
commit c84e6beada4e604175f7f138c9878a29665a8c47
Author: Ayush Saxena <ay...@apache.org>
AuthorDate: Mon May 18 21:06:46 2020 +0530
HDFS-14999. Avoid Potential Infinite Loop in DFSNetworkTopology. Contributed by Ayush Saxena.
---
.../apache/hadoop/hdfs/net/DFSNetworkTopology.java | 51 ++++++++++++++--------
1 file changed, 32 insertions(+), 19 deletions(-)
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/net/DFSNetworkTopology.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/net/DFSNetworkTopology.java
index e4e4350..dbc5dea 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/net/DFSNetworkTopology.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/net/DFSNetworkTopology.java
@@ -249,17 +249,10 @@ public class DFSNetworkTopology extends NetworkTopology {
return null;
}
// to this point, it is guaranteed that there is at least one node
- // that satisfies the requirement, keep trying until we found one.
- Node chosen;
- do {
- chosen = chooseRandomWithStorageTypeAndExcludeRoot(root, excludeRoot,
- type);
- if (excludedNodes == null || !excludedNodes.contains(chosen)) {
- break;
- } else {
- LOG.debug("Node {} is excluded, continuing.", chosen);
- }
- } while (true);
+ // that satisfies the requirement.
+ Node chosen =
+ chooseRandomWithStorageTypeAndExcludeRoot(root, excludeRoot, type,
+ excludedNodes);
LOG.debug("chooseRandom returning {}", chosen);
return chosen;
}
@@ -268,23 +261,23 @@ public class DFSNetworkTopology extends NetworkTopology {
* Choose a random node that has the required storage type, under the given
* root, with an excluded subtree root (could also just be a leaf node).
*
- * Note that excludedNode is checked after a random node, so it is not being
- * handled here.
- *
* @param root the root node where we start searching for a datanode
* @param excludeRoot the root of the subtree that should be excluded
* @param type the expected storage type
+ * @param excludedNodes the list of nodes to be excluded
* @return a random datanode, with the storage type, and is not in excluded
* scope
*/
private Node chooseRandomWithStorageTypeAndExcludeRoot(
- DFSTopologyNodeImpl root, Node excludeRoot, StorageType type) {
+ DFSTopologyNodeImpl root, Node excludeRoot, StorageType type,
+ Collection<Node> excludedNodes) {
Node chosenNode;
if (root.isRack()) {
// children are datanode descriptor
ArrayList<Node> candidates = new ArrayList<>();
for (Node node : root.getChildren()) {
- if (node.equals(excludeRoot)) {
+ if (node.equals(excludeRoot) || (excludedNodes != null && excludedNodes
+ .contains(node))) {
continue;
}
DatanodeDescriptor dnDescriptor = (DatanodeDescriptor)node;
@@ -301,7 +294,7 @@ public class DFSNetworkTopology extends NetworkTopology {
} else {
// the children are inner nodes
ArrayList<DFSTopologyNodeImpl> candidates =
- getEligibleChildren(root, excludeRoot, type);
+ getEligibleChildren(root, excludeRoot, type, excludedNodes);
if (candidates.size() == 0) {
return null;
}
@@ -330,7 +323,7 @@ public class DFSNetworkTopology extends NetworkTopology {
}
DFSTopologyNodeImpl nextRoot = candidates.get(idxChosen);
chosenNode = chooseRandomWithStorageTypeAndExcludeRoot(
- nextRoot, excludeRoot, type);
+ nextRoot, excludeRoot, type, excludedNodes);
}
return chosenNode;
}
@@ -343,11 +336,13 @@ public class DFSNetworkTopology extends NetworkTopology {
* @param root the subtree root we check.
* @param excludeRoot the root of the subtree that should be excluded.
* @param type the storage type we look for.
+ * @param excludedNodes the list of excluded nodes.
* @return a list of possible nodes, each of them is eligible as the next
* level root we search.
*/
private ArrayList<DFSTopologyNodeImpl> getEligibleChildren(
- DFSTopologyNodeImpl root, Node excludeRoot, StorageType type) {
+ DFSTopologyNodeImpl root, Node excludeRoot, StorageType type,
+ Collection<Node> excludedNodes) {
ArrayList<DFSTopologyNodeImpl> candidates = new ArrayList<>();
int excludeCount = 0;
if (excludeRoot != null && root.isAncestor(excludeRoot)) {
@@ -374,6 +369,24 @@ public class DFSNetworkTopology extends NetworkTopology {
(dfsNode.isAncestor(excludeRoot) || dfsNode.equals(excludeRoot))) {
storageCount -= excludeCount;
}
+ if (excludedNodes != null) {
+ for (Node excludedNode : excludedNodes) {
+ if (excludeRoot != null && isNodeInScope(excludedNode,
+ NodeBase.getPath(excludeRoot))) {
+ continue;
+ }
+ if (isNodeInScope(excludedNode, NodeBase.getPath(node))) {
+ if (excludedNode instanceof DatanodeDescriptor) {
+ storageCount -=
+ ((DatanodeDescriptor) excludedNode).hasStorageType(type) ?
+ 1 : 0;
+ } else if (excludedNode instanceof DFSTopologyNodeImpl) {
+ storageCount -= ((DFSTopologyNodeImpl) excludedNode)
+ .getSubtreeStorageCount(type);
+ }
+ }
+ }
+ }
if (storageCount > 0) {
candidates.add(dfsNode);
}
---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org