You are viewing a plain text version of this content. The canonical link for it is here.
Posted to common-commits@hadoop.apache.org by cn...@apache.org on 2013/07/18 07:32:23 UTC
svn commit: r1504365 - in /hadoop/common/branches/branch-1-win: ./
src/core/org/apache/hadoop/net/
src/hdfs/org/apache/hadoop/hdfs/server/namenode/
Author: cnauroth
Date: Thu Jul 18 05:32:23 2013
New Revision: 1504365
URL: http://svn.apache.org/r1504365
Log:
HDFS-5001. TestAzureBlockPlacementPolicy and TestReplicationPolicyWithNodeGroup failed, caused by 1) old APIs and 2) an incorrect value of depthOfAllLeaves. Contributed by Xi Fang.
Modified:
hadoop/common/branches/branch-1-win/CHANGES.branch-1-win.txt
hadoop/common/branches/branch-1-win/src/core/org/apache/hadoop/net/NetworkTopology.java
hadoop/common/branches/branch-1-win/src/hdfs/org/apache/hadoop/hdfs/server/namenode/AzureBlockPlacementPolicy.java
hadoop/common/branches/branch-1-win/src/hdfs/org/apache/hadoop/hdfs/server/namenode/BlockPlacementPolicyDefault.java
hadoop/common/branches/branch-1-win/src/hdfs/org/apache/hadoop/hdfs/server/namenode/BlockPlacementPolicyWithNodeGroup.java
Modified: hadoop/common/branches/branch-1-win/CHANGES.branch-1-win.txt
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-1-win/CHANGES.branch-1-win.txt?rev=1504365&r1=1504364&r2=1504365&view=diff
==============================================================================
--- hadoop/common/branches/branch-1-win/CHANGES.branch-1-win.txt (original)
+++ hadoop/common/branches/branch-1-win/CHANGES.branch-1-win.txt Thu Jul 18 05:32:23 2013
@@ -316,6 +316,10 @@ Branch-hadoop-1-win (branched from branc
MAPREDUCE-5391. TestNonLocalJobJarSubmission fails on Windows due to missing
classpath entries. (cnauroth)
+ HDFS-5001. TestAzureBlockPlacementPolicy and
+ TestReplicationPolicyWithNodeGroup failed caused by 1) old APIs and
+ 2) incorrect value of depthOfAllLeaves. (Xi Fang via cnauroth)
+
Merged from branch-1
HDFS-385. Backport: Add support for an experimental API that allows a
Modified: hadoop/common/branches/branch-1-win/src/core/org/apache/hadoop/net/NetworkTopology.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-1-win/src/core/org/apache/hadoop/net/NetworkTopology.java?rev=1504365&r1=1504364&r2=1504365&view=diff
==============================================================================
--- hadoop/common/branches/branch-1-win/src/core/org/apache/hadoop/net/NetworkTopology.java (original)
+++ hadoop/common/branches/branch-1-win/src/core/org/apache/hadoop/net/NetworkTopology.java Thu Jul 18 05:32:23 2013
@@ -381,22 +381,23 @@ public class NetworkTopology {
+ node.toString()
+ " at an illegal network location");
}
+ if (clusterMap.getNumOfLeaves() == 0) {
+ depthOfAllLeaves = -1;
+ }
if (clusterMap.add(node)) {
LOG.info("Adding a new node: "+NodeBase.getPath(node));
if (rack == null) {
numOfRacks++;
}
- if (!(node instanceof InnerNode)) {
- if (depthOfAllLeaves == -1) {
- depthOfAllLeaves = node.getLevel();
- } else {
- if (depthOfAllLeaves != node.getLevel()) {
- LOG.error("Error: can't add leaf node at depth " +
- node.getLevel() + " to topology:\n" + oldTopoStr);
- throw new InvalidTopologyException("Invalid network topology. " +
- "You cannot have a rack and a non-rack node at the same " +
- "level of the network topology.");
- }
+ if (depthOfAllLeaves == -1) {
+ depthOfAllLeaves = node.getLevel();
+ } else {
+ if (depthOfAllLeaves != node.getLevel()) {
+ LOG.error("Error: can't add leaf node at depth " +
+ node.getLevel() + " to topology:\n" + oldTopoStr);
+ throw new InvalidTopologyException("Invalid network topology. " +
+ "You cannot have a rack and a non-rack node at the same " +
+ "level of the network topology.");
}
}
}
Modified: hadoop/common/branches/branch-1-win/src/hdfs/org/apache/hadoop/hdfs/server/namenode/AzureBlockPlacementPolicy.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-1-win/src/hdfs/org/apache/hadoop/hdfs/server/namenode/AzureBlockPlacementPolicy.java?rev=1504365&r1=1504364&r2=1504365&view=diff
==============================================================================
--- hadoop/common/branches/branch-1-win/src/hdfs/org/apache/hadoop/hdfs/server/namenode/AzureBlockPlacementPolicy.java (original)
+++ hadoop/common/branches/branch-1-win/src/hdfs/org/apache/hadoop/hdfs/server/namenode/AzureBlockPlacementPolicy.java Thu Jul 18 05:32:23 2013
@@ -360,7 +360,8 @@ public class AzureBlockPlacementPolicy e
@Override
protected DatanodeDescriptor chooseTarget(int numOfReplicas,
DatanodeDescriptor writer, HashMap<Node, Node> excludedNodes,
- long blocksize, int maxNodesPerRack, List<DatanodeDescriptor> results) {
+ long blocksize, int maxNodesPerRack, List<DatanodeDescriptor> results,
+ boolean avoidStaleNodes) {
if (numOfReplicas == 0 || clusterMap.getNumOfLeaves() == 0) {
return writer;
Modified: hadoop/common/branches/branch-1-win/src/hdfs/org/apache/hadoop/hdfs/server/namenode/BlockPlacementPolicyDefault.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-1-win/src/hdfs/org/apache/hadoop/hdfs/server/namenode/BlockPlacementPolicyDefault.java?rev=1504365&r1=1504364&r2=1504365&view=diff
==============================================================================
--- hadoop/common/branches/branch-1-win/src/hdfs/org/apache/hadoop/hdfs/server/namenode/BlockPlacementPolicyDefault.java (original)
+++ hadoop/common/branches/branch-1-win/src/hdfs/org/apache/hadoop/hdfs/server/namenode/BlockPlacementPolicyDefault.java Thu Jul 18 05:32:23 2013
@@ -522,8 +522,7 @@ public class BlockPlacementPolicyDefault
protected boolean isGoodTarget(DatanodeDescriptor node,
long blockSize, int maxTargetPerLoc,
List<DatanodeDescriptor> results) {
- return isGoodTarget(node, blockSize, maxTargetPerLoc,
- this.considerLoad, results, false);
+ return isGoodTarget(node, blockSize, maxTargetPerLoc, results, false);
}
/* judge if a node is a good target.
Modified: hadoop/common/branches/branch-1-win/src/hdfs/org/apache/hadoop/hdfs/server/namenode/BlockPlacementPolicyWithNodeGroup.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-1-win/src/hdfs/org/apache/hadoop/hdfs/server/namenode/BlockPlacementPolicyWithNodeGroup.java?rev=1504365&r1=1504364&r2=1504365&view=diff
==============================================================================
--- hadoop/common/branches/branch-1-win/src/hdfs/org/apache/hadoop/hdfs/server/namenode/BlockPlacementPolicyWithNodeGroup.java (original)
+++ hadoop/common/branches/branch-1-win/src/hdfs/org/apache/hadoop/hdfs/server/namenode/BlockPlacementPolicyWithNodeGroup.java Thu Jul 18 05:32:23 2013
@@ -66,18 +66,18 @@ public class BlockPlacementPolicyWithNod
@Override
protected DatanodeDescriptor chooseLocalNode(DatanodeDescriptor localMachine,
HashMap<Node, Node> excludedNodes, long blocksize, int maxNodesPerRack,
- List<DatanodeDescriptor> results)
+ List<DatanodeDescriptor> results, boolean avoidStaleNodes)
throws NotEnoughReplicasException {
// if no local machine, randomly choose one node
if (localMachine == null)
return chooseRandom(NodeBase.ROOT, excludedNodes,
- blocksize, maxNodesPerRack, results);
+ blocksize, maxNodesPerRack, results, avoidStaleNodes);
// otherwise try local machine first
Node oldNode = excludedNodes.put(localMachine, localMachine);
if (oldNode == null) { // was not in the excluded list
if (isGoodTarget(localMachine, blocksize,
- maxNodesPerRack, false, results)) {
+ maxNodesPerRack, false, results, avoidStaleNodes)) {
results.add(localMachine);
// Nodes under same nodegroup should be excluded.
addNodeGroupToExcludedNodes(excludedNodes,
@@ -89,13 +89,13 @@ public class BlockPlacementPolicyWithNod
// try a node on local node group
DatanodeDescriptor chosenNode = chooseLocalNodeGroup(
(NetworkTopologyWithNodeGroup)clusterMap, localMachine, excludedNodes,
- blocksize, maxNodesPerRack, results);
+ blocksize, maxNodesPerRack, results, avoidStaleNodes);
if (chosenNode != null) {
return chosenNode;
}
// try a node on local rack
return chooseLocalRack(localMachine, excludedNodes,
- blocksize, maxNodesPerRack, results);
+ blocksize, maxNodesPerRack, results, avoidStaleNodes);
}
@Override
@@ -118,12 +118,13 @@ public class BlockPlacementPolicyWithNod
@Override
protected DatanodeDescriptor chooseLocalRack(DatanodeDescriptor localMachine,
HashMap<Node, Node> excludedNodes, long blocksize, int maxNodesPerRack,
- List<DatanodeDescriptor> results)
+ List<DatanodeDescriptor> results, boolean avoidStaleNodes)
throws NotEnoughReplicasException {
// no local machine, so choose a random machine
if (localMachine == null) {
return chooseRandom(NodeBase.ROOT, excludedNodes,
- blocksize, maxNodesPerRack, results);
+ blocksize, maxNodesPerRack, results,
+ avoidStaleNodes);
}
// choose one from the local rack, but off-nodegroup
@@ -131,7 +132,8 @@ public class BlockPlacementPolicyWithNod
return chooseRandom(NetworkTopology.getFirstHalf(
localMachine.getNetworkLocation()),
excludedNodes, blocksize,
- maxNodesPerRack, results);
+ maxNodesPerRack, results,
+ avoidStaleNodes);
} catch (NotEnoughReplicasException e1) {
// find the second replica
DatanodeDescriptor newLocal=null;
@@ -147,16 +149,16 @@ public class BlockPlacementPolicyWithNod
try {
return chooseRandom(
clusterMap.getRack(newLocal.getNetworkLocation()), excludedNodes,
- blocksize, maxNodesPerRack, results);
+ blocksize, maxNodesPerRack, results, avoidStaleNodes);
} catch(NotEnoughReplicasException e2) {
//otherwise randomly choose one from the network
return chooseRandom(NodeBase.ROOT, excludedNodes, blocksize,
- maxNodesPerRack, results);
+ maxNodesPerRack, results, avoidStaleNodes);
}
} else {
//otherwise randomly choose one from the network
return chooseRandom(NodeBase.ROOT, excludedNodes, blocksize,
- maxNodesPerRack, results);
+ maxNodesPerRack, results, avoidStaleNodes);
}
}
}
@@ -164,19 +166,20 @@ public class BlockPlacementPolicyWithNod
@Override
protected void chooseRemoteRack(int numOfReplicas,
DatanodeDescriptor localMachine, HashMap<Node, Node> excludedNodes,
- long blocksize, int maxReplicasPerRack, List<DatanodeDescriptor> results
- ) throws NotEnoughReplicasException {
+ long blocksize, int maxReplicasPerRack, List<DatanodeDescriptor> results,
+ boolean avoidStaleNodes) throws NotEnoughReplicasException {
int oldNumOfReplicas = results.size();
// randomly choose one node from remote racks
try {
chooseRandom(
numOfReplicas,
"~" + NetworkTopology.getFirstHalf(localMachine.getNetworkLocation()),
- excludedNodes, blocksize, maxReplicasPerRack, results);
+ excludedNodes, blocksize, maxReplicasPerRack, results,
+ avoidStaleNodes);
} catch (NotEnoughReplicasException e) {
chooseRandom(numOfReplicas - (results.size() - oldNumOfReplicas),
localMachine.getNetworkLocation(), excludedNodes, blocksize,
- maxReplicasPerRack, results);
+ maxReplicasPerRack, results, avoidStaleNodes);
}
}
@@ -189,19 +192,20 @@ public class BlockPlacementPolicyWithNod
private DatanodeDescriptor chooseLocalNodeGroup(
NetworkTopologyWithNodeGroup clusterMap, DatanodeDescriptor localMachine,
HashMap<Node, Node> excludedNodes, long blocksize, int maxNodesPerRack,
- List<DatanodeDescriptor> results)
+ List<DatanodeDescriptor> results, boolean avoidStaleNodes)
throws NotEnoughReplicasException {
// no local machine, so choose a random machine
if (localMachine == null) {
return chooseRandom(NodeBase.ROOT, excludedNodes,
- blocksize, maxNodesPerRack, results);
+ blocksize, maxNodesPerRack, results, avoidStaleNodes);
}
// choose one from the local node group
try {
return chooseRandom(
clusterMap.getNodeGroup(localMachine.getNetworkLocation()),
- excludedNodes, blocksize, maxNodesPerRack, results);
+ excludedNodes, blocksize, maxNodesPerRack, results,
+ avoidStaleNodes);
} catch (NotEnoughReplicasException e1) {
// find the second replica
DatanodeDescriptor newLocal=null;
@@ -217,16 +221,16 @@ public class BlockPlacementPolicyWithNod
try {
return chooseRandom(
clusterMap.getNodeGroup(newLocal.getNetworkLocation()),
- excludedNodes, blocksize, maxNodesPerRack, results);
+ excludedNodes, blocksize, maxNodesPerRack, results, avoidStaleNodes);
} catch(NotEnoughReplicasException e2) {
//otherwise randomly choose one from the network
return chooseRandom(NodeBase.ROOT, excludedNodes, blocksize,
- maxNodesPerRack, results);
+ maxNodesPerRack, results, avoidStaleNodes);
}
} else {
//otherwise randomly choose one from the network
return chooseRandom(NodeBase.ROOT, excludedNodes, blocksize,
- maxNodesPerRack, results);
+ maxNodesPerRack, results, avoidStaleNodes);
}
}
}