You are viewing a plain text version of this content. The canonical link for it is here.
Posted to common-commits@hadoop.apache.org by cd...@apache.org on 2015/07/07 00:16:06 UTC
[1/2] hadoop git commit: HADOOP-12185. NetworkTopology is not
efficient adding/getting/removing nodes. Contributed by Inigo Goiri
Repository: hadoop
Updated Branches:
refs/heads/branch-2 d01eaef40 -> 360164e41
refs/heads/trunk ed1e3ce48 -> 47a69ec7a
HADOOP-12185. NetworkTopology is not efficient adding/getting/removing nodes. Contributed by Inigo Goiri
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/47a69ec7
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/47a69ec7
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/47a69ec7
Branch: refs/heads/trunk
Commit: 47a69ec7a5417cb56b75d07184dd6888ff068302
Parents: ed1e3ce
Author: Chris Douglas <cd...@apache.org>
Authored: Mon Jul 6 15:03:22 2015 -0700
Committer: Chris Douglas <cd...@apache.org>
Committed: Mon Jul 6 15:03:22 2015 -0700
----------------------------------------------------------------------
.../org/apache/hadoop/net/NetworkTopology.java | 59 +++++++--------
.../apache/hadoop/net/TestClusterTopology.java | 75 ++++++++++++++++++--
2 files changed, 102 insertions(+), 32 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/47a69ec7/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetworkTopology.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetworkTopology.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetworkTopology.java
index 63b6763..970ad40 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetworkTopology.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetworkTopology.java
@@ -18,9 +18,11 @@
package org.apache.hadoop.net;
import java.util.ArrayList;
+import java.util.HashMap;
import java.util.Collection;
import java.util.Collections;
import java.util.List;
+import java.util.Map;
import java.util.Random;
import java.util.TreeMap;
import java.util.concurrent.locks.ReadWriteLock;
@@ -80,6 +82,7 @@ public class NetworkTopology {
*/
static class InnerNode extends NodeBase {
protected List<Node> children=new ArrayList<Node>();
+ private Map<String, Node> childrenMap = new HashMap<String, Node>();
private int numOfLeaves;
/** Construct an InnerNode from a path-like string */
@@ -171,10 +174,13 @@ public class NetworkTopology {
// this node is the parent of n; add n directly
n.setParent(this);
n.setLevel(this.level+1);
- for(int i=0; i<children.size(); i++) {
- if (children.get(i).getName().equals(n.getName())) {
- children.set(i, n);
- return false;
+ Node prev = childrenMap.put(n.getName(), n);
+ if (prev != null) {
+ for(int i=0; i<children.size(); i++) {
+ if (children.get(i).getName().equals(n.getName())) {
+ children.set(i, n);
+ return false;
+ }
}
}
children.add(n);
@@ -183,17 +189,12 @@ public class NetworkTopology {
} else {
// find the next ancestor node
String parentName = getNextAncestorName(n);
- InnerNode parentNode = null;
- for(int i=0; i<children.size(); i++) {
- if (children.get(i).getName().equals(parentName)) {
- parentNode = (InnerNode)children.get(i);
- break;
- }
- }
+ InnerNode parentNode = (InnerNode)childrenMap.get(parentName);
if (parentNode == null) {
// create a new InnerNode
parentNode = createParentNode(parentName);
children.add(parentNode);
+ childrenMap.put(parentNode.getName(), parentNode);
}
// add n to the subtree of the next ancestor node
if (parentNode.add(n)) {
@@ -234,12 +235,15 @@ public class NetworkTopology {
+parent+", is not a descendent of "+currentPath);
if (isParent(n)) {
// this node is the parent of n; remove n directly
- for(int i=0; i<children.size(); i++) {
- if (children.get(i).getName().equals(n.getName())) {
- children.remove(i);
- numOfLeaves--;
- n.setParent(null);
- return true;
+ if (childrenMap.containsKey(n.getName())) {
+ for (int i=0; i<children.size(); i++) {
+ if (children.get(i).getName().equals(n.getName())) {
+ children.remove(i);
+ childrenMap.remove(n.getName());
+ numOfLeaves--;
+ n.setParent(null);
+ return true;
+ }
}
}
return false;
@@ -262,7 +266,8 @@ public class NetworkTopology {
// if the parent node has no children, remove the parent node too
if (isRemoved) {
if (parentNode.getNumOfChildren() == 0) {
- children.remove(i);
+ Node prev = children.remove(i);
+ childrenMap.remove(prev.getName());
}
numOfLeaves--;
}
@@ -279,12 +284,7 @@ public class NetworkTopology {
if (loc == null || loc.length() == 0) return this;
String[] path = loc.split(PATH_SEPARATOR_STR, 2);
- Node childnode = null;
- for(int i=0; i<children.size(); i++) {
- if (children.get(i).getName().equals(path[0])) {
- childnode = children.get(i);
- }
- }
+ Node childnode = childrenMap.get(path[0]);
if (childnode == null) return null; // non-existing node
if (path.length == 1) return childnode;
if (childnode instanceof InnerNode) {
@@ -311,10 +311,13 @@ public class NetworkTopology {
isLeaf ? 1 : ((InnerNode)excludedNode).getNumOfLeaves();
if (isLeafParent()) { // children are leaves
if (isLeaf) { // excluded node is a leaf node
- int excludedIndex = children.indexOf(excludedNode);
- if (excludedIndex != -1 && leafIndex >= 0) {
- // excluded node is one of the children so adjust the leaf index
- leafIndex = leafIndex>=excludedIndex ? leafIndex+1 : leafIndex;
+ if (excludedNode != null &&
+ childrenMap.containsKey(excludedNode.getName())) {
+ int excludedIndex = children.indexOf(excludedNode);
+ if (excludedIndex != -1 && leafIndex >= 0) {
+ // excluded node is one of the children so adjust the leaf index
+ leafIndex = leafIndex>=excludedIndex ? leafIndex+1 : leafIndex;
+ }
}
}
// range check
http://git-wip-us.apache.org/repos/asf/hadoop/blob/47a69ec7/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/TestClusterTopology.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/TestClusterTopology.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/TestClusterTopology.java
index 3ab663f..72fc5cb 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/TestClusterTopology.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/TestClusterTopology.java
@@ -18,8 +18,10 @@
package org.apache.hadoop.net;
import java.util.ArrayList;
+import java.util.HashMap;
import java.util.List;
+import org.apache.commons.math3.stat.inference.ChiSquareTest;
import org.junit.Assert;
import org.junit.Test;
@@ -79,12 +81,14 @@ public class TestClusterTopology extends Assert {
public void testCountNumNodes() throws Exception {
// create the topology
NetworkTopology cluster = new NetworkTopology();
- cluster.add(getNewNode("node1", "/d1/r1"));
+ NodeElement node1 = getNewNode("node1", "/d1/r1");
+ cluster.add(node1);
NodeElement node2 = getNewNode("node2", "/d1/r2");
cluster.add(node2);
- cluster.add(getNewNode("node3", "/d1/r3"));
- NodeElement node3 = getNewNode("node4", "/d1/r4");
+ NodeElement node3 = getNewNode("node3", "/d1/r3");
cluster.add(node3);
+ NodeElement node4 = getNewNode("node4", "/d1/r4");
+ cluster.add(node4);
// create exclude list
List<Node> excludedNodes = new ArrayList<Node>();
@@ -95,7 +99,7 @@ public class TestClusterTopology extends Assert {
assertEquals("4 nodes should be available with extra excluded Node", 4,
cluster.countNumOfAvailableNodes(NodeBase.ROOT, excludedNodes));
// add one existing node to exclude list
- excludedNodes.add(node3);
+ excludedNodes.add(node4);
assertEquals("excluded nodes with ROOT scope should be considered", 3,
cluster.countNumOfAvailableNodes(NodeBase.ROOT, excludedNodes));
assertEquals("excluded nodes without ~ scope should be considered", 2,
@@ -112,6 +116,69 @@ public class TestClusterTopology extends Assert {
// getting count with non-exist scope.
assertEquals("No nodes should be considered for non-exist scope", 0,
cluster.countNumOfAvailableNodes("/non-exist", excludedNodes));
+ // remove a node from the cluster
+ cluster.remove(node1);
+ assertEquals("1 node should be available", 1,
+ cluster.countNumOfAvailableNodes(NodeBase.ROOT, excludedNodes));
+ }
+
+ /**
+ * Test how well we pick random nodes.
+ */
+ @Test
+ public void testChooseRandom() {
+ // create the topology
+ NetworkTopology cluster = new NetworkTopology();
+ NodeElement node1 = getNewNode("node1", "/d1/r1");
+ cluster.add(node1);
+ NodeElement node2 = getNewNode("node2", "/d1/r2");
+ cluster.add(node2);
+ NodeElement node3 = getNewNode("node3", "/d1/r3");
+ cluster.add(node3);
+ NodeElement node4 = getNewNode("node4", "/d1/r3");
+ cluster.add(node4);
+
+ // Number of iterations to do the test
+ int numIterations = 100;
+
+ // Pick random nodes
+ HashMap<String,Integer> histogram = new HashMap<String,Integer>();
+ for (int i=0; i<numIterations; i++) {
+ String randomNode = cluster.chooseRandom(NodeBase.ROOT).getName();
+ if (!histogram.containsKey(randomNode)) {
+ histogram.put(randomNode, 0);
+ }
+ histogram.put(randomNode, histogram.get(randomNode) + 1);
+ }
+ assertEquals("Random is not selecting all nodes", 4, histogram.size());
+
+ // Check with 99% confidence (alpha = 0.01; confidence = 100 * (1 - alpha))
+ ChiSquareTest chiSquareTest = new ChiSquareTest();
+ double[] expected = new double[histogram.size()];
+ long[] observed = new long[histogram.size()];
+ int j=0;
+ for (Integer occurrence : histogram.values()) {
+ expected[j] = 1.0 * numIterations / histogram.size();
+ observed[j] = occurrence;
+ j++;
+ }
+ boolean chiSquareTestRejected =
+ chiSquareTest.chiSquareTest(expected, observed, 0.01);
+
+ // Check that they have the proper distribution
+ assertFalse("Not choosing nodes randomly", chiSquareTestRejected);
+
+ // Pick random nodes excluding the 2 nodes in /d1/r3
+ histogram = new HashMap<String,Integer>();
+ for (int i=0; i<numIterations; i++) {
+ String randomNode = cluster.chooseRandom("~/d1/r3").getName();
+ if (!histogram.containsKey(randomNode)) {
+ histogram.put(randomNode, 0);
+ }
+ histogram.put(randomNode, histogram.get(randomNode) + 1);
+ }
+ assertEquals("Random is not selecting the nodes it should",
+ 2, histogram.size());
}
private NodeElement getNewNode(String name, String rackLocation) {
[2/2] hadoop git commit: HADOOP-12185. NetworkTopology is not
efficient adding/getting/removing nodes. Contributed by Inigo Goiri (cherry
picked from commit 47a69ec7a5417cb56b75d07184dd6888ff068302)
Posted by cd...@apache.org.
HADOOP-12185. NetworkTopology is not efficient adding/getting/removing nodes. Contributed by Inigo Goiri
(cherry picked from commit 47a69ec7a5417cb56b75d07184dd6888ff068302)
Conflicts:
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetworkTopology.java
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/360164e4
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/360164e4
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/360164e4
Branch: refs/heads/branch-2
Commit: 360164e41e3bdf30b17f447ba466b47f1580c9f6
Parents: d01eaef
Author: Chris Douglas <cd...@apache.org>
Authored: Mon Jul 6 15:03:22 2015 -0700
Committer: Chris Douglas <cd...@apache.org>
Committed: Mon Jul 6 15:15:45 2015 -0700
----------------------------------------------------------------------
hadoop-common-project/hadoop-common/CHANGES.txt | 3 +
.../org/apache/hadoop/net/NetworkTopology.java | 60 ++++++++--------
.../apache/hadoop/net/TestClusterTopology.java | 75 ++++++++++++++++++--
3 files changed, 105 insertions(+), 33 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/360164e4/hadoop-common-project/hadoop-common/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt
index c8e0c65..b5c14c1 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -439,6 +439,9 @@ Release 2.8.0 - UNRELEASED
HADOOP-12186. ActiveStandbyElector shouldn't call monitorLockNodeAsync
multiple times (zhihai xu via vinayakumarb)
+ HADOOP-12185. NetworkTopology is not efficient adding/getting/removing
+ nodes. (Inigo Goiri via cdouglas)
+
Release 2.7.2 - UNRELEASED
INCOMPATIBLE CHANGES
http://git-wip-us.apache.org/repos/asf/hadoop/blob/360164e4/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetworkTopology.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetworkTopology.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetworkTopology.java
index 91410c7..970ad40 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetworkTopology.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetworkTopology.java
@@ -18,10 +18,11 @@
package org.apache.hadoop.net;
import java.util.ArrayList;
-import java.util.List;
+import java.util.HashMap;
import java.util.Collection;
import java.util.Collections;
import java.util.List;
+import java.util.Map;
import java.util.Random;
import java.util.TreeMap;
import java.util.concurrent.locks.ReadWriteLock;
@@ -81,6 +82,7 @@ public class NetworkTopology {
*/
static class InnerNode extends NodeBase {
protected List<Node> children=new ArrayList<Node>();
+ private Map<String, Node> childrenMap = new HashMap<String, Node>();
private int numOfLeaves;
/** Construct an InnerNode from a path-like string */
@@ -172,10 +174,13 @@ public class NetworkTopology {
// this node is the parent of n; add n directly
n.setParent(this);
n.setLevel(this.level+1);
- for(int i=0; i<children.size(); i++) {
- if (children.get(i).getName().equals(n.getName())) {
- children.set(i, n);
- return false;
+ Node prev = childrenMap.put(n.getName(), n);
+ if (prev != null) {
+ for(int i=0; i<children.size(); i++) {
+ if (children.get(i).getName().equals(n.getName())) {
+ children.set(i, n);
+ return false;
+ }
}
}
children.add(n);
@@ -184,17 +189,12 @@ public class NetworkTopology {
} else {
// find the next ancestor node
String parentName = getNextAncestorName(n);
- InnerNode parentNode = null;
- for(int i=0; i<children.size(); i++) {
- if (children.get(i).getName().equals(parentName)) {
- parentNode = (InnerNode)children.get(i);
- break;
- }
- }
+ InnerNode parentNode = (InnerNode)childrenMap.get(parentName);
if (parentNode == null) {
// create a new InnerNode
parentNode = createParentNode(parentName);
children.add(parentNode);
+ childrenMap.put(parentNode.getName(), parentNode);
}
// add n to the subtree of the next ancestor node
if (parentNode.add(n)) {
@@ -235,12 +235,15 @@ public class NetworkTopology {
+parent+", is not a descendent of "+currentPath);
if (isParent(n)) {
// this node is the parent of n; remove n directly
- for(int i=0; i<children.size(); i++) {
- if (children.get(i).getName().equals(n.getName())) {
- children.remove(i);
- numOfLeaves--;
- n.setParent(null);
- return true;
+ if (childrenMap.containsKey(n.getName())) {
+ for (int i=0; i<children.size(); i++) {
+ if (children.get(i).getName().equals(n.getName())) {
+ children.remove(i);
+ childrenMap.remove(n.getName());
+ numOfLeaves--;
+ n.setParent(null);
+ return true;
+ }
}
}
return false;
@@ -263,7 +266,8 @@ public class NetworkTopology {
// if the parent node has no children, remove the parent node too
if (isRemoved) {
if (parentNode.getNumOfChildren() == 0) {
- children.remove(i);
+ Node prev = children.remove(i);
+ childrenMap.remove(prev.getName());
}
numOfLeaves--;
}
@@ -280,12 +284,7 @@ public class NetworkTopology {
if (loc == null || loc.length() == 0) return this;
String[] path = loc.split(PATH_SEPARATOR_STR, 2);
- Node childnode = null;
- for(int i=0; i<children.size(); i++) {
- if (children.get(i).getName().equals(path[0])) {
- childnode = children.get(i);
- }
- }
+ Node childnode = childrenMap.get(path[0]);
if (childnode == null) return null; // non-existing node
if (path.length == 1) return childnode;
if (childnode instanceof InnerNode) {
@@ -312,10 +311,13 @@ public class NetworkTopology {
isLeaf ? 1 : ((InnerNode)excludedNode).getNumOfLeaves();
if (isLeafParent()) { // children are leaves
if (isLeaf) { // excluded node is a leaf node
- int excludedIndex = children.indexOf(excludedNode);
- if (excludedIndex != -1 && leafIndex >= 0) {
- // excluded node is one of the children so adjust the leaf index
- leafIndex = leafIndex>=excludedIndex ? leafIndex+1 : leafIndex;
+ if (excludedNode != null &&
+ childrenMap.containsKey(excludedNode.getName())) {
+ int excludedIndex = children.indexOf(excludedNode);
+ if (excludedIndex != -1 && leafIndex >= 0) {
+ // excluded node is one of the children so adjust the leaf index
+ leafIndex = leafIndex>=excludedIndex ? leafIndex+1 : leafIndex;
+ }
}
}
// range check
http://git-wip-us.apache.org/repos/asf/hadoop/blob/360164e4/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/TestClusterTopology.java
----------------------------------------------------------------------
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/TestClusterTopology.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/TestClusterTopology.java
index 3ab663f..72fc5cb 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/TestClusterTopology.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/TestClusterTopology.java
@@ -18,8 +18,10 @@
package org.apache.hadoop.net;
import java.util.ArrayList;
+import java.util.HashMap;
import java.util.List;
+import org.apache.commons.math3.stat.inference.ChiSquareTest;
import org.junit.Assert;
import org.junit.Test;
@@ -79,12 +81,14 @@ public class TestClusterTopology extends Assert {
public void testCountNumNodes() throws Exception {
// create the topology
NetworkTopology cluster = new NetworkTopology();
- cluster.add(getNewNode("node1", "/d1/r1"));
+ NodeElement node1 = getNewNode("node1", "/d1/r1");
+ cluster.add(node1);
NodeElement node2 = getNewNode("node2", "/d1/r2");
cluster.add(node2);
- cluster.add(getNewNode("node3", "/d1/r3"));
- NodeElement node3 = getNewNode("node4", "/d1/r4");
+ NodeElement node3 = getNewNode("node3", "/d1/r3");
cluster.add(node3);
+ NodeElement node4 = getNewNode("node4", "/d1/r4");
+ cluster.add(node4);
// create exclude list
List<Node> excludedNodes = new ArrayList<Node>();
@@ -95,7 +99,7 @@ public class TestClusterTopology extends Assert {
assertEquals("4 nodes should be available with extra excluded Node", 4,
cluster.countNumOfAvailableNodes(NodeBase.ROOT, excludedNodes));
// add one existing node to exclude list
- excludedNodes.add(node3);
+ excludedNodes.add(node4);
assertEquals("excluded nodes with ROOT scope should be considered", 3,
cluster.countNumOfAvailableNodes(NodeBase.ROOT, excludedNodes));
assertEquals("excluded nodes without ~ scope should be considered", 2,
@@ -112,6 +116,69 @@ public class TestClusterTopology extends Assert {
// getting count with non-exist scope.
assertEquals("No nodes should be considered for non-exist scope", 0,
cluster.countNumOfAvailableNodes("/non-exist", excludedNodes));
+ // remove a node from the cluster
+ cluster.remove(node1);
+ assertEquals("1 node should be available", 1,
+ cluster.countNumOfAvailableNodes(NodeBase.ROOT, excludedNodes));
+ }
+
+ /**
+ * Test how well we pick random nodes.
+ */
+ @Test
+ public void testChooseRandom() {
+ // create the topology
+ NetworkTopology cluster = new NetworkTopology();
+ NodeElement node1 = getNewNode("node1", "/d1/r1");
+ cluster.add(node1);
+ NodeElement node2 = getNewNode("node2", "/d1/r2");
+ cluster.add(node2);
+ NodeElement node3 = getNewNode("node3", "/d1/r3");
+ cluster.add(node3);
+ NodeElement node4 = getNewNode("node4", "/d1/r3");
+ cluster.add(node4);
+
+ // Number of iterations to do the test
+ int numIterations = 100;
+
+ // Pick random nodes
+ HashMap<String,Integer> histogram = new HashMap<String,Integer>();
+ for (int i=0; i<numIterations; i++) {
+ String randomNode = cluster.chooseRandom(NodeBase.ROOT).getName();
+ if (!histogram.containsKey(randomNode)) {
+ histogram.put(randomNode, 0);
+ }
+ histogram.put(randomNode, histogram.get(randomNode) + 1);
+ }
+ assertEquals("Random is not selecting all nodes", 4, histogram.size());
+
+ // Check with 99% confidence (alpha = 0.01; confidence = 100 * (1 - alpha))
+ ChiSquareTest chiSquareTest = new ChiSquareTest();
+ double[] expected = new double[histogram.size()];
+ long[] observed = new long[histogram.size()];
+ int j=0;
+ for (Integer occurrence : histogram.values()) {
+ expected[j] = 1.0 * numIterations / histogram.size();
+ observed[j] = occurrence;
+ j++;
+ }
+ boolean chiSquareTestRejected =
+ chiSquareTest.chiSquareTest(expected, observed, 0.01);
+
+ // Check that they have the proper distribution
+ assertFalse("Not choosing nodes randomly", chiSquareTestRejected);
+
+ // Pick random nodes excluding the 2 nodes in /d1/r3
+ histogram = new HashMap<String,Integer>();
+ for (int i=0; i<numIterations; i++) {
+ String randomNode = cluster.chooseRandom("~/d1/r3").getName();
+ if (!histogram.containsKey(randomNode)) {
+ histogram.put(randomNode, 0);
+ }
+ histogram.put(randomNode, histogram.get(randomNode) + 1);
+ }
+ assertEquals("Random is not selecting the nodes it should",
+ 2, histogram.size());
}
private NodeElement getNewNode(String name, String rackLocation) {