Posted to commits@ozone.apache.org by sa...@apache.org on 2020/02/10 02:23:35 UTC

[hadoop-ozone] 17/18: HDDS-2924. Fix Pipeline#nodeIdsHash collision issue. (#478)

This is an automated email from the ASF dual-hosted git repository.

sammichen pushed a commit to branch HDDS-1564
in repository https://gitbox.apache.org/repos/asf/hadoop-ozone.git

commit dd2b7e8cec022ccfc1f4abf193dbb62cc5e883ec
Author: Xiaoyu Yao <xy...@apache.org>
AuthorDate: Mon Jan 27 12:45:58 2020 -0800

    HDDS-2924. Fix Pipeline#nodeIdsHash collision issue. (#478)
---
 .../apache/hadoop/hdds/scm/pipeline/Pipeline.java  | 38 ++++++++++----------
 .../hdds/scm/pipeline/PipelinePlacementPolicy.java | 40 ++++++++++------------
 .../hdds/scm/pipeline/PipelineStateManager.java    |  7 ----
 .../hdds/scm/pipeline/RatisPipelineProvider.java   |  8 -----
 .../hdds/scm/pipeline/RatisPipelineUtils.java      | 16 ++-------
 .../scm/pipeline/MockRatisPipelineProvider.java    |  4 ---
 .../scm/pipeline/TestPipelinePlacementPolicy.java  |  7 ++--
 .../scm/pipeline/TestRatisPipelineProvider.java    | 36 ++-----------------
 .../hdds/scm/pipeline/TestSCMPipelineManager.java  | 17 ++++-----
 .../hdds/scm/cli/datanode/ListInfoSubcommand.java  |  4 +--
 .../compose/ozone-topology/docker-compose.yaml     | 28 +++++++++++++++
 11 files changed, 84 insertions(+), 121 deletions(-)
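
Background on the collision: the removed nodeIdsHash encoded a factor
THREE pipeline as the XOR of its three datanode UUID hash codes, and XOR
is lossy, so two pipelines over different datanode sets can map to the
same hash. A minimal, self-contained sketch (illustration only, not part
of this commit; encode() mirrors the removed
encodeNodeIdsOfFactorThreePipeline, and the UUIDs are hand-picked so
that new UUID(0, n) hashes to n):

    import java.util.Arrays;
    import java.util.HashSet;
    import java.util.List;
    import java.util.UUID;

    public class NodeIdsHashCollision {
      // XOR of the three node UUID hash codes, as the removed helper did.
      static int encode(List<UUID> ids) {
        return ids.get(0).hashCode() ^ ids.get(1).hashCode()
            ^ ids.get(2).hashCode();
      }

      public static void main(String[] args) {
        // Java's UUID.hashCode() folds (msb ^ lsb) into an int, so
        // new UUID(0, n) hashes to n for small n.
        List<UUID> setA = Arrays.asList(
            new UUID(0, 1), new UUID(0, 2), new UUID(0, 3)); // 1^2^3 == 0
        List<UUID> setB = Arrays.asList(
            new UUID(0, 7), new UUID(0, 5), new UUID(0, 2)); // 7^5^2 == 0
        System.out.println(encode(setA) == encode(setB));    // true
        System.out.println(new HashSet<>(setA)
            .equals(new HashSet<>(setB)));                   // false
      }
    }

The patch therefore drops the hash and compares node sets directly via
the new Pipeline#getNodeSet and Pipeline#sameDatanodes below.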

diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/pipeline/Pipeline.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/pipeline/Pipeline.java
index 6849494..5a28e42 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/pipeline/Pipeline.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/pipeline/Pipeline.java
@@ -21,11 +21,13 @@ package org.apache.hadoop.hdds.scm.pipeline;
 import java.io.IOException;
 import java.time.Instant;
 import java.util.ArrayList;
+import java.util.Collections;
 import java.util.Iterator;
 import java.util.LinkedHashMap;
 import java.util.List;
 import java.util.Map;
 import java.util.Optional;
+import java.util.Set;
 import java.util.UUID;
 import java.util.stream.Collectors;
 
@@ -59,8 +61,6 @@ public final class Pipeline {
   private UUID leaderId;
   // Timestamp for pipeline upon creation
   private Instant creationTimestamp;
-  // Only valid for Ratis THREE pipeline. No need persist.
-  private int nodeIdsHash;
 
   /**
    * The immutable properties of pipeline object is used in
@@ -76,7 +76,6 @@ public final class Pipeline {
     this.state = state;
     this.nodeStatus = nodeStatus;
     this.creationTimestamp = Instant.now();
-    this.nodeIdsHash = 0;
   }
 
   /**
@@ -133,14 +132,6 @@ public final class Pipeline {
     this.creationTimestamp = creationTimestamp;
   }
 
-  public int getNodeIdsHash() {
-    return nodeIdsHash;
-  }
-
-  void setNodeIdsHash(int nodeIdsHash) {
-    this.nodeIdsHash = nodeIdsHash;
-  }
-
   /**
    * Return the pipeline leader's UUID.
    *
@@ -167,6 +158,23 @@ public final class Pipeline {
   }
 
   /**
+   * Return an immutable set of nodes which form this pipeline.
+   * @return Set of DatanodeDetails
+   */
+  public Set<DatanodeDetails> getNodeSet() {
+    return Collections.unmodifiableSet(nodeStatus.keySet());
+  }
+
+  /**
+   * Check if the input pipeline shares the same set of datanodes.
+   * @param pipeline pipeline to compare with
+   * @return true if the input pipeline shares the same set of datanodes.
+   */
+  public boolean sameDatanodes(Pipeline pipeline) {
+    return getNodeSet().equals(pipeline.getNodeSet());
+  }
+
+  /**
    * Returns the leader if found else defaults to closest node.
    *
    * @return {@link DatanodeDetails}
@@ -360,7 +368,6 @@ public final class Pipeline {
     private List<DatanodeDetails> nodesInOrder = null;
     private UUID leaderId = null;
     private Instant creationTimestamp = null;
-    private int nodeIdsHash = 0;
 
     public Builder() {}
 
@@ -373,7 +380,6 @@ public final class Pipeline {
       this.nodesInOrder = pipeline.nodesInOrder.get();
       this.leaderId = pipeline.getLeaderId();
       this.creationTimestamp = pipeline.getCreationTimestamp();
-      this.nodeIdsHash = 0;
     }
 
     public Builder setId(PipelineID id1) {
@@ -417,11 +423,6 @@ public final class Pipeline {
       return this;
     }
 
-    public Builder setNodeIdsHash(int nodeIdsHash1) {
-      this.nodeIdsHash = nodeIdsHash1;
-      return this;
-    }
-
     public Pipeline build() {
       Preconditions.checkNotNull(id);
       Preconditions.checkNotNull(type);
@@ -430,7 +431,6 @@ public final class Pipeline {
       Preconditions.checkNotNull(nodeStatus);
       Pipeline pipeline = new Pipeline(id, type, factor, state, nodeStatus);
       pipeline.setLeaderId(leaderId);
-      pipeline.setNodeIdsHash(nodeIdsHash);
       // overwrite with original creationTimestamp
       if (creationTimestamp != null) {
         pipeline.setCreationTimestamp(creationTimestamp);
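
The new accessors compare membership, so they are order-insensitive, and
getNodeSet() returns an unmodifiable view of the node map's key set. A
hedged, test-style sketch of the intended semantics (assumes the
MockDatanodeDetails helper used elsewhere in this commit and the builder
calls shown in RatisPipelineProvider below; usual imports omitted):

    List<DatanodeDetails> nodes = new ArrayList<>();
    for (int i = 0; i < 3; i++) {
      nodes.add(MockDatanodeDetails.createDatanodeDetails(UUID.randomUUID()));
    }
    List<DatanodeDetails> reversed = new ArrayList<>(nodes);
    Collections.reverse(reversed);

    Pipeline p1 = Pipeline.newBuilder()
        .setId(PipelineID.randomId())
        .setState(Pipeline.PipelineState.ALLOCATED)
        .setType(HddsProtos.ReplicationType.RATIS)
        .setFactor(HddsProtos.ReplicationFactor.THREE)
        .setNodes(nodes)
        .build();
    Pipeline p2 = Pipeline.newBuilder()
        .setId(PipelineID.randomId())
        .setState(Pipeline.PipelineState.ALLOCATED)
        .setType(HddsProtos.ReplicationType.RATIS)
        .setFactor(HddsProtos.ReplicationFactor.THREE)
        .setNodes(reversed)
        .build();

    // Same membership in a different order still matches.
    Assert.assertTrue(p1.sameDatanodes(p2));
    // getNodeSet() is unmodifiable: p1.getNodeSet().clear() would throw
    // UnsupportedOperationException.
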
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelinePlacementPolicy.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelinePlacementPolicy.java
index 4261a87..9d78063 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelinePlacementPolicy.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelinePlacementPolicy.java
@@ -145,12 +145,10 @@ public final class PipelinePlacementPolicy extends SCMCommonPlacementPolicy {
     String msg;
 
     if (initialHealthyNodesCount < nodesRequired) {
-      LOG.warn("Not enough healthy nodes to allocate pipeline." +
-              nodesRequired + " datanodes required. Found: " +
-          initialHealthyNodesCount);
       msg = String.format("Pipeline creation failed due to no sufficient" +
               " healthy datanodes. Required %d. Found %d.",
           nodesRequired, initialHealthyNodesCount);
+      LOG.warn(msg);
       throw new SCMException(msg,
           SCMException.ResultCodes.FAILED_TO_FIND_SUITABLE_NODE);
     }
@@ -229,42 +227,49 @@ public final class PipelinePlacementPolicy extends SCMCommonPlacementPolicy {
     // First choose an anchor nodes randomly
     DatanodeDetails anchor = chooseNode(healthyNodes);
     if (anchor == null) {
-      LOG.warn("Unable to find healthy nodes." +
+      LOG.warn("Unable to find healthy node for anchor(first) node." +
               " Required nodes: {}, Found nodes: {}",
           nodesRequired, results.size());
       throw new SCMException("Unable to find required number of nodes.",
           SCMException.ResultCodes.FAILED_TO_FIND_SUITABLE_NODE);
     }
+    if (LOG.isDebugEnabled()) {
+      LOG.debug("First node chosen: {}", anchor);
+    }
 
     results.add(anchor);
     exclude.add(anchor);
-    nodesRequired--;
 
     // Choose the second node on different racks from anchor.
     DatanodeDetails nodeOnDifferentRack = chooseNodeBasedOnRackAwareness(
         healthyNodes, exclude,
         nodeManager.getClusterNetworkTopologyMap(), anchor);
     if (nodeOnDifferentRack == null) {
-      LOG.warn("Pipeline Placement: Unable to find nodes on different racks " +
-              " that meet the criteria. Required nodes: {}, Found nodes: {}",
-          nodesRequired, results.size());
+      LOG.warn("Pipeline Placement: Unable to find 2nd node on different " +
+          "racks that meets the criteria. Required nodes: {}, Found nodes:" +
+          " {}", nodesRequired, results.size());
       throw new SCMException("Unable to find required number of nodes.",
           SCMException.ResultCodes.FAILED_TO_FIND_SUITABLE_NODE);
     }
+    if (LOG.isDebugEnabled()) {
+      LOG.debug("Second node chosen: {}", nodeOnDifferentRack);
+    }
 
     results.add(nodeOnDifferentRack);
     exclude.add(nodeOnDifferentRack);
-    nodesRequired--;
 
     // Then choose nodes close to anchor based on network topology
-    for (int x = 0; x < nodesRequired; x++) {
+    int nodesToFind = nodesRequired - results.size();
+    for (int x = 0; x < nodesToFind; x++) {
       // invoke the choose function defined in the derived classes.
       DatanodeDetails pick = chooseNodeFromNetworkTopology(
           nodeManager.getClusterNetworkTopologyMap(), anchor, exclude);
       if (pick != null) {
         results.add(pick);
-        // exclude the picked node for next time
         exclude.add(pick);
+        if (LOG.isDebugEnabled()) {
+          LOG.debug("Remaining node chosen: {}", pick);
+        }
       }
     }
 
@@ -306,9 +311,7 @@ public final class PipelinePlacementPolicy extends SCMCommonPlacementPolicy {
       datanodeDetails = firstNodeMetric.isGreater(secondNodeMetric.get())
           ? firstNodeDetails : secondNodeDetails;
     }
-    // the pick is decided and it should be removed from candidates.
     healthyNodes.remove(datanodeDetails);
-
     return datanodeDetails;
   }
 
@@ -331,12 +334,10 @@ public final class PipelinePlacementPolicy extends SCMCommonPlacementPolicy {
     }
 
     for (DatanodeDetails node : healthyNodes) {
-      if (excludedNodes.contains(node)
-          || networkTopology.isSameParent(anchor, node)) {
+      if (excludedNodes.contains(node) ||
+          anchor.getNetworkLocation().equals(node.getNetworkLocation())) {
         continue;
       } else {
-        // the pick is decided and it should be removed from candidates.
-        healthyNodes.remove(node);
         return node;
       }
     }
@@ -374,15 +375,10 @@ public final class PipelinePlacementPolicy extends SCMCommonPlacementPolicy {
     if (excludedNodes != null && excludedNodes.size() != 0) {
       excluded.addAll(excludedNodes);
     }
-    excluded.add(anchor);
 
     Node pick = networkTopology.chooseRandom(
         anchor.getNetworkLocation(), excluded);
     DatanodeDetails pickedNode = (DatanodeDetails) pick;
-    // exclude the picked node for next time
-    if (excludedNodes != null) {
-      excludedNodes.add(pickedNode);
-    }
     return pickedNode;
   }
 }
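
Condensed view of the selection flow after this change (sketch only;
names follow the patch): the anchor and the rack-separated node are
appended to results without decrementing nodesRequired, and the
remaining count is computed once as nodesToFind. The rack test is now a
plain comparison of network locations:

    // Hedged restatement: the patch replaces
    // networkTopology.isSameParent(anchor, node) with a direct
    // comparison of network-location strings, e.g. "/rack1" vs "/rack2".
    static boolean onDifferentRack(DatanodeDetails anchor,
        DatanodeDetails node) {
      return !anchor.getNetworkLocation().equals(node.getNetworkLocation());
    }

    // For a factor THREE pipeline:
    //   results = {anchor, nodeOnDifferentRack}       -> results.size() == 2
    //   nodesToFind = nodesRequired - results.size()  -> 3 - 2 == 1
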
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineStateManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineStateManager.java
index 051202b..bb56a03 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineStateManager.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelineStateManager.java
@@ -132,13 +132,6 @@ public class PipelineStateManager {
       pipeline = pipelineStateMap
           .updatePipelineState(pipelineId, PipelineState.OPEN);
     }
-    // Amend nodeIdsHash if needed.
-    if (pipeline.getType() == ReplicationType.RATIS &&
-        pipeline.getFactor() == ReplicationFactor.THREE &&
-        pipeline.getNodeIdsHash() == 0) {
-      pipeline.setNodeIdsHash(RatisPipelineUtils
-          .encodeNodeIdsOfFactorThreePipeline(pipeline.getNodes()));
-    }
     return pipeline;
   }
 
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/RatisPipelineProvider.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/RatisPipelineProvider.java
index 4865074..13c3b6a 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/RatisPipelineProvider.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/RatisPipelineProvider.java
@@ -157,7 +157,6 @@ public class RatisPipelineProvider implements PipelineProvider {
     }
 
     List<DatanodeDetails> dns;
-    int nodeIdHash = 0;
 
     switch(factor) {
     case ONE:
@@ -166,7 +165,6 @@ public class RatisPipelineProvider implements PipelineProvider {
     case THREE:
       dns = placementPolicy.chooseDatanodes(null,
           null, factor.getNumber(), 0);
-      nodeIdHash = RatisPipelineUtils.encodeNodeIdsOfFactorThreePipeline(dns);
       break;
     default:
       throw new IllegalStateException("Unknown factor: " + factor.name());
@@ -178,7 +176,6 @@ public class RatisPipelineProvider implements PipelineProvider {
         .setType(ReplicationType.RATIS)
         .setFactor(factor)
         .setNodes(dns)
-        .setNodeIdsHash(nodeIdHash)
         .build();
 
     // Send command to datanodes to create pipeline
@@ -199,17 +196,12 @@ public class RatisPipelineProvider implements PipelineProvider {
   @Override
   public Pipeline create(ReplicationFactor factor,
                          List<DatanodeDetails> nodes) {
-    int nodeIdHash = 0;
-    if (factor == ReplicationFactor.THREE) {
-      nodeIdHash = RatisPipelineUtils.encodeNodeIdsOfFactorThreePipeline(nodes);
-    }
     return Pipeline.newBuilder()
         .setId(PipelineID.randomId())
         .setState(PipelineState.ALLOCATED)
         .setType(ReplicationType.RATIS)
         .setFactor(factor)
         .setNodes(nodes)
-        .setNodeIdsHash(nodeIdHash)
         .build();
   }
 
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/RatisPipelineUtils.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/RatisPipelineUtils.java
index 7fe1cc1..552ae7d 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/RatisPipelineUtils.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/RatisPipelineUtils.java
@@ -96,21 +96,12 @@ public final class RatisPipelineUtils {
     }
   }
 
-  static int encodeNodeIdsOfFactorThreePipeline(List<DatanodeDetails> nodes) {
-    if (nodes.size() != HddsProtos.ReplicationFactor.THREE.getNumber()) {
-      return 0;
-    }
-    return nodes.get(0).getUuid().hashCode() ^
-        nodes.get(1).getUuid().hashCode() ^
-        nodes.get(2).getUuid().hashCode();
-  }
-
   /**
   * Return the list of pipelines that share the same set of datanodes
    * with the input pipeline.
    * @param stateManager PipelineStateManager
    * @param pipeline input pipeline
-   * @return first matched pipeline
+   * @return list of matched pipelines
    */
   static List<Pipeline> checkPipelineContainSameDatanodes(
       PipelineStateManager stateManager, Pipeline pipeline) {
@@ -118,9 +109,8 @@ public final class RatisPipelineUtils {
         HddsProtos.ReplicationType.RATIS,
         HddsProtos.ReplicationFactor.THREE)
         .stream().filter(p -> !p.getId().equals(pipeline.getId()) &&
-            (// For all OPEN or ALLOCATED pipelines
-                p.getPipelineState() != Pipeline.PipelineState.CLOSED &&
-                p.getNodeIdsHash() == pipeline.getNodeIdsHash()))
+            (p.getPipelineState() != Pipeline.PipelineState.CLOSED &&
+                p.sameDatanodes(pipeline)))
         .collect(Collectors.toList());
   }
 }
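
With the hash gone, duplicate detection compares actual membership, so
the colliding UUID sets from the earlier sketch can no longer be
reported as the same pipeline. A hedged illustration of the filter's
intent (names as in the patch; pipelines and target are assumed inputs):

    // Keep every other non-CLOSED RATIS pipeline whose datanode set
    // equals the target's; set equality replaces hash equality.
    List<Pipeline> duplicates = pipelines.stream()
        .filter(p -> !p.getId().equals(target.getId())
            && p.getPipelineState() != Pipeline.PipelineState.CLOSED
            && p.sameDatanodes(target))
        .collect(Collectors.toList());
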
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/MockRatisPipelineProvider.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/MockRatisPipelineProvider.java
index 3eb146a..ff52470 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/MockRatisPipelineProvider.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/MockRatisPipelineProvider.java
@@ -73,8 +73,6 @@ public class MockRatisPipelineProvider extends RatisPipelineProvider {
           .setType(initialPipeline.getType())
           .setFactor(factor)
           .setNodes(initialPipeline.getNodes())
-          .setNodeIdsHash(RatisPipelineUtils
-              .encodeNodeIdsOfFactorThreePipeline(initialPipeline.getNodes()))
           .build();
     }
   }
@@ -93,8 +91,6 @@ public class MockRatisPipelineProvider extends RatisPipelineProvider {
         .setType(HddsProtos.ReplicationType.RATIS)
         .setFactor(factor)
         .setNodes(nodes)
-        .setNodeIdsHash(RatisPipelineUtils
-            .encodeNodeIdsOfFactorThreePipeline(nodes))
         .build();
   }
 }
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelinePlacementPolicy.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelinePlacementPolicy.java
index 2fff7d9..b9aa9af 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelinePlacementPolicy.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelinePlacementPolicy.java
@@ -65,10 +65,10 @@ public class TestPipelinePlacementPolicy {
 
     List<DatanodeDetails> excludedNodes =
         new ArrayList<>(PIPELINE_PLACEMENT_MAX_NODES_COUNT);
+    excludedNodes.add(anchor);
     DatanodeDetails nextNode = placementPolicy.chooseNodeFromNetworkTopology(
         nodeManager.getClusterNetworkTopologyMap(), anchor, excludedNodes);
-    // excludedNodes should contain nextNode after being chosen.
-    Assert.assertTrue(excludedNodes.contains(nextNode));
+    Assert.assertFalse(excludedNodes.contains(nextNode));
     // nextNode should not be the same as anchor.
     Assert.assertTrue(anchor.getUuid() != nextNode.getUuid());
   }
@@ -83,7 +83,8 @@ public class TestPipelinePlacementPolicy {
     DatanodeDetails nextNode = placementPolicy.chooseNodeBasedOnRackAwareness(
         healthyNodes, new ArrayList<>(PIPELINE_PLACEMENT_MAX_NODES_COUNT),
         topologyWithDifRacks, anchor);
-    Assert.assertFalse(topologyWithDifRacks.isSameParent(anchor, nextNode));
+    Assert.assertFalse(anchor.getNetworkLocation().equals(
+        nextNode.getNetworkLocation()));
   }
 
   private final static Node[] NODES = new NodeImpl[] {
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestRatisPipelineProvider.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestRatisPipelineProvider.java
index a17fc08..86d54b3 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestRatisPipelineProvider.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestRatisPipelineProvider.java
@@ -35,7 +35,6 @@ import java.util.ArrayList;
 import java.util.HashSet;
 import java.util.List;
 import java.util.Set;
-import java.util.UUID;
 import java.util.stream.Collectors;
 
 import static org.apache.commons.collections.CollectionUtils.intersection;
@@ -84,7 +83,7 @@ public class TestRatisPipelineProvider {
         intersection(pipeline.getNodes(), pipeline1.getNodes())
             .size() < factor.getNumber());
     if (pipeline.getFactor() == HddsProtos.ReplicationFactor.THREE) {
-      assertNotEquals(pipeline.getNodeIdsHash(), pipeline1.getNodeIdsHash());
+      assertNotEquals(pipeline.getNodeSet(), pipeline1.getNodeSet());
     }
     stateManager.addPipeline(pipeline1);
     nodeManager.addPipeline(pipeline1);
@@ -105,7 +104,7 @@ public class TestRatisPipelineProvider {
     stateManager.addPipeline(pipeline1);
    // With enough pipeline quota on datanodes, they should not share
     // the same set of datanodes.
-    assertNotEquals(pipeline.getNodeIdsHash(), pipeline1.getNodeIdsHash());
+    assertNotEquals(pipeline.getNodeSet(), pipeline1.getNodeSet());
   }
 
   @Test
@@ -141,33 +140,6 @@ public class TestRatisPipelineProvider {
   }
 
   @Test
-  public void testComputeNodeIdsHash() {
-    int total = HddsProtos.ReplicationFactor.THREE.getNumber();
-    List<DatanodeDetails> nodes1 = new ArrayList<>();
-    for (int i = 0; i < total; i++) {
-      nodes1.add(MockDatanodeDetails.createDatanodeDetails(
-          UUID.fromString("00000-11000-00000-00000-0000" + (i + 1))));
-    }
-
-    Assert.assertEquals(total, nodes1.size());
-    Assert.assertNotEquals(0,
-        RatisPipelineUtils.encodeNodeIdsOfFactorThreePipeline(nodes1));
-
-    List<DatanodeDetails> nodes2 = new ArrayList<>();
-    for (int i = 0; i < total; i++) {
-      nodes2.add(MockDatanodeDetails.createDatanodeDetails(
-          UUID.fromString("00000-11000-00000-00000-0000" + (total - i))));
-    }
-    Assert.assertEquals(total, nodes2.size());
-    Assert.assertNotEquals(0,
-        RatisPipelineUtils.encodeNodeIdsOfFactorThreePipeline(nodes2));
-
-    Assert.assertEquals(
-        RatisPipelineUtils.encodeNodeIdsOfFactorThreePipeline(nodes1),
-        RatisPipelineUtils.encodeNodeIdsOfFactorThreePipeline(nodes2));
-  }
-
-  @Test
   public void testCreateFactorTHREEPipelineWithSameDatanodes() {
     List<DatanodeDetails> healthyNodes = nodeManager
         .getNodes(HddsProtos.NodeState.HEALTHY).stream()
@@ -178,9 +150,7 @@ public class TestRatisPipelineProvider {
     Pipeline pipeline2 = provider.create(
         HddsProtos.ReplicationFactor.THREE, healthyNodes);
 
-    Assert.assertTrue(pipeline1.getNodes().parallelStream()
-        .allMatch(pipeline2.getNodes()::contains));
-    Assert.assertEquals(pipeline1.getNodeIdsHash(), pipeline2.getNodeIdsHash());
+    Assert.assertEquals(pipeline1.getNodeSet(), pipeline2.getNodeSet());
   }
 
   @Test
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestSCMPipelineManager.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestSCMPipelineManager.java
index deba91b..ab23153 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestSCMPipelineManager.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestSCMPipelineManager.java
@@ -18,7 +18,6 @@
 
 package org.apache.hadoop.hdds.scm.pipeline;
 
-import static org.apache.commons.collections.CollectionUtils.intersection;
 import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_DATANODE_PIPELINE_LIMIT;
 import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_PIPELINE_ALLOCATED_TIMEOUT;
 import static org.apache.hadoop.test.MetricsAsserts.getLongCounter;
@@ -116,15 +115,13 @@ public class TestSCMPipelineManager {
     List<Pipeline> pipelineList =
         pipelineManager.getPipelines(HddsProtos.ReplicationType.RATIS);
     Assert.assertEquals(pipelines, new HashSet<>(pipelineList));
-    // All NodeIdsHash from original pipeline list
-    List<Integer> originalPipelineHash = pipelineList.stream()
-        .map(Pipeline::getNodeIdsHash).collect(Collectors.toList());
-    // All NodeIdsHash from reloaded pipeline list
-    List<Integer> reloadedPipelineHash = pipelines.stream()
-        .map(Pipeline::getNodeIdsHash).collect(Collectors.toList());
-    // Original NodeIdsHash list should contain same items from reloaded one.
-    Assert.assertEquals(pipelineNum,
-        intersection(originalPipelineHash, reloadedPipelineHash).size());
+
+    Set<Set<DatanodeDetails>> originalPipelines = pipelineList.stream()
+        .map(Pipeline::getNodeSet).collect(Collectors.toSet());
+    Set<Set<DatanodeDetails>> reloadedPipelineHash = pipelines.stream()
+        .map(Pipeline::getNodeSet).collect(Collectors.toSet());
+    Assert.assertEquals(reloadedPipelineHash, originalPipelines);
+    Assert.assertEquals(pipelineNum, originalPipelines.size());
 
     // clean up
     for (Pipeline pipeline : pipelines) {
diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/ListInfoSubcommand.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/ListInfoSubcommand.java
index badfadc..e4060b3 100644
--- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/ListInfoSubcommand.java
+++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/ListInfoSubcommand.java
@@ -114,8 +114,8 @@ public class ListInfoSubcommand implements Callable<Void> {
       pipelineListInfo.append("No pipelines in cluster.");
     }
     System.out.println("Datanode: " + datanode.getUuid().toString() +
-        " (" + datanode.getIpAddress() + "/"
-        + datanode.getHostName() + "/" + relatedPipelineNum +
+        " (" + datanode.getNetworkLocation() + "/" + datanode.getIpAddress()
+        + "/" + datanode.getHostName() + "/" + relatedPipelineNum +
         " pipelines) \n" + "Related pipelines: \n" + pipelineListInfo);
   }
 }
\ No newline at end of file
diff --git a/hadoop-ozone/dist/src/main/compose/ozone-topology/docker-compose.yaml b/hadoop-ozone/dist/src/main/compose/ozone-topology/docker-compose.yaml
index 69611fa..ccd131c 100644
--- a/hadoop-ozone/dist/src/main/compose/ozone-topology/docker-compose.yaml
+++ b/hadoop-ozone/dist/src/main/compose/ozone-topology/docker-compose.yaml
@@ -72,6 +72,34 @@ services:
       networks:
          net:
             ipv4_address: 10.5.0.7
+   datanode_5:
+     image: apache/ozone-runner:${OZONE_RUNNER_VERSION}
+     privileged: true #required by the profiler
+     volumes:
+       - ../..:/opt/hadoop
+     ports:
+       - 9864
+       - 9882
+     command: ["/opt/hadoop/bin/ozone","datanode"]
+     env_file:
+       - ./docker-config
+     networks:
+       net:
+         ipv4_address: 10.5.0.8
+   datanode_6:
+     image: apache/ozone-runner:${OZONE_RUNNER_VERSION}
+     privileged: true #required by the profiler
+     volumes:
+       - ../..:/opt/hadoop
+     ports:
+       - 9864
+       - 9882
+     command: ["/opt/hadoop/bin/ozone","datanode"]
+     env_file:
+       - ./docker-config
+     networks:
+       net:
+         ipv4_address: 10.5.0.9
    om:
       image: apache/ozone-runner:${OZONE_RUNNER_VERSION}
       privileged: true #required by the profiler

