Posted to common-commits@hadoop.apache.org by ki...@apache.org on 2019/09/18 21:44:18 UTC

[hadoop] branch branch-3.2 updated: HDFS-13959. TestUpgradeDomainBlockPlacementPolicy is flaky. Contributed by Ayush Saxena.

This is an automated email from the ASF dual-hosted git repository.

kihwal pushed a commit to branch branch-3.2
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.2 by this push:
     new e29f91b  HDFS-13959. TestUpgradeDomainBlockPlacementPolicy is flaky. Contributed by Ayush Saxena.
e29f91b is described below

commit e29f91bad3d76c8a6171f5855493b057a09a161c
Author: Kihwal Lee <ki...@apache.org>
AuthorDate: Wed Sep 18 15:53:08 2019 -0500

    HDFS-13959. TestUpgradeDomainBlockPlacementPolicy is flaky. Contributed by Ayush Saxena.
    
    (cherry picked from commit 1851d06eb3b70f39f3054a7c06f0ad2bc664aaec)
---
 .../TestUpgradeDomainBlockPlacementPolicy.java     | 22 +++++++++++++---------
 1 file changed, 13 insertions(+), 9 deletions(-)

diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestUpgradeDomainBlockPlacementPolicy.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestUpgradeDomainBlockPlacementPolicy.java
index 8460b6f..3383c4e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestUpgradeDomainBlockPlacementPolicy.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestUpgradeDomainBlockPlacementPolicy.java
@@ -65,14 +65,8 @@ public class TestUpgradeDomainBlockPlacementPolicy {
   private static final int DEFAULT_BLOCK_SIZE = 1024;
   static final String[] racks =
       { "/RACK1", "/RACK1", "/RACK1", "/RACK2", "/RACK2", "/RACK2" };
-  /**
-   *  Use host names that can be resolved (
-   *  InetSocketAddress#isUnresolved == false). Otherwise,
-   *  CombinedHostFileManager won't allow those hosts.
-   */
   static final String[] hosts =
-      {"127.0.0.1", "127.0.0.1", "127.0.0.1", "127.0.0.1",
-          "127.0.0.1", "127.0.0.1"};
+      {"host1", "host2", "host3", "host4", "host5", "host6"};
   static final String[] upgradeDomains =
       {"ud5", "ud2", "ud3", "ud1", "ud2", "ud4"};
   static final Set<DatanodeID> expectedDatanodeIDs = new HashSet<>();
@@ -134,7 +128,12 @@ public class TestUpgradeDomainBlockPlacementPolicy {
     for (int i = 0; i < hosts.length; i++) {
       datanodes[i] = new DatanodeAdminProperties();
       DatanodeID datanodeID = cluster.getDataNodes().get(i).getDatanodeId();
-      datanodes[i].setHostName(datanodeID.getHostName());
+      /*
+       *  Use host names that can be resolved (
+       *  InetSocketAddress#isUnresolved == false). Otherwise,
+       *  CombinedHostFileManager won't allow those hosts.
+       */
+      datanodes[i].setHostName(datanodeID.getIpAddr());
       datanodes[i].setPort(datanodeID.getXferPort());
       datanodes[i].setUpgradeDomain(upgradeDomains[i]);
     }
@@ -168,7 +167,12 @@ public class TestUpgradeDomainBlockPlacementPolicy {
     for (int i = 0; i < hosts.length; i++) {
       datanodes[i] = new DatanodeAdminProperties();
       DatanodeID datanodeID = cluster.getDataNodes().get(i).getDatanodeId();
-      datanodes[i].setHostName(datanodeID.getHostName());
+      /*
+       *  Use host names that can be resolved (
+       *  InetSocketAddress#isUnresolved == false). Otherwise,
+       *  CombinedHostFileManager won't allow those hosts.
+       */
+      datanodes[i].setHostName(datanodeID.getIpAddr());
       datanodes[i].setPort(datanodeID.getXferPort());
       datanodes[i].setUpgradeDomain(upgradeDomains[i]);
     }
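For reference, the reason the patch switches from DatanodeID#getHostName() to
DatanodeID#getIpAddr() when building the DatanodeAdminProperties entries:
CombinedHostFileManager only admits include entries whose address resolves,
i.e. InetSocketAddress#isUnresolved must be false, and the new "hostN" names
in the hosts[] array do not resolve. A minimal standalone sketch of that
check (the class name, port number and the "host1" literal are illustrative
only, not part of the patch):

    import java.net.InetSocketAddress;

    public class ResolveCheck {
      public static void main(String[] args) {
        // A loopback IP address always resolves, so an entry built from
        // DatanodeID#getIpAddr() passes the isUnresolved() check.
        InetSocketAddress byIp = new InetSocketAddress("127.0.0.1", 9866);
        System.out.println("127.0.0.1 unresolved? " + byIp.isUnresolved()); // false

        // A bare name like "host1" normally has no DNS entry, so it stays
        // unresolved and CombinedHostFileManager would reject it.
        InetSocketAddress byName = new InetSocketAddress("host1", 9866);
        System.out.println("host1 unresolved? " + byName.isUnresolved()); // usually true
      }
    }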

