Posted to common-commits@hadoop.apache.org by aa...@apache.org on 2021/01/13 02:57:04 UTC

[hadoop] branch branch-3.1 updated: HDFS-15762. TestMultipleNNPortQOP#testMultipleNNPortOverwriteDownStream fails intermittently (#2598)

This is an automated email from the ASF dual-hosted git repository.

aajisaka pushed a commit to branch branch-3.1
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.1 by this push:
     new 669c6a7  HDFS-15762. TestMultipleNNPortQOP#testMultipleNNPortOverwriteDownStream fails intermittently (#2598)
669c6a7 is described below

commit 669c6a7cf4e5f3decc8c20b9278bbbc0afca24cd
Author: touchida <56...@users.noreply.github.com>
AuthorDate: Wed Jan 13 11:23:07 2021 +0900

    HDFS-15762. TestMultipleNNPortQOP#testMultipleNNPortOverwriteDownStream fails intermittently (#2598)
    
    Co-authored-by: Toshihiko Uchida <to...@linecorp.com>
    Signed-off-by: Akira Ajisaka <aa...@apache.org>
    (cherry picked from commit 8ec824f2ba462770c99f4fe3521f4448d5bc7b0a)
---
 .../apache/hadoop/hdfs/TestMultipleNNPortQOP.java  | 61 +++++++---------------
 1 file changed, 19 insertions(+), 42 deletions(-)

diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestMultipleNNPortQOP.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestMultipleNNPortQOP.java
index db42dcc..d536c5e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestMultipleNNPortQOP.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestMultipleNNPortQOP.java
@@ -25,7 +25,6 @@ import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.FileSystemTestHelper;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hdfs.protocol.datatransfer.sasl.SaslDataTransferClient;
 import org.apache.hadoop.hdfs.protocol.datatransfer.sasl.SaslDataTransferServer;
 import org.apache.hadoop.hdfs.protocol.datatransfer.sasl.SaslDataTransferTestCase;
 import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
@@ -251,55 +250,33 @@ public class TestMultipleNNPortQOP extends SaslDataTransferTestCase {
       clientConf.set(HADOOP_RPC_PROTECTION, "privacy");
       FileSystem fsPrivacy = FileSystem.get(uriPrivacyPort, clientConf);
       doTest(fsPrivacy, PATH1);
-      for (int i = 0; i < 2; i++) {
-        DataNode dn = dataNodes.get(i);
-        SaslDataTransferClient saslClient = dn.getSaslClient();
-        String qop = null;
-        // It may take some time for the qop to populate
-        // to all DNs, check in a loop.
-        for (int trial = 0; trial < 10; trial++) {
-          qop = saslClient.getTargetQOP();
-          if (qop != null) {
-            break;
-          }
-          Thread.sleep(100);
-        }
-        assertEquals("auth", qop);
-      }
+      long count = dataNodes.stream()
+          .map(dn -> dn.getSaslClient().getTargetQOP())
+          .filter("auth"::equals)
+          .count();
+      // In each datanode pipeline, the SASL clients of the first two
+      // datanodes have their targetQOP set to auth.
+      // This does not necessarily hold for all datanodes, since a
+      // datanode may always end up at the last position in pipelines.
+      assertTrue("At least two qops should be auth", count >= 2);
 
       clientConf.set(HADOOP_RPC_PROTECTION, "integrity");
       FileSystem fsIntegrity = FileSystem.get(uriIntegrityPort, clientConf);
       doTest(fsIntegrity, PATH2);
-      for (int i = 0; i < 2; i++) {
-        DataNode dn = dataNodes.get(i);
-        SaslDataTransferClient saslClient = dn.getSaslClient();
-        String qop = null;
-        for (int trial = 0; trial < 10; trial++) {
-          qop = saslClient.getTargetQOP();
-          if (qop != null) {
-            break;
-          }
-          Thread.sleep(100);
-        }
-        assertEquals("auth", qop);
-      }
+      count = dataNodes.stream()
+          .map(dn -> dn.getSaslClient().getTargetQOP())
+          .filter("auth"::equals)
+          .count();
+      assertTrue("At least two qops should be auth", count >= 2);
 
       clientConf.set(HADOOP_RPC_PROTECTION, "authentication");
       FileSystem fsAuth = FileSystem.get(uriAuthPort, clientConf);
       doTest(fsAuth, PATH3);
-      for (int i = 0; i < 3; i++) {
-        DataNode dn = dataNodes.get(i);
-        SaslDataTransferServer saslServer = dn.getSaslServer();
-        String qop = null;
-        for (int trial = 0; trial < 10; trial++) {
-          qop = saslServer.getNegotiatedQOP();
-          if (qop != null) {
-            break;
-          }
-          Thread.sleep(100);
-        }
-        assertEquals("auth", qop);
-      }
+      count = dataNodes.stream()
+          .map(dn -> dn.getSaslServer().getNegotiatedQOP())
+          .filter("auth"::equals)
+          .count();
+      assertEquals("All qops should be auth", 3, count);
     } finally {
       if (cluster != null) {
         cluster.shutdown();
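
For readers outside the Hadoop tree, the new assertions boil down to "count how
many datanodes report the expected QOP" instead of "poll two specific datanodes
until they do". Below is a minimal, self-contained Java sketch of that pattern;
the Node interface and the countWithQop helper are hypothetical stand-ins for
DataNode#getSaslClient().getTargetQOP(), not real HDFS API. Only the
stream/filter/count idiom mirrors the test code above.

import java.util.List;

public class QopCountSketch {

  // Hypothetical stand-in for a datanode exposing the QOP its SASL
  // client negotiated (null if it never acted as a client).
  interface Node {
    String getTargetQOP();
  }

  // Count the nodes reporting the expected QOP. Unlike indexing the
  // first two datanodes directly, this tolerates any pipeline order.
  static long countWithQop(List<Node> nodes, String expected) {
    return nodes.stream()
        .map(Node::getTargetQOP)
        .filter(expected::equals)  // null-safe: "auth".equals(null) is false
        .count();
  }

  public static void main(String[] args) {
    // Two nodes acted as SASL clients and saw "auth"; the third was
    // always last in the pipeline and never negotiated as a client.
    List<Node> nodes = List.of(() -> "auth", () -> "auth", () -> null);
    long count = countWithQop(nodes, "auth");
    if (count < 2) {
      throw new AssertionError("At least two qops should be auth, got " + count);
    }
    System.out.println("count = " + count);  // prints: count = 2
  }
}

The removed version indexed dataNodes.get(0) and get(1) and retried each up to
ten times with a 100 ms sleep. As the in-code comment notes, a datanode can
always end up last in the pipelines, in which case its SASL client never
negotiates a QOP and the indexed lookup keeps seeing null no matter how long
the test waits, which is why the test failed intermittently.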

