Posted to commits@ozone.apache.org by el...@apache.org on 2019/11/20 15:25:29 UTC

[hadoop-ozone] branch HDDS-2034 updated: fix TestDeadNodeHandler

This is an automated email from the ASF dual-hosted git repository.

elek pushed a commit to branch HDDS-2034
in repository https://gitbox.apache.org/repos/asf/hadoop-ozone.git


View the commit online:
https://github.com/apache/hadoop-ozone/commit/724e868805944c5c2a99cf7a50adcaccda99e563

The following commit(s) were added to refs/heads/HDDS-2034 by this push:
     new 724e868  fix TestDeadNodeHandler
724e868 is described below

commit 724e868805944c5c2a99cf7a50adcaccda99e563
Author: Márton Elek <el...@apache.org>
AuthorDate: Wed Nov 20 16:25:12 2019 +0100

    fix TestDeadNodeHandler
---
 .../hadoop/hdds/scm/node/TestDeadNodeHandler.java  | 48 ++++++++++++++++------
 pom.xml                                            |  2 +-
 2 files changed, 36 insertions(+), 14 deletions(-)
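
The heart of the fix: instead of booting a full StorageContainerManager through HddsTestUtils.getScm(conf), the test now builds the three SCM managers directly on top of a mocked NetworkTopology and SCMStorageConfig, and drops the @Ignore ("Tracked by HDDS-2508.") from testOnMessage. A condensed, illustrative sketch of the new @Before wiring (names exactly as in the diff below; imports, the temp-dir setup, and the @Before annotation elided):

    // Cluster identity comes from a mock; only the two identity getters
    // used during datanode registration are stubbed.
    NetworkTopology topology = Mockito.mock(NetworkTopology.class);
    SCMStorageConfig storageConfig = Mockito.mock(SCMStorageConfig.class);
    Mockito.when(storageConfig.getScmId()).thenReturn("SCM-ID");
    Mockito.when(storageConfig.getClusterID()).thenReturn("CLUSTER_ID");

    // Real manager instances are layered over the mocks instead of a
    // full SCM.
    nodeManager = new SCMNodeManager(conf, storageConfig, eventQueue, topology);
    pipelineManager = new SCMPipelineManager(conf, nodeManager, eventQueue);
    containerManager =
        new SCMContainerManager(conf, nodeManager, pipelineManager, eventQueue);

    // The handler under test listens on the DEAD_NODE event.
    deadNodeHandler = new DeadNodeHandler(nodeManager,
        Mockito.mock(PipelineManager.class), containerManager);
    eventQueue.addHandler(SCMEvents.DEAD_NODE, deadNodeHandler);
    publisher = Mockito.mock(EventPublisher.class);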

diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestDeadNodeHandler.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestDeadNodeHandler.java
index 1676af1..24e3870 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestDeadNodeHandler.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestDeadNodeHandler.java
@@ -30,6 +30,8 @@ import org.apache.hadoop.hdds.HddsConfigKeys;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType;
 import org.apache.hadoop.hdds.protocol.proto
     .StorageContainerDatanodeProtocolProtos.ContainerReplicaProto;
 import org.apache.hadoop.hdds.protocol.proto
@@ -43,7 +45,10 @@ import org.apache.hadoop.hdds.scm.container.ContainerManager;
 import org.apache.hadoop.hdds.scm.container.ContainerNotFoundException;
 import org.apache.hadoop.hdds.scm.container.ContainerReplica;
 import org.apache.hadoop.hdds.scm.container.ContainerInfo;
+import org.apache.hadoop.hdds.scm.container.SCMContainerManager;
 import org.apache.hadoop.hdds.scm.events.SCMEvents;
+import org.apache.hadoop.hdds.scm.net.NetworkTopology;
+import org.apache.hadoop.hdds.scm.net.NetworkTopologyImpl;
 import org.apache.hadoop.hdds.scm.node.states.NodeNotFoundException;
 import org.apache.hadoop.hdds.scm.pipeline.PipelineManager;
 import org.apache.hadoop.hdds.scm.pipeline.PipelineProvider;
@@ -51,6 +56,7 @@ import org.apache.hadoop.hdds.scm.pipeline.SCMPipelineManager;
 import org.apache.hadoop.hdds.scm.pipeline.MockRatisPipelineProvider;
 import org.apache.hadoop.hdds.scm.server.SCMDatanodeHeartbeatDispatcher
     .NodeReportFromDatanode;
+import org.apache.hadoop.hdds.scm.server.SCMStorageConfig;
 import org.apache.hadoop.hdds.scm.server.StorageContainerManager;
 import org.apache.hadoop.hdds.server.events.EventPublisher;
 
@@ -58,22 +64,23 @@ import org.apache.hadoop.hdds.server.events.EventQueue;
 import org.apache.hadoop.security.authentication.client
     .AuthenticationException;
 import org.apache.hadoop.test.GenericTestUtils;
+
+import org.apache.commons.io.IOUtils;
 import org.junit.After;
 import org.junit.Assert;
 import org.junit.Before;
 import org.junit.Ignore;
 import org.junit.Test;
 import org.mockito.Mockito;
+import org.slf4j.LoggerFactory;
 
 /**
  * Test DeadNodeHandler.
  */
 public class TestDeadNodeHandler {
 
-  private StorageContainerManager scm;
   private SCMNodeManager nodeManager;
   private ContainerManager containerManager;
-  private NodeReportHandler nodeReportHandler;
   private SCMPipelineManager pipelineManager;
   private DeadNodeHandler deadNodeHandler;
   private EventPublisher publisher;
@@ -87,32 +94,40 @@ public class TestDeadNodeHandler {
         TestDeadNodeHandler.class.getSimpleName() + UUID.randomUUID());
     conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, storageDir);
     eventQueue = new EventQueue();
-    scm = HddsTestUtils.getScm(conf);
-    nodeManager = (SCMNodeManager) scm.getScmNodeManager();
-    pipelineManager =
-        (SCMPipelineManager)scm.getPipelineManager();
+
+    NetworkTopology topology = Mockito.mock(NetworkTopology.class);
+    SCMStorageConfig storageConfig = Mockito.mock(SCMStorageConfig.class);
+    Mockito.when(storageConfig.getScmId()).thenReturn("SCM-ID");
+    Mockito.when(storageConfig.getClusterID()).thenReturn("CLUSTER_ID");
+
+    nodeManager = new SCMNodeManager(conf, storageConfig, eventQueue, topology);
+
+    pipelineManager = new SCMPipelineManager(conf, nodeManager, eventQueue);
+
     PipelineProvider mockRatisProvider =
         new MockRatisPipelineProvider(nodeManager,
             pipelineManager.getStateManager(), conf);
+
     pipelineManager.setPipelineProvider(HddsProtos.ReplicationType.RATIS,
         mockRatisProvider);
-    containerManager = scm.getContainerManager();
+
+    containerManager =
+        new SCMContainerManager(conf, nodeManager, pipelineManager, eventQueue);
+
     deadNodeHandler = new DeadNodeHandler(nodeManager,
         Mockito.mock(PipelineManager.class), containerManager);
     eventQueue.addHandler(SCMEvents.DEAD_NODE, deadNodeHandler);
     publisher = Mockito.mock(EventPublisher.class);
-    nodeReportHandler = new NodeReportHandler(nodeManager);
   }
 
   @After
-  public void teardown() {
-    scm.stop();
-    scm.join();
-    FileUtil.fullyDelete(new File(storageDir));
+  public void teardown() throws IOException {
+    FileUtil.fullyDelete(new File(storageDir));
+    pipelineManager.close();
+    nodeManager.close();
   }
 
   @Test
-  @Ignore("Tracked by HDDS-2508.")
   public void testOnMessage() throws IOException, NodeNotFoundException {
     //GIVEN
     DatanodeDetails datanode1 = TestUtils.randomDatanodeDetails();
@@ -136,6 +151,8 @@ public class TestDeadNodeHandler {
     nodeManager.register(datanode3,
         TestUtils.createNodeReport(storageOne), null);
 
+    pipelineManager.createPipeline(ReplicationType.RATIS, ReplicationFactor.THREE);
+
     nodeManager.register(TestUtils.randomDatanodeDetails(),
         TestUtils.createNodeReport(storageOne), null);
     nodeManager.register(TestUtils.randomDatanodeDetails(),
@@ -143,6 +160,8 @@ public class TestDeadNodeHandler {
     nodeManager.register(TestUtils.randomDatanodeDetails(),
         TestUtils.createNodeReport(storageOne), null);
 
+    pipelineManager.createPipeline(ReplicationType.RATIS, ReplicationFactor.THREE);
+
     nodeManager.register(TestUtils.randomDatanodeDetails(),
         TestUtils.createNodeReport(storageOne), null);
     nodeManager.register(TestUtils.randomDatanodeDetails(),
@@ -150,6 +169,9 @@ public class TestDeadNodeHandler {
     nodeManager.register(TestUtils.randomDatanodeDetails(),
         TestUtils.createNodeReport(storageOne), null);
 
+    pipelineManager.createPipeline(ReplicationType.RATIS, ReplicationFactor.THREE);
+
+
     TestUtils.openAllRatisPipelines(pipelineManager);
 
     ContainerInfo container1 =
diff --git a/pom.xml b/pom.xml
index 4c03e2b..9e03f6e 100644
--- a/pom.xml
+++ b/pom.xml
@@ -120,7 +120,7 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xs
 
     <!-- Used for building path to native library loaded by tests.  Projects -->
     <!-- at different nesting levels in the source tree may need to override. -->
-    <hadoop.common.build.dir>${basedir}/../../hadoop-common-project/hadoop-common/target</hadoop.common.build.dir>
+    <hadoop.common.build.dir>${basedir}/../../hadoop-ozone/hadoop-common-project/target</hadoop.common.build.dir>
     <java.security.egd>file:///dev/urandom</java.security.egd>
 
     <!-- avro version -->
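
For context on the testOnMessage changes: the test registers datanodes in groups of three and, with this commit, asks the pipeline manager (backed by MockRatisPipelineProvider) for a RATIS/THREE pipeline after each group, so each registered trio is covered by a pipeline before TestUtils.openAllRatisPipelines runs. A minimal sketch of one such round plus the eventual dead-node check, assuming the helpers visible in the diff (storageOne and container1 come from elided parts of the test; the final assertion is illustrative, not the commit's exact code):

    // Register three healthy datanodes so a RATIS/THREE pipeline can be
    // placed on them.
    DatanodeDetails dead = TestUtils.randomDatanodeDetails();
    nodeManager.register(dead, TestUtils.createNodeReport(storageOne), null);
    nodeManager.register(TestUtils.randomDatanodeDetails(),
        TestUtils.createNodeReport(storageOne), null);
    nodeManager.register(TestUtils.randomDatanodeDetails(),
        TestUtils.createNodeReport(storageOne), null);

    pipelineManager.createPipeline(ReplicationType.RATIS,
        ReplicationFactor.THREE);
    TestUtils.openAllRatisPipelines(pipelineManager);

    // DeadNodeHandler is an EventHandler<DatanodeDetails>, so the test
    // can drive it directly with the mocked publisher...
    deadNodeHandler.onMessage(dead, publisher);

    // ...and then check that the dead node's container replicas are gone.
    Set<ContainerReplica> replicas =
        containerManager.getContainerReplicas(container1.containerID());
    Assert.assertTrue(replicas.stream()
        .noneMatch(r -> r.getDatanodeDetails().equals(dead)));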

