You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@ozone.apache.org by sa...@apache.org on 2020/07/22 13:00:15 UTC
[hadoop-ozone] 05/39: HDDS-3920. Too many redundant replications due to fail to get node's a… (#1163)
This is an automated email from the ASF dual-hosted git repository.
sammichen pushed a commit to branch ozone-0.6.0
in repository https://gitbox.apache.org/repos/asf/hadoop-ozone.git
commit 16d8a3a5309ab97750e8ddec5ace7a727149ab32
Author: Sammi Chen <sa...@apache.org>
AuthorDate: Mon Jul 13 14:51:52 2020 +0800
HDDS-3920. Too many redundant replications due to fail to get node's a… (#1163)
(cherry picked from commit 9106f1421667c407dd5a533e1f1f37369424f0a3)
---
.../hadoop/hdds/protocol/DatanodeDetails.java | 1 +
.../hadoop/hdds/scm/block/DeletedBlockLogImpl.java | 2 +-
.../hdds/scm/container/ContainerReportHandler.java | 9 +++-
.../IncrementalContainerReportHandler.java | 13 +++--
.../hadoop/hdds/scm/node/SCMNodeManager.java | 63 ++++++++++++----------
.../TestIncrementalContainerReportHandler.java | 36 +++++++++++--
.../hdds/scm/node/TestNodeReportHandler.java | 5 +-
.../hadoop/hdds/scm/node/TestSCMNodeManager.java | 2 +
.../hadoop/ozone/scm/node/TestSCMNodeMetrics.java | 5 +-
.../ozone/om/TestContainerReportWithKeys.java | 10 ++++
.../ReconIncrementalContainerReportHandler.java | 12 ++++-
...TestReconIncrementalContainerReportHandler.java | 30 ++++++++---
12 files changed, 140 insertions(+), 48 deletions(-)
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/protocol/DatanodeDetails.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/protocol/DatanodeDetails.java
index 7fa7765..1b6a214 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/protocol/DatanodeDetails.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/protocol/DatanodeDetails.java
@@ -78,6 +78,7 @@ public class DatanodeDetails extends NodeImpl implements
this.hostName = datanodeDetails.hostName;
this.ports = datanodeDetails.ports;
this.setNetworkName(datanodeDetails.getNetworkName());
+ this.setParent(datanodeDetails.getParent());
}
/**
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/DeletedBlockLogImpl.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/DeletedBlockLogImpl.java
index 08639ba..bf03174 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/DeletedBlockLogImpl.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/DeletedBlockLogImpl.java
@@ -132,7 +132,7 @@ public class DeletedBlockLogImpl
if (block == null) {
// Should we make this an error ? How can we not find the deleted
// TXID?
- LOG.warn("Deleted TXID not found.");
+ LOG.warn("Deleted TXID {} not found.", txID);
continue;
}
DeletedBlocksTransaction.Builder builder = block.toBuilder();
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerReportHandler.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerReportHandler.java
index 1b0b81f..8432e29 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerReportHandler.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerReportHandler.java
@@ -102,8 +102,15 @@ public class ContainerReportHandler extends AbstractContainerReportHandler
public void onMessage(final ContainerReportFromDatanode reportFromDatanode,
final EventPublisher publisher) {
- final DatanodeDetails datanodeDetails =
+ final DatanodeDetails dnFromReport =
reportFromDatanode.getDatanodeDetails();
+ DatanodeDetails datanodeDetails =
+ nodeManager.getNodeByUuid(dnFromReport.getUuidString());
+ if (datanodeDetails == null) {
+ LOG.warn("Received container report from unknown datanode {}",
+ dnFromReport);
+ return;
+ }
final ContainerReportsProto containerReport =
reportFromDatanode.getReport();
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/IncrementalContainerReportHandler.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/IncrementalContainerReportHandler.java
index 017cc5c..c2148df 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/IncrementalContainerReportHandler.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/IncrementalContainerReportHandler.java
@@ -54,16 +54,23 @@ public class IncrementalContainerReportHandler extends
@Override
public void onMessage(final IncrementalContainerReportFromDatanode report,
final EventPublisher publisher) {
+ final DatanodeDetails dnFromReport = report.getDatanodeDetails();
if (LOG.isDebugEnabled()) {
LOG.debug("Processing incremental container report from data node {}",
- report.getDatanodeDetails().getUuid());
+ dnFromReport.getUuid());
+ }
+ DatanodeDetails dd =
+ nodeManager.getNodeByUuid(dnFromReport.getUuidString());
+ if (dd == null) {
+ LOG.warn("Received container report from unknown datanode {}",
+ dnFromReport);
+ return;
}
boolean success = true;
for (ContainerReplicaProto replicaProto :
report.getReport().getReportList()) {
try {
- final DatanodeDetails dd = report.getDatanodeDetails();
final ContainerID id = ContainerID.valueof(
replicaProto.getContainerID());
if (!replicaProto.getState().equals(
@@ -81,7 +88,7 @@ public class IncrementalContainerReportHandler extends
} catch (IOException e) {
success = false;
LOG.error("Exception while processing ICR for container {}",
- replicaProto.getContainerID());
+ replicaProto.getContainerID(), e);
}
}
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeManager.java
index 5a3851a..005881c 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeManager.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeManager.java
@@ -241,34 +241,43 @@ public class SCMNodeManager implements NodeManager {
DatanodeDetails datanodeDetails, NodeReportProto nodeReport,
PipelineReportsProto pipelineReportsProto) {
- InetAddress dnAddress = Server.getRemoteIp();
- if (dnAddress != null) {
- // Mostly called inside an RPC, update ip and peer hostname
- datanodeDetails.setHostName(dnAddress.getHostName());
- datanodeDetails.setIpAddress(dnAddress.getHostAddress());
- }
- try {
- String dnsName;
- String networkLocation;
- datanodeDetails.setNetworkName(datanodeDetails.getUuidString());
- if (useHostname) {
- dnsName = datanodeDetails.getHostName();
- } else {
- dnsName = datanodeDetails.getIpAddress();
- }
- networkLocation = nodeResolve(dnsName);
- if (networkLocation != null) {
- datanodeDetails.setNetworkLocation(networkLocation);
+ if (!isNodeRegistered(datanodeDetails)) {
+ InetAddress dnAddress = Server.getRemoteIp();
+ if (dnAddress != null) {
+ // Mostly called inside an RPC, update ip and peer hostname
+ datanodeDetails.setHostName(dnAddress.getHostName());
+ datanodeDetails.setIpAddress(dnAddress.getHostAddress());
}
- nodeStateManager.addNode(datanodeDetails);
- clusterMap.add(datanodeDetails);
- addEntryTodnsToUuidMap(dnsName, datanodeDetails.getUuidString());
- // Updating Node Report, as registration is successful
- processNodeReport(datanodeDetails, nodeReport);
- LOG.info("Registered Data node : {}", datanodeDetails);
- } catch (NodeAlreadyExistsException e) {
- if (LOG.isTraceEnabled()) {
- LOG.trace("Datanode is already registered. Datanode: {}",
+ try {
+ String dnsName;
+ String networkLocation;
+ datanodeDetails.setNetworkName(datanodeDetails.getUuidString());
+ if (useHostname) {
+ dnsName = datanodeDetails.getHostName();
+ } else {
+ dnsName = datanodeDetails.getIpAddress();
+ }
+ networkLocation = nodeResolve(dnsName);
+ if (networkLocation != null) {
+ datanodeDetails.setNetworkLocation(networkLocation);
+ }
+
+ clusterMap.add(datanodeDetails);
+ nodeStateManager.addNode(datanodeDetails);
+ // Check that datanode in nodeStateManager has topology parent set
+ DatanodeDetails dn = nodeStateManager.getNode(datanodeDetails);
+ Preconditions.checkState(dn.getParent() != null);
+ addEntryTodnsToUuidMap(dnsName, datanodeDetails.getUuidString());
+ // Updating Node Report, as registration is successful
+ processNodeReport(datanodeDetails, nodeReport);
+ LOG.info("Registered Data node : {}", datanodeDetails);
+ } catch (NodeAlreadyExistsException e) {
+ if (LOG.isTraceEnabled()) {
+ LOG.trace("Datanode is already registered. Datanode: {}",
+ datanodeDetails.toString());
+ }
+ } catch (NodeNotFoundException e) {
+ LOG.error("Cannot find datanode {} from nodeStateManager",
datanodeDetails.toString());
}
}
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestIncrementalContainerReportHandler.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestIncrementalContainerReportHandler.java
index efa333d..1af2f73 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestIncrementalContainerReportHandler.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestIncrementalContainerReportHandler.java
@@ -17,7 +17,7 @@
*/
package org.apache.hadoop.hdds.scm.container;
-import org.apache.hadoop.hdds.conf.ConfigurationSource;
+import org.apache.hadoop.hdds.HddsConfigKeys;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.hdds.protocol.DatanodeDetails;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
@@ -26,10 +26,16 @@ import org.apache.hadoop.hdds.protocol.proto
.StorageContainerDatanodeProtocolProtos.ContainerReplicaProto;
import org.apache.hadoop.hdds.protocol.proto
.StorageContainerDatanodeProtocolProtos.IncrementalContainerReportProto;
+import org.apache.hadoop.hdds.scm.net.NetworkTopology;
+import org.apache.hadoop.hdds.scm.net.NetworkTopologyImpl;
import org.apache.hadoop.hdds.scm.node.NodeManager;
+import org.apache.hadoop.hdds.scm.node.SCMNodeManager;
import org.apache.hadoop.hdds.scm.server.SCMDatanodeHeartbeatDispatcher
.IncrementalContainerReportFromDatanode;
+import org.apache.hadoop.hdds.scm.server.SCMStorageConfig;
import org.apache.hadoop.hdds.server.events.EventPublisher;
+import org.apache.hadoop.hdds.server.events.EventQueue;
+import org.apache.hadoop.test.GenericTestUtils;
import org.junit.After;
import org.junit.Assert;
import org.junit.Before;
@@ -37,7 +43,10 @@ import org.junit.Test;
import org.mockito.Mockito;
import java.io.IOException;
+import java.nio.file.Path;
+import java.nio.file.Paths;
import java.util.Set;
+import java.util.UUID;
import static org.apache.hadoop.hdds.protocol.MockDatanodeDetails.randomDatanodeDetails;
import static org.apache.hadoop.hdds.scm.TestUtils.getContainer;
@@ -55,9 +64,18 @@ public class TestIncrementalContainerReportHandler {
@Before
public void setup() throws IOException {
- final ConfigurationSource conf = new OzoneConfiguration();
+ final OzoneConfiguration conf = new OzoneConfiguration();
+ final String path =
+ GenericTestUtils.getTempPath(UUID.randomUUID().toString());
+ Path scmPath = Paths.get(path, "scm-meta");
+ conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, scmPath.toString());
this.containerManager = Mockito.mock(ContainerManager.class);
- this.nodeManager = Mockito.mock(NodeManager.class);
+ NetworkTopology clusterMap = new NetworkTopologyImpl(conf);
+ EventQueue eventQueue = new EventQueue();
+ SCMStorageConfig storageConfig = new SCMStorageConfig(conf);
+ this.nodeManager =
+ new SCMNodeManager(conf, storageConfig, eventQueue, clusterMap);
+
this.containerStateManager = new ContainerStateManager(conf);
this.publisher = Mockito.mock(EventPublisher.class);
@@ -105,6 +123,9 @@ public class TestIncrementalContainerReportHandler {
final DatanodeDetails datanodeOne = randomDatanodeDetails();
final DatanodeDetails datanodeTwo = randomDatanodeDetails();
final DatanodeDetails datanodeThree = randomDatanodeDetails();
+ nodeManager.register(datanodeOne, null, null);
+ nodeManager.register(datanodeTwo, null, null);
+ nodeManager.register(datanodeThree, null, null);
final Set<ContainerReplica> containerReplicas = getReplicas(
container.containerID(),
ContainerReplicaProto.State.CLOSING,
@@ -139,6 +160,9 @@ public class TestIncrementalContainerReportHandler {
final DatanodeDetails datanodeOne = randomDatanodeDetails();
final DatanodeDetails datanodeTwo = randomDatanodeDetails();
final DatanodeDetails datanodeThree = randomDatanodeDetails();
+ nodeManager.register(datanodeOne, null, null);
+ nodeManager.register(datanodeTwo, null, null);
+ nodeManager.register(datanodeThree, null, null);
final Set<ContainerReplica> containerReplicas = getReplicas(
container.containerID(),
ContainerReplicaProto.State.CLOSING,
@@ -174,6 +198,9 @@ public class TestIncrementalContainerReportHandler {
final DatanodeDetails datanodeOne = randomDatanodeDetails();
final DatanodeDetails datanodeTwo = randomDatanodeDetails();
final DatanodeDetails datanodeThree = randomDatanodeDetails();
+ nodeManager.register(datanodeOne, null, null);
+ nodeManager.register(datanodeTwo, null, null);
+ nodeManager.register(datanodeThree, null, null);
final Set<ContainerReplica> containerReplicas = getReplicas(
container.containerID(),
ContainerReplicaProto.State.CLOSING,
@@ -212,6 +239,9 @@ public class TestIncrementalContainerReportHandler {
final DatanodeDetails datanodeOne = randomDatanodeDetails();
final DatanodeDetails datanodeTwo = randomDatanodeDetails();
final DatanodeDetails datanodeThree = randomDatanodeDetails();
+ nodeManager.register(datanodeOne, null, null);
+ nodeManager.register(datanodeTwo, null, null);
+ nodeManager.register(datanodeThree, null, null);
final Set<ContainerReplica> containerReplicas = getReplicas(
container.containerID(),
ContainerReplicaProto.State.CLOSED,
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestNodeReportHandler.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestNodeReportHandler.java
index 7b4d841..69b031c 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestNodeReportHandler.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestNodeReportHandler.java
@@ -26,6 +26,7 @@ import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolPro
import org.apache.hadoop.hdds.scm.TestUtils;
import org.apache.hadoop.hdds.scm.container.placement.metrics.SCMNodeMetric;
import org.apache.hadoop.hdds.scm.net.NetworkTopology;
+import org.apache.hadoop.hdds.scm.net.NetworkTopologyImpl;
import org.apache.hadoop.hdds.scm.server.SCMDatanodeHeartbeatDispatcher.NodeReportFromDatanode;
import org.apache.hadoop.hdds.scm.server.SCMStorageConfig;
import org.apache.hadoop.hdds.server.events.Event;
@@ -56,9 +57,9 @@ public class TestNodeReportHandler implements EventPublisher {
OzoneConfiguration conf = new OzoneConfiguration();
SCMStorageConfig storageConfig = Mockito.mock(SCMStorageConfig.class);
Mockito.when(storageConfig.getClusterID()).thenReturn("cluster1");
+ NetworkTopology clusterMap = new NetworkTopologyImpl(conf);
nodeManager =
- new SCMNodeManager(conf, storageConfig, new EventQueue(), Mockito.mock(
- NetworkTopology.class));
+ new SCMNodeManager(conf, storageConfig, new EventQueue(), clusterMap);
nodeReportHandler = new NodeReportHandler(nodeManager);
}
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestSCMNodeManager.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestSCMNodeManager.java
index b167a38..df5cb2d 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestSCMNodeManager.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestSCMNodeManager.java
@@ -1133,6 +1133,8 @@ public class TestSCMNodeManager {
List<DatanodeDetails> nodeList = nodeManager.getAllNodes();
nodeList.stream().forEach(node ->
Assert.assertTrue(node.getNetworkLocation().startsWith("/rack1/ng")));
+ nodeList.stream().forEach(node ->
+ Assert.assertTrue(node.getParent() != null));
}
}
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/scm/node/TestSCMNodeMetrics.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/scm/node/TestSCMNodeMetrics.java
index 3625e34..7576e8b 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/scm/node/TestSCMNodeMetrics.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/scm/node/TestSCMNodeMetrics.java
@@ -30,7 +30,7 @@ import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolPro
import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.PipelineReportsProto;
import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.StorageReportProto;
import org.apache.hadoop.hdds.scm.TestUtils;
-import org.apache.hadoop.hdds.scm.net.NetworkTopology;
+import org.apache.hadoop.hdds.scm.net.NetworkTopologyImpl;
import org.apache.hadoop.hdds.scm.node.SCMNodeManager;
import org.apache.hadoop.hdds.scm.node.SCMNodeMetrics;
import org.apache.hadoop.hdds.scm.server.SCMStorageConfig;
@@ -45,7 +45,6 @@ import org.junit.Assert;
import static org.junit.Assert.assertEquals;
import org.junit.BeforeClass;
import org.junit.Test;
-import org.mockito.Mockito;
/**
* Test cases to verify the metrics exposed by SCMNodeManager.
@@ -64,7 +63,7 @@ public class TestSCMNodeMetrics {
SCMStorageConfig config =
new SCMStorageConfig(NodeType.DATANODE, new File("/tmp"), "storage");
nodeManager = new SCMNodeManager(source, config, publisher,
- Mockito.mock(NetworkTopology.class));
+ new NetworkTopologyImpl(source));
registeredDatanode = DatanodeDetails.newBuilder()
.setHostName("localhost")
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestContainerReportWithKeys.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestContainerReportWithKeys.java
index 8984e76..7f049a3 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestContainerReportWithKeys.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestContainerReportWithKeys.java
@@ -22,7 +22,9 @@ import org.apache.hadoop.hdds.client.ReplicationFactor;
import org.apache.hadoop.hdds.client.ReplicationType;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
+import org.apache.hadoop.hdds.scm.container.ContainerID;
import org.apache.hadoop.hdds.scm.container.ContainerInfo;
+import org.apache.hadoop.hdds.scm.container.ContainerReplica;
import org.apache.hadoop.ozone.MiniOzoneCluster;
import org.apache.hadoop.ozone.client.*;
import org.apache.hadoop.ozone.client.io.OzoneOutputStream;
@@ -32,6 +34,7 @@ import org.apache.hadoop.ozone.om.helpers.OmKeyArgs;
import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo;
import org.apache.hadoop.hdds.scm.server.StorageContainerManager;
import org.junit.AfterClass;
+import org.junit.Assert;
import org.junit.BeforeClass;
import org.junit.Rule;
import org.junit.Test;
@@ -42,6 +45,7 @@ import org.slf4j.LoggerFactory;
import java.io.IOException;
import java.util.HashMap;
+import java.util.Set;
/**
* This class tests container report with DN container state info.
@@ -122,6 +126,12 @@ public class TestContainerReportWithKeys {
ContainerInfo cinfo = scm.getContainerInfo(keyInfo.getContainerID());
+ Set<ContainerReplica> replicas =
+ scm.getContainerManager().getContainerReplicas(
+ new ContainerID(keyInfo.getContainerID()));
+ Assert.assertTrue(replicas.size() == 1);
+ replicas.stream().forEach(rp ->
+ Assert.assertTrue(rp.getDatanodeDetails().getParent() != null));
LOG.info("SCM Container Info keyCount: {} usedBytes: {}",
cinfo.getNumberOfKeys(), cinfo.getUsedBytes());
diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconIncrementalContainerReportHandler.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconIncrementalContainerReportHandler.java
index a5d946e..b538caf 100644
--- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconIncrementalContainerReportHandler.java
+++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconIncrementalContainerReportHandler.java
@@ -50,9 +50,18 @@ public class ReconIncrementalContainerReportHandler
@Override
public void onMessage(final IncrementalContainerReportFromDatanode report,
final EventPublisher publisher) {
+ final DatanodeDetails dnFromReport = report.getDatanodeDetails();
if (LOG.isDebugEnabled()) {
LOG.debug("Processing incremental container report from data node {}",
- report.getDatanodeDetails());
+ dnFromReport);
+ }
+
+ DatanodeDetails dd =
+ getNodeManager().getNodeByUuid(dnFromReport.getUuidString());
+ if (dd == null) {
+ LOG.warn("Received container report from unknown datanode {}",
+ dnFromReport);
+ return;
}
ReconContainerManager containerManager =
@@ -61,7 +70,6 @@ public class ReconIncrementalContainerReportHandler
for (ContainerReplicaProto replicaProto :
report.getReport().getReportList()) {
try {
- final DatanodeDetails dd = report.getDatanodeDetails();
final ContainerID id = ContainerID.valueof(
replicaProto.getContainerID());
try {
diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/scm/TestReconIncrementalContainerReportHandler.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/scm/TestReconIncrementalContainerReportHandler.java
index d4f28c0..dacf293 100644
--- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/scm/TestReconIncrementalContainerReportHandler.java
+++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/scm/TestReconIncrementalContainerReportHandler.java
@@ -22,21 +22,30 @@ import static org.apache.hadoop.hdds.protocol.MockDatanodeDetails.randomDatanode
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
import static org.mockito.Mockito.mock;
-import static org.mockito.Mockito.times;
-import static org.mockito.Mockito.verify;
import static org.mockito.Mockito.when;
import java.io.IOException;
+import java.nio.file.Path;
+import java.nio.file.Paths;
+import java.util.UUID;
+import org.apache.hadoop.hdds.HddsConfigKeys;
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.hdds.protocol.DatanodeDetails;
import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ContainerReplicaProto;
import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ContainerReplicaProto.State;
import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.IncrementalContainerReportProto;
import org.apache.hadoop.hdds.scm.container.ContainerID;
+import org.apache.hadoop.hdds.scm.net.NetworkTopology;
+import org.apache.hadoop.hdds.scm.net.NetworkTopologyImpl;
import org.apache.hadoop.hdds.scm.node.NodeManager;
+import org.apache.hadoop.hdds.scm.node.SCMNodeManager;
import org.apache.hadoop.hdds.scm.node.states.NodeNotFoundException;
import org.apache.hadoop.hdds.scm.server.SCMDatanodeHeartbeatDispatcher.IncrementalContainerReportFromDatanode;
+import org.apache.hadoop.hdds.scm.server.SCMStorageConfig;
import org.apache.hadoop.hdds.server.events.EventPublisher;
+import org.apache.hadoop.hdds.server.events.EventQueue;
+import org.apache.hadoop.test.GenericTestUtils;
import org.junit.Test;
/**
@@ -59,17 +68,26 @@ public class TestReconIncrementalContainerReportHandler
datanodeDetails.getUuidString());
when(reportMock.getReport()).thenReturn(containerReport);
- NodeManager nodeManagerMock = mock(NodeManager.class);
+ final String path =
+ GenericTestUtils.getTempPath(UUID.randomUUID().toString());
+ Path scmPath = Paths.get(path, "scm-meta");
+ final OzoneConfiguration conf = new OzoneConfiguration();
+ conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, scmPath.toString());
+ NetworkTopology clusterMap = new NetworkTopologyImpl(conf);
+ EventQueue eventQueue = new EventQueue();
+ SCMStorageConfig storageConfig = new SCMStorageConfig(conf);
+ NodeManager nodeManager =
+ new SCMNodeManager(conf, storageConfig, eventQueue, clusterMap);
+ nodeManager.register(datanodeDetails, null, null);
ReconContainerManager containerManager = getContainerManager();
ReconIncrementalContainerReportHandler reconIcr =
- new ReconIncrementalContainerReportHandler(nodeManagerMock,
+ new ReconIncrementalContainerReportHandler(nodeManager,
containerManager);
EventPublisher eventPublisherMock = mock(EventPublisher.class);
reconIcr.onMessage(reportMock, eventPublisherMock);
- verify(nodeManagerMock, times(1))
- .addContainer(datanodeDetails, containerID);
+ nodeManager.addContainer(datanodeDetails, containerID);
assertTrue(containerManager.exists(containerID));
assertEquals(1, containerManager.getContainerReplicas(containerID).size());
}
---------------------------------------------------------------------
To unsubscribe, e-mail: ozone-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: ozone-commits-help@hadoop.apache.org