You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@ozone.apache.org by av...@apache.org on 2020/10/06 19:43:04 UTC
[hadoop-ozone] branch HDDS-3698-upgrade updated: HDDS-4253. Add
LayoutVersion request/response for DN registration. (#1457)
This is an automated email from the ASF dual-hosted git repository.
avijayan pushed a commit to branch HDDS-3698-upgrade
in repository https://gitbox.apache.org/repos/asf/hadoop-ozone.git
The following commit(s) were added to refs/heads/HDDS-3698-upgrade by this push:
new b07927e HDDS-4253. Add LayoutVersion request/response for DN registration. (#1457)
b07927e is described below
commit b07927e81011d9df0389cb112f05169116b398d6
Author: prashantpogde <pr...@gmail.com>
AuthorDate: Tue Oct 6 12:37:52 2020 -0700
HDDS-4253. Add LayoutVersion request/response for DN registration. (#1457)
---
.../hdds/upgrade/HDDSLayoutFeatureCatalog.java | 4 +-
.../java/org/apache/hadoop/ozone/OzoneConsts.java | 2 -
.../states/endpoint/RegisterEndpointTask.java | 4 ++
.../protocol/StorageContainerNodeProtocol.java | 8 +++-
.../apache/hadoop/hdds/scm/node/NodeManager.java | 5 +++
.../hadoop/hdds/scm/node/SCMNodeManager.java | 35 +++++++++++++--
.../hdds/scm/server/SCMDatanodeProtocolServer.java | 8 ++--
.../hdds/scm/server/StorageContainerManager.java | 14 +++++-
.../java/org/apache/hadoop/hdds/scm/TestUtils.java | 2 +-
.../hadoop/hdds/scm/container/MockNodeManager.java | 10 +++--
.../TestIncrementalContainerReportHandler.java | 37 ++++++++++------
.../hdds/scm/node/TestContainerPlacement.java | 11 ++++-
.../hadoop/hdds/scm/node/TestDeadNodeHandler.java | 18 ++++----
.../hdds/scm/node/TestNodeReportHandler.java | 17 +++++++-
.../hadoop/hdds/scm/node/TestSCMNodeManager.java | 51 +++++++++++++++++++---
.../hadoop/hdds/scm/node/TestStatisticsUpdate.java | 4 +-
.../scm/server/TestSCMBlockProtocolServer.java | 2 +-
.../testutils/ReplicationNodeManagerMock.java | 6 ++-
.../hadoop/ozone/scm/node/TestSCMNodeMetrics.java | 14 +++++-
.../hadoop/ozone/recon/scm/ReconNodeManager.java | 9 ++--
.../scm/ReconStorageContainerManagerFacade.java | 7 ++-
.../hadoop/ozone/recon/api/TestEndpoints.java | 4 +-
.../scm/AbstractReconContainerManagerTest.java | 12 ++++-
...TestReconIncrementalContainerReportHandler.java | 15 ++++++-
.../ozone/recon/scm/TestReconNodeManager.java | 11 +++--
.../ozone/recon/scm/TestReconPipelineManager.java | 26 ++++++++++-
26 files changed, 266 insertions(+), 70 deletions(-)
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/upgrade/HDDSLayoutFeatureCatalog.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/upgrade/HDDSLayoutFeatureCatalog.java
index 9793f5d..830b699 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/upgrade/HDDSLayoutFeatureCatalog.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/upgrade/HDDSLayoutFeatureCatalog.java
@@ -31,8 +31,8 @@ public class HDDSLayoutFeatureCatalog {
* List of HDDS Features.
*/
public enum HDDSLayoutFeature implements LayoutFeature {
- INITIAL_VERSION(0, "Initial Layout Version");
-
+ INITIAL_VERSION(0, "Initial Layout Version"),
+ FIRST_UPGRADE_VERSION(1, "First Layout Version After Upgrade");
private int layoutVersion;
private String description;
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java
index 0954bf0..9854d40 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java
@@ -38,8 +38,6 @@ public final class OzoneConsts {
public static final String STORAGE_DIR = "scm";
public static final String SCM_ID = "scmUuid";
- public static final String DATANODE_STORAGE_CONFIG = "datanode.config";
-
public static final String OZONE_SIMPLE_ROOT_USER = "root";
public static final String OZONE_SIMPLE_HDFS_USER = "hdfs";
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/RegisterEndpointTask.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/RegisterEndpointTask.java
index 6f8baa6..a6075b8 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/RegisterEndpointTask.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/RegisterEndpointTask.java
@@ -16,6 +16,8 @@
*/
package org.apache.hadoop.ozone.container.common.states.endpoint;
+import static org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMRegisteredResponseProto.ErrorCode.success;
+
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Preconditions;
import org.apache.commons.lang3.StringUtils;
@@ -160,6 +162,8 @@ public final class RegisterEndpointTask implements
"Unexpected datanode ID in the response.");
Preconditions.checkState(!StringUtils.isBlank(response.getClusterID()),
"Invalid cluster ID in the response.");
+ Preconditions.checkState(response.getErrorCode() == success,
+ "DataNode has higher Software Layout Version than SCM.");
if (response.hasHostname() && response.hasIpAddress()) {
datanodeDetails.setHostName(response.getHostname());
datanodeDetails.setIpAddress(response.getIpAddress());
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/StorageContainerNodeProtocol.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/StorageContainerNodeProtocol.java
index cb55880..3375773 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/StorageContainerNodeProtocol.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/StorageContainerNodeProtocol.java
@@ -20,7 +20,9 @@ package org.apache.hadoop.ozone.protocol;
import org.apache.hadoop.hdds.annotation.InterfaceAudience;
import org.apache.hadoop.hdds.protocol.DatanodeDetails;
import org.apache.hadoop.hdds.protocol.proto
- .StorageContainerDatanodeProtocolProtos.PipelineReportsProto;
+ .StorageContainerDatanodeProtocolProtos.LayoutVersionProto;
+import org.apache.hadoop.hdds.protocol.proto
+ .StorageContainerDatanodeProtocolProtos.PipelineReportsProto;
import org.apache.hadoop.hdds.protocol.proto
.StorageContainerDatanodeProtocolProtos.NodeReportProto;
import org.apache.hadoop.hdds.protocol.proto
@@ -54,11 +56,13 @@ public interface StorageContainerNodeProtocol {
* @param datanodeDetails DatanodeDetails
* @param nodeReport NodeReportProto
* @param pipelineReport PipelineReportsProto
+ * @param layoutVersionInfo LayoutVersionProto
* @return SCMRegisteredResponseProto
*/
RegisteredCommand register(DatanodeDetails datanodeDetails,
NodeReportProto nodeReport,
- PipelineReportsProto pipelineReport);
+ PipelineReportsProto pipelineReport,
+ LayoutVersionProto layoutVersionInfo);
/**
* Send heartbeat to indicate the datanode is alive and doing well.
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeManager.java
index df21b84..82281bf 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeManager.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeManager.java
@@ -28,6 +28,7 @@ import org.apache.hadoop.hdds.scm.node.states.NodeNotFoundException;
import org.apache.hadoop.hdds.protocol.DatanodeDetails;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState;
import org.apache.hadoop.hdds.server.events.EventHandler;
+import org.apache.hadoop.hdds.upgrade.HDDSLayoutVersionManager;
import org.apache.hadoop.ozone.protocol.StorageContainerNodeProtocol;
import org.apache.hadoop.ozone.protocol.commands.CommandForDatanode;
import org.apache.hadoop.ozone.protocol.commands.SCMCommand;
@@ -215,4 +216,8 @@ public interface NodeManager extends StorageContainerNodeProtocol,
NetworkTopology getClusterNetworkTopologyMap();
int getNumHealthyVolumes(List <DatanodeDetails> dnList);
+
+ default HDDSLayoutVersionManager getLayoutVersionManager(){
+ return null;
+ }
}
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeManager.java
index 1a0cec3..359894d 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeManager.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeManager.java
@@ -37,6 +37,8 @@ import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.hdds.protocol.DatanodeDetails;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState;
import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos;
+import org.apache.hadoop.hdds.protocol.proto
+ .StorageContainerDatanodeProtocolProtos.LayoutVersionProto;
import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.NodeReportProto;
import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.PipelineReportsProto;
import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMRegisteredResponseProto.ErrorCode;
@@ -53,6 +55,7 @@ import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
import org.apache.hadoop.hdds.scm.pipeline.PipelineID;
import org.apache.hadoop.hdds.scm.server.SCMStorageConfig;
import org.apache.hadoop.hdds.server.events.EventPublisher;
+import org.apache.hadoop.hdds.upgrade.HDDSLayoutVersionManager;
import org.apache.hadoop.ipc.Server;
import org.apache.hadoop.metrics2.util.MBeans;
import org.apache.hadoop.net.CachedDNSToSwitchMapping;
@@ -103,17 +106,21 @@ public class SCMNodeManager implements NodeManager {
private final boolean useHostname;
private final ConcurrentHashMap<String, Set<String>> dnsToUuidMap =
new ConcurrentHashMap<>();
+ private final HDDSLayoutVersionManager scmLayoutVersionManager;
/**
* Constructs SCM machine Manager.
*/
public SCMNodeManager(OzoneConfiguration conf,
- SCMStorageConfig scmStorageConfig, EventPublisher eventPublisher,
- NetworkTopology networkTopology) {
+ SCMStorageConfig scmStorageConfig,
+ EventPublisher eventPublisher,
+ NetworkTopology networkTopology,
+ HDDSLayoutVersionManager layoutVersionManager) {
this.nodeStateManager = new NodeStateManager(conf, eventPublisher);
this.version = VersionInfo.getLatestVersion();
this.commandQueue = new CommandQueue();
this.scmStorageConfig = scmStorageConfig;
+ this.scmLayoutVersionManager = layoutVersionManager;
LOG.info("Entering startup safe mode.");
registerMXBean();
this.metrics = SCMNodeMetrics.create(this);
@@ -240,8 +247,19 @@ public class SCMNodeManager implements NodeManager {
@Override
public RegisteredCommand register(
DatanodeDetails datanodeDetails, NodeReportProto nodeReport,
- PipelineReportsProto pipelineReportsProto) {
-
+ PipelineReportsProto pipelineReportsProto,
+ LayoutVersionProto layoutInfo) {
+
+ if (layoutInfo != null) {
+ if (layoutInfo.getSoftwareLayoutVersion() >
+ scmLayoutVersionManager.getSoftwareLayoutVersion()) {
+ return RegisteredCommand.newBuilder()
+ .setErrorCode(ErrorCode.errorNodeNotPermitted)
+ .setDatanode(datanodeDetails)
+ .setClusterID(this.scmStorageConfig.getClusterID())
+ .build();
+ }
+ }
if (!isNodeRegistered(datanodeDetails)) {
InetAddress dnAddress = Server.getRemoteIp();
if (dnAddress != null) {
@@ -742,4 +760,13 @@ public class SCMNodeManager implements NodeManager {
long getSkippedHealthChecks() {
return nodeStateManager.getSkippedHealthChecks();
}
+
+ /**
+ * @return HDDSLayoutVersionManager
+ */
+ @VisibleForTesting
+ @Override
+ public HDDSLayoutVersionManager getLayoutVersionManager() {
+ return scmLayoutVersionManager;
+ }
}
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMDatanodeProtocolServer.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMDatanodeProtocolServer.java
index a1ee9e5..1b4c370 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMDatanodeProtocolServer.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMDatanodeProtocolServer.java
@@ -33,6 +33,8 @@ import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.hdds.protocol.DatanodeDetails;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos;
+import org.apache.hadoop.hdds.protocol.proto
+ .StorageContainerDatanodeProtocolProtos.LayoutVersionProto;
import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ContainerReportsProto;
import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.NodeReportProto;
import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.PipelineReportsProto;
@@ -207,9 +209,8 @@ public class SCMDatanodeProtocolServer implements
NodeReportProto nodeReport,
ContainerReportsProto containerReportsProto,
PipelineReportsProto pipelineReportsProto,
- StorageContainerDatanodeProtocolProtos.LayoutVersionProto layoutInfo)
+ LayoutVersionProto layoutInfo)
throws IOException {
- //TODO : DataNode-Upgrade: layoutinfo related processing.
DatanodeDetails datanodeDetails = DatanodeDetails
.getFromProtoBuf(datanodeDetailsProto);
boolean auditSuccess = true;
@@ -218,7 +219,8 @@ public class SCMDatanodeProtocolServer implements
// TODO : Return the list of Nodes that forms the SCM HA.
RegisteredCommand registeredCommand = scm.getScmNodeManager()
- .register(datanodeDetails, nodeReport, pipelineReportsProto);
+ .register(datanodeDetails, nodeReport, pipelineReportsProto,
+ layoutInfo);
if (registeredCommand.getError()
== SCMRegisteredResponseProto.ErrorCode.success) {
eventPublisher.fireEvent(CONTAINER_REPORT,
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java
index 3cf12e7..91e8cb9 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java
@@ -93,6 +93,7 @@ import org.apache.hadoop.hdds.security.x509.certificate.utils.CertificateCodec;
import org.apache.hadoop.hdds.server.ServiceRuntimeInfoImpl;
import org.apache.hadoop.hdds.server.events.EventPublisher;
import org.apache.hadoop.hdds.server.events.EventQueue;
+import org.apache.hadoop.hdds.upgrade.HDDSLayoutVersionManager;
import org.apache.hadoop.hdds.utils.HddsServerUtil;
import org.apache.hadoop.hdds.utils.HddsVersionInfo;
import org.apache.hadoop.hdds.utils.LegacyHadoopConfigurationSource;
@@ -203,6 +204,8 @@ public final class StorageContainerManager extends ServiceRuntimeInfoImpl
private NetworkTopology clusterMap;
private PipelineChoosePolicy pipelineChoosePolicy;
+ private HDDSLayoutVersionManager scmLayoutVersionManager;
+
/**
* Creates a new StorageContainerManager. Configuration will be
* updated with information on the actual listening addresses used
@@ -250,6 +253,9 @@ public final class StorageContainerManager extends ServiceRuntimeInfoImpl
"failure.", ResultCodes.SCM_NOT_INITIALIZED);
}
+ scmLayoutVersionManager =
+ HDDSLayoutVersionManager.initialize(scmStorageConfig);
+
/**
* Important : This initialization sequence is assumed by some of our tests.
* The testSecureOzoneCluster assumes that security checks have to be
@@ -397,8 +403,8 @@ public final class StorageContainerManager extends ServiceRuntimeInfoImpl
if(configurator.getScmNodeManager() != null) {
scmNodeManager = configurator.getScmNodeManager();
} else {
- scmNodeManager = new SCMNodeManager(
- conf, scmStorageConfig, eventQueue, clusterMap);
+ scmNodeManager = new SCMNodeManager(conf, scmStorageConfig, eventQueue,
+ clusterMap, scmLayoutVersionManager);
}
placementMetrics = SCMContainerPlacementMetrics.create();
@@ -1149,4 +1155,8 @@ public final class StorageContainerManager extends ServiceRuntimeInfoImpl
public String getClusterId() {
return getScmStorageConfig().getClusterID();
}
+
+ public HDDSLayoutVersionManager getLayoutVersionManager() {
+ return scmLayoutVersionManager;
+ }
}
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/TestUtils.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/TestUtils.java
index f4f1759..dd3b101 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/TestUtils.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/TestUtils.java
@@ -116,7 +116,7 @@ public final class TestUtils {
SCMNodeManager nodeManager) {
return getDatanodeDetails(
nodeManager.register(MockDatanodeDetails.randomDatanodeDetails(), null,
- getRandomPipelineReports()));
+ getRandomPipelineReports(), null));
}
/**
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/MockNodeManager.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/MockNodeManager.java
index 4b8b37d..dab457e 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/MockNodeManager.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/MockNodeManager.java
@@ -19,6 +19,8 @@ package org.apache.hadoop.hdds.scm.container;
import org.apache.hadoop.hdds.protocol.MockDatanodeDetails;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.hdds.protocol.proto
+ .StorageContainerDatanodeProtocolProtos.LayoutVersionProto;
+import org.apache.hadoop.hdds.protocol.proto
.StorageContainerDatanodeProtocolProtos.PipelineReportsProto;
import org.apache.hadoop.hdds.scm.net.NetConstants;
import org.apache.hadoop.hdds.scm.net.NetworkTopology;
@@ -109,14 +111,14 @@ public class MockNodeManager implements NodeManager {
if (!nodes.isEmpty()) {
for (int x = 0; x < nodes.size(); x++) {
DatanodeDetails node = nodes.get(x);
- register(node, null, null);
+ register(node, null, null, null);
populateNodeMetric(node, x);
}
}
if (initializeFakeNodes) {
for (int x = 0; x < nodeCount; x++) {
DatanodeDetails dd = MockDatanodeDetails.randomDatanodeDetails();
- register(dd, null, null);
+ register(dd, null, null, null);
populateNodeMetric(dd, x);
}
}
@@ -441,7 +443,9 @@ public class MockNodeManager implements NodeManager {
*/
@Override
public RegisteredCommand register(DatanodeDetails datanodeDetails,
- NodeReportProto nodeReport, PipelineReportsProto pipelineReportsProto) {
+ NodeReportProto nodeReport,
+ PipelineReportsProto pipelineReportsProto,
+ LayoutVersionProto layoutInfo) {
try {
node2ContainerMap.insertNewDatanode(datanodeDetails.getUuid(),
Collections.emptySet());
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestIncrementalContainerReportHandler.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestIncrementalContainerReportHandler.java
index 1af2f73..5d9246a 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestIncrementalContainerReportHandler.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestIncrementalContainerReportHandler.java
@@ -35,6 +35,7 @@ import org.apache.hadoop.hdds.scm.server.SCMDatanodeHeartbeatDispatcher
import org.apache.hadoop.hdds.scm.server.SCMStorageConfig;
import org.apache.hadoop.hdds.server.events.EventPublisher;
import org.apache.hadoop.hdds.server.events.EventQueue;
+import org.apache.hadoop.hdds.upgrade.HDDSLayoutVersionManager;
import org.apache.hadoop.test.GenericTestUtils;
import org.junit.After;
import org.junit.Assert;
@@ -61,6 +62,9 @@ public class TestIncrementalContainerReportHandler {
private ContainerManager containerManager;
private ContainerStateManager containerStateManager;
private EventPublisher publisher;
+ private static final Integer SOFTWARE_LAYOUT_VERSION = 1;
+ private static final Integer METADATA_LAYOUT_VERSION = 1;
+ private HDDSLayoutVersionManager versionManager;
@Before
public void setup() throws IOException {
@@ -73,8 +77,15 @@ public class TestIncrementalContainerReportHandler {
NetworkTopology clusterMap = new NetworkTopologyImpl(conf);
EventQueue eventQueue = new EventQueue();
SCMStorageConfig storageConfig = new SCMStorageConfig(conf);
+ this.versionManager =
+ Mockito.mock(HDDSLayoutVersionManager.class);
+ Mockito.when(versionManager.getMetadataLayoutVersion())
+ .thenReturn(METADATA_LAYOUT_VERSION);
+ Mockito.when(versionManager.getSoftwareLayoutVersion())
+ .thenReturn(SOFTWARE_LAYOUT_VERSION);
this.nodeManager =
- new SCMNodeManager(conf, storageConfig, eventQueue, clusterMap);
+ new SCMNodeManager(conf, storageConfig, eventQueue, clusterMap,
+ versionManager);
this.containerStateManager = new ContainerStateManager(conf);
this.publisher = Mockito.mock(EventPublisher.class);
@@ -123,9 +134,9 @@ public class TestIncrementalContainerReportHandler {
final DatanodeDetails datanodeOne = randomDatanodeDetails();
final DatanodeDetails datanodeTwo = randomDatanodeDetails();
final DatanodeDetails datanodeThree = randomDatanodeDetails();
- nodeManager.register(datanodeOne, null, null);
- nodeManager.register(datanodeTwo, null, null);
- nodeManager.register(datanodeThree, null, null);
+ nodeManager.register(datanodeOne, null, null, null);
+ nodeManager.register(datanodeTwo, null, null, null);
+ nodeManager.register(datanodeThree, null, null, null);
final Set<ContainerReplica> containerReplicas = getReplicas(
container.containerID(),
ContainerReplicaProto.State.CLOSING,
@@ -160,9 +171,9 @@ public class TestIncrementalContainerReportHandler {
final DatanodeDetails datanodeOne = randomDatanodeDetails();
final DatanodeDetails datanodeTwo = randomDatanodeDetails();
final DatanodeDetails datanodeThree = randomDatanodeDetails();
- nodeManager.register(datanodeOne, null, null);
- nodeManager.register(datanodeTwo, null, null);
- nodeManager.register(datanodeThree, null, null);
+ nodeManager.register(datanodeOne, null, null, null);
+ nodeManager.register(datanodeTwo, null, null, null);
+ nodeManager.register(datanodeThree, null, null, null);
final Set<ContainerReplica> containerReplicas = getReplicas(
container.containerID(),
ContainerReplicaProto.State.CLOSING,
@@ -198,9 +209,9 @@ public class TestIncrementalContainerReportHandler {
final DatanodeDetails datanodeOne = randomDatanodeDetails();
final DatanodeDetails datanodeTwo = randomDatanodeDetails();
final DatanodeDetails datanodeThree = randomDatanodeDetails();
- nodeManager.register(datanodeOne, null, null);
- nodeManager.register(datanodeTwo, null, null);
- nodeManager.register(datanodeThree, null, null);
+ nodeManager.register(datanodeOne, null, null, null);
+ nodeManager.register(datanodeTwo, null, null, null);
+ nodeManager.register(datanodeThree, null, null, null);
final Set<ContainerReplica> containerReplicas = getReplicas(
container.containerID(),
ContainerReplicaProto.State.CLOSING,
@@ -239,9 +250,9 @@ public class TestIncrementalContainerReportHandler {
final DatanodeDetails datanodeOne = randomDatanodeDetails();
final DatanodeDetails datanodeTwo = randomDatanodeDetails();
final DatanodeDetails datanodeThree = randomDatanodeDetails();
- nodeManager.register(datanodeOne, null, null);
- nodeManager.register(datanodeTwo, null, null);
- nodeManager.register(datanodeThree, null, null);
+ nodeManager.register(datanodeOne, null, null, null);
+ nodeManager.register(datanodeTwo, null, null, null);
+ nodeManager.register(datanodeThree, null, null, null);
final Set<ContainerReplica> containerReplicas = getReplicas(
container.containerID(),
ContainerReplicaProto.State.CLOSED,
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestContainerPlacement.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestContainerPlacement.java
index 797709e..a7f6466 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestContainerPlacement.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestContainerPlacement.java
@@ -42,6 +42,7 @@ import org.apache.hadoop.hdds.scm.pipeline.PipelineManager;
import org.apache.hadoop.hdds.scm.pipeline.SCMPipelineManager;
import org.apache.hadoop.hdds.scm.server.SCMStorageConfig;
import org.apache.hadoop.hdds.server.events.EventQueue;
+import org.apache.hadoop.hdds.upgrade.HDDSLayoutVersionManager;
import org.apache.hadoop.ozone.OzoneConsts;
import org.apache.hadoop.ozone.container.common.SCMTestUtils;
import org.apache.hadoop.test.PathUtils;
@@ -61,6 +62,8 @@ import org.mockito.Mockito;
* Test for different container placement policy.
*/
public class TestContainerPlacement {
+ private static final int SOFTWARE_LAYOUT_VERSION = 1;
+ private static final int METADATA_LAYOUT_VERSION = 1;
@Rule
public ExpectedException thrown = ExpectedException.none();
@@ -105,8 +108,14 @@ public class TestContainerPlacement {
SCMStorageConfig storageConfig = Mockito.mock(SCMStorageConfig.class);
Mockito.when(storageConfig.getClusterID()).thenReturn("cluster1");
+ HDDSLayoutVersionManager versionManager =
+ Mockito.mock(HDDSLayoutVersionManager.class);
+ Mockito.when(versionManager.getMetadataLayoutVersion())
+ .thenReturn(METADATA_LAYOUT_VERSION);
+ Mockito.when(versionManager.getSoftwareLayoutVersion())
+ .thenReturn(SOFTWARE_LAYOUT_VERSION);
SCMNodeManager nodeManager = new SCMNodeManager(config,
- storageConfig, eventQueue, null);
+ storageConfig, eventQueue, null, versionManager);
return nodeManager;
}
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestDeadNodeHandler.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestDeadNodeHandler.java
index f05be76..3e725ce 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestDeadNodeHandler.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestDeadNodeHandler.java
@@ -140,25 +140,25 @@ public class TestDeadNodeHandler {
// test case happy.
nodeManager.register(datanode1,
- TestUtils.createNodeReport(storageOne), null);
+ TestUtils.createNodeReport(storageOne), null, null);
nodeManager.register(datanode2,
- TestUtils.createNodeReport(storageOne), null);
+ TestUtils.createNodeReport(storageOne), null, null);
nodeManager.register(datanode3,
- TestUtils.createNodeReport(storageOne), null);
+ TestUtils.createNodeReport(storageOne), null, null);
nodeManager.register(MockDatanodeDetails.randomDatanodeDetails(),
- TestUtils.createNodeReport(storageOne), null);
+ TestUtils.createNodeReport(storageOne), null, null);
nodeManager.register(MockDatanodeDetails.randomDatanodeDetails(),
- TestUtils.createNodeReport(storageOne), null);
+ TestUtils.createNodeReport(storageOne), null, null);
nodeManager.register(MockDatanodeDetails.randomDatanodeDetails(),
- TestUtils.createNodeReport(storageOne), null);
+ TestUtils.createNodeReport(storageOne), null, null);
nodeManager.register(MockDatanodeDetails.randomDatanodeDetails(),
- TestUtils.createNodeReport(storageOne), null);
+ TestUtils.createNodeReport(storageOne), null, null);
nodeManager.register(MockDatanodeDetails.randomDatanodeDetails(),
- TestUtils.createNodeReport(storageOne), null);
+ TestUtils.createNodeReport(storageOne), null, null);
nodeManager.register(MockDatanodeDetails.randomDatanodeDetails(),
- TestUtils.createNodeReport(storageOne), null);
+ TestUtils.createNodeReport(storageOne), null, null);
LambdaTestUtils.await(120000, 1000,
() -> {
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestNodeReportHandler.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestNodeReportHandler.java
index 69b031c..2710225 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestNodeReportHandler.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestNodeReportHandler.java
@@ -32,6 +32,7 @@ import org.apache.hadoop.hdds.scm.server.SCMStorageConfig;
import org.apache.hadoop.hdds.server.events.Event;
import org.apache.hadoop.hdds.server.events.EventPublisher;
import org.apache.hadoop.hdds.server.events.EventQueue;
+import org.apache.hadoop.hdds.upgrade.HDDSLayoutVersionManager;
import org.apache.hadoop.test.GenericTestUtils;
import org.junit.Assert;
import org.junit.Before;
@@ -48,6 +49,9 @@ public class TestNodeReportHandler implements EventPublisher {
private static final Logger LOG = LoggerFactory
.getLogger(TestNodeReportHandler.class);
private NodeReportHandler nodeReportHandler;
+ private HDDSLayoutVersionManager versionManager;
+ private static final Integer SOFTWARE_LAYOUT_VERSION = 1;
+ private static final Integer METADATA_LAYOUT_VERSION = 1;
private SCMNodeManager nodeManager;
private String storagePath = GenericTestUtils.getRandomizedTempPath()
.concat("/" + UUID.randomUUID().toString());
@@ -58,8 +62,16 @@ public class TestNodeReportHandler implements EventPublisher {
SCMStorageConfig storageConfig = Mockito.mock(SCMStorageConfig.class);
Mockito.when(storageConfig.getClusterID()).thenReturn("cluster1");
NetworkTopology clusterMap = new NetworkTopologyImpl(conf);
+
+ this.versionManager =
+ Mockito.mock(HDDSLayoutVersionManager.class);
+ Mockito.when(versionManager.getMetadataLayoutVersion())
+ .thenReturn(METADATA_LAYOUT_VERSION);
+ Mockito.when(versionManager.getSoftwareLayoutVersion())
+ .thenReturn(SOFTWARE_LAYOUT_VERSION);
nodeManager =
- new SCMNodeManager(conf, storageConfig, new EventQueue(), clusterMap);
+ new SCMNodeManager(conf, storageConfig, new EventQueue(), clusterMap,
+ versionManager);
nodeReportHandler = new NodeReportHandler(nodeManager);
}
@@ -72,7 +84,8 @@ public class TestNodeReportHandler implements EventPublisher {
SCMNodeMetric nodeMetric = nodeManager.getNodeStat(dn);
Assert.assertNull(nodeMetric);
- nodeManager.register(dn, getNodeReport(dn, storageOne).getReport(), null);
+ nodeManager.register(dn, getNodeReport(dn, storageOne).getReport(), null,
+ null);
nodeMetric = nodeManager.getNodeStat(dn);
Assert.assertTrue(nodeMetric.get().getCapacity().get() == 100);
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestSCMNodeManager.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestSCMNodeManager.java
index 7a58d46..62b7ac4 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestSCMNodeManager.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestSCMNodeManager.java
@@ -35,6 +35,8 @@ import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.hdds.protocol.DatanodeDetails;
import org.apache.hadoop.hdds.protocol.MockDatanodeDetails;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
+import org.apache.hadoop.hdds.protocol.proto
+ .StorageContainerDatanodeProtocolProtos.LayoutVersionProto;
import org.apache.hadoop.hdds.scm.server.SCMDatanodeHeartbeatDispatcher.NodeReportFromDatanode;
import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.NodeReportProto;
import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.StorageReportProto;
@@ -49,6 +51,7 @@ import org.apache.hadoop.hdds.server.events.EventPublisher;
import org.apache.hadoop.hdds.server.events.EventQueue;
import org.apache.hadoop.ozone.protocol.commands.CloseContainerCommand;
import org.apache.hadoop.ozone.protocol.commands.CommandForDatanode;
+import org.apache.hadoop.ozone.protocol.commands.RegisteredCommand;
import org.apache.hadoop.ozone.protocol.commands.SCMCommand;
import org.apache.hadoop.security.authentication.client.AuthenticationException;
import org.apache.hadoop.test.GenericTestUtils;
@@ -64,10 +67,13 @@ import static org.apache.hadoop.hdds.protocol.MockDatanodeDetails.randomDatanode
import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState.DEAD;
import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState.HEALTHY;
import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState.STALE;
+import static org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMRegisteredResponseProto.ErrorCode.errorNodeNotPermitted;
+import static org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMRegisteredResponseProto.ErrorCode.success;
import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_DEADNODE_INTERVAL;
import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL;
import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_RATIS_PIPELINE_LIMIT;
import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_STALENODE_INTERVAL;
+import static org.apache.hadoop.hdds.scm.TestUtils.getRandomPipelineReports;
import static org.apache.hadoop.hdds.scm.events.SCMEvents.DATANODE_COMMAND;
import org.junit.After;
import org.junit.Assert;
@@ -171,6 +177,38 @@ public class TestSCMNodeManager {
}
/**
+ * Tests that Node manager handles Layout versions correctly.
+ *
+ * @throws IOException
+ * @throws InterruptedException
+ * @throws AuthenticationException
+ */
+ @Test
+ public void testScmLayoutOnRegister()
+ throws IOException, InterruptedException, AuthenticationException {
+
+ try (SCMNodeManager nodeManager = createNodeManager(getConf())) {
+ Integer nodeManagerSoftwareLayoutVersion =
+ nodeManager.getLayoutVersionManager().getSoftwareLayoutVersion();
+ LayoutVersionProto layoutInfoSuccess = LayoutVersionProto.newBuilder()
+ .setMetadataLayoutVersion(1)
+ .setSoftwareLayoutVersion(nodeManagerSoftwareLayoutVersion).build();
+ LayoutVersionProto layoutInfoFailure = LayoutVersionProto.newBuilder()
+ .setMetadataLayoutVersion(1)
+ .setSoftwareLayoutVersion(nodeManagerSoftwareLayoutVersion + 1)
+ .build();
+ RegisteredCommand rcmd = nodeManager.register(
+ MockDatanodeDetails.randomDatanodeDetails(), null,
+ getRandomPipelineReports(), layoutInfoSuccess);
+ assertTrue(rcmd.getError() == success);
+ rcmd = nodeManager.register(
+ MockDatanodeDetails.randomDatanodeDetails(), null,
+ getRandomPipelineReports(), layoutInfoFailure);
+ assertTrue(rcmd.getError() == errorNodeNotPermitted);
+ }
+ }
+
+ /**
* asserts that if we send no heartbeats node manager stays in safemode.
*
* @throws IOException
@@ -859,7 +897,8 @@ public class TestSCMNodeManager {
String storagePath = testDir.getAbsolutePath() + "/" + dnId;
StorageReportProto report = TestUtils
.createStorageReport(dnId, storagePath, capacity, used, free, null);
- nodeManager.register(dn, TestUtils.createNodeReport(report), null);
+ nodeManager.register(dn, TestUtils.createNodeReport(report), null,
+ null);
nodeManager.processHeartbeat(dn);
}
//TODO: wait for EventQueue to be processed
@@ -910,7 +949,7 @@ public class TestSCMNodeManager {
used, free, null, failed));
failed = !failed;
}
- nodeManager.register(dn, TestUtils.createNodeReport(reports), null);
+ nodeManager.register(dn, TestUtils.createNodeReport(reports), null, null);
nodeManager.processHeartbeat(dn);
//TODO: wait for EventQueue to be processed
eventQueue.processAll(8000L);
@@ -1081,7 +1120,7 @@ public class TestSCMNodeManager {
nodemanager
.register(datanodeDetails, TestUtils.createNodeReport(report),
- TestUtils.getRandomPipelineReports());
+ getRandomPipelineReports(), null);
eq.fireEvent(DATANODE_COMMAND,
new CommandForDatanode<>(datanodeDetails.getUuid(),
new CloseContainerCommand(1L,
@@ -1169,7 +1208,7 @@ public class TestSCMNodeManager {
for (int i = 0; i < nodeCount; i++) {
DatanodeDetails node = createDatanodeDetails(
UUID.randomUUID().toString(), hostNames[i], ipAddress[i], null);
- nodeManager.register(node, null, null);
+ nodeManager.register(node, null, null, null);
nodes[i] = node;
}
@@ -1213,7 +1252,7 @@ public class TestSCMNodeManager {
for (int i = 0; i < nodeCount; i++) {
DatanodeDetails node = createDatanodeDetails(
UUID.randomUUID().toString(), hostNames[i], ipAddress[i], null);
- nodeManager.register(node, null, null);
+ nodeManager.register(node, null, null, null);
nodes[i] = node;
}
@@ -1263,7 +1302,7 @@ public class TestSCMNodeManager {
for (int i = 0; i < nodeCount; i++) {
DatanodeDetails node = createDatanodeDetails(
UUID.randomUUID().toString(), hostNames[i], ipAddress[i], null);
- nodeManager.register(node, null, null);
+ nodeManager.register(node, null, null, null);
}
// test get node
Assert.assertEquals(0, nodeManager.getNodesByAddress(null).size());
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestStatisticsUpdate.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestStatisticsUpdate.java
index a6b0339..e07edc4 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestStatisticsUpdate.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestStatisticsUpdate.java
@@ -94,9 +94,9 @@ public class TestStatisticsUpdate {
datanode2.getUuid(), storagePath2, 200, 20, 180, null);
nodeManager.register(datanode1,
- TestUtils.createNodeReport(storageOne), null);
+ TestUtils.createNodeReport(storageOne), null, null);
nodeManager.register(datanode2,
- TestUtils.createNodeReport(storageTwo), null);
+ TestUtils.createNodeReport(storageTwo), null, null);
NodeReportProto nodeReportProto1 = TestUtils.createNodeReport(storageOne);
NodeReportProto nodeReportProto2 = TestUtils.createNodeReport(storageTwo);
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/TestSCMBlockProtocolServer.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/TestSCMBlockProtocolServer.java
index 349e705..945d890 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/TestSCMBlockProtocolServer.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/TestSCMBlockProtocolServer.java
@@ -64,7 +64,7 @@ public class TestSCMBlockProtocolServer {
// add nodes to scm node manager
nodeManager = scm.getScmNodeManager();
for (int i = 0; i < nodeCount; i++) {
- nodeManager.register(randomDatanodeDetails(), null, null);
+ nodeManager.register(randomDatanodeDetails(), null, null, null);
}
server = scm.getBlockProtocolServer();
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/testutils/ReplicationNodeManagerMock.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/testutils/ReplicationNodeManagerMock.java
index a9b879f..8b77899 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/testutils/ReplicationNodeManagerMock.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/testutils/ReplicationNodeManagerMock.java
@@ -17,6 +17,9 @@
package org.apache.hadoop.ozone.container.testutils;
import com.google.common.base.Preconditions;
+
+import org.apache.hadoop.hdds.protocol.proto
+ .StorageContainerDatanodeProtocolProtos.LayoutVersionProto;
import org.apache.hadoop.hdds.protocol.proto
.StorageContainerDatanodeProtocolProtos.PipelineReportsProto;
import org.apache.hadoop.hdds.scm.container.ContainerID;
@@ -264,7 +267,8 @@ public class ReplicationNodeManagerMock implements NodeManager {
@Override
public RegisteredCommand register(DatanodeDetails dd,
NodeReportProto nodeReport,
- PipelineReportsProto pipelineReportsProto) {
+ PipelineReportsProto pipelineReportsProto,
+ LayoutVersionProto layoutInfo) {
return null;
}
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/scm/node/TestSCMNodeMetrics.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/scm/node/TestSCMNodeMetrics.java
index 7576e8b..d20c55b 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/scm/node/TestSCMNodeMetrics.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/scm/node/TestSCMNodeMetrics.java
@@ -35,6 +35,7 @@ import org.apache.hadoop.hdds.scm.node.SCMNodeManager;
import org.apache.hadoop.hdds.scm.node.SCMNodeMetrics;
import org.apache.hadoop.hdds.scm.server.SCMStorageConfig;
import org.apache.hadoop.hdds.server.events.EventQueue;
+import org.apache.hadoop.hdds.upgrade.HDDSLayoutVersionManager;
import org.apache.hadoop.metrics2.MetricsRecordBuilder;
import static org.apache.hadoop.test.MetricsAsserts.assertGauge;
@@ -45,12 +46,15 @@ import org.junit.Assert;
import static org.junit.Assert.assertEquals;
import org.junit.BeforeClass;
import org.junit.Test;
+import org.mockito.Mockito;
/**
* Test cases to verify the metrics exposed by SCMNodeManager.
*/
public class TestSCMNodeMetrics {
+ private static final Integer METADATA_LAYOUT_VERSION = 1;
+ private static final Integer SOFTWARE_LAYOUT_VERSION = 1;
private static SCMNodeManager nodeManager;
private static DatanodeDetails registeredDatanode;
@@ -62,8 +66,14 @@ public class TestSCMNodeMetrics {
EventQueue publisher = new EventQueue();
SCMStorageConfig config =
new SCMStorageConfig(NodeType.DATANODE, new File("/tmp"), "storage");
+ HDDSLayoutVersionManager versionManager =
+ Mockito.mock(HDDSLayoutVersionManager.class);
+ Mockito.when(versionManager.getMetadataLayoutVersion())
+ .thenReturn(METADATA_LAYOUT_VERSION);
+ Mockito.when(versionManager.getSoftwareLayoutVersion())
+ .thenReturn(SOFTWARE_LAYOUT_VERSION);
nodeManager = new SCMNodeManager(source, config, publisher,
- new NetworkTopologyImpl(source));
+ new NetworkTopologyImpl(source), versionManager);
registeredDatanode = DatanodeDetails.newBuilder()
.setHostName("localhost")
@@ -72,7 +82,7 @@ public class TestSCMNodeMetrics {
.build();
nodeManager.register(registeredDatanode, createNodeReport(),
- PipelineReportsProto.newBuilder().build());
+ PipelineReportsProto.newBuilder().build(), null);
}
diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconNodeManager.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconNodeManager.java
index d7a6104..7283f5e 100644
--- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconNodeManager.java
+++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconNodeManager.java
@@ -32,6 +32,7 @@ import org.apache.hadoop.hdds.scm.net.NetworkTopology;
import org.apache.hadoop.hdds.scm.node.SCMNodeManager;
import org.apache.hadoop.hdds.scm.server.SCMStorageConfig;
import org.apache.hadoop.hdds.server.events.EventPublisher;
+import org.apache.hadoop.hdds.upgrade.HDDSLayoutVersionManager;
import org.apache.hadoop.hdds.utils.db.Table;
import org.apache.hadoop.hdds.utils.db.TableIterator;
import org.apache.hadoop.ozone.protocol.commands.CommandForDatanode;
@@ -65,8 +66,10 @@ public class ReconNodeManager extends SCMNodeManager {
SCMStorageConfig scmStorageConfig,
EventPublisher eventPublisher,
NetworkTopology networkTopology,
- Table<UUID, DatanodeDetails> nodeDB) {
- super(conf, scmStorageConfig, eventPublisher, networkTopology);
+ Table<UUID, DatanodeDetails> nodeDB,
+ HDDSLayoutVersionManager scmLayoutVersionManager) {
+ super(conf, scmStorageConfig, eventPublisher, networkTopology,
+ scmLayoutVersionManager);
this.nodeDB = nodeDB;
loadExistingNodes();
}
@@ -78,7 +81,7 @@ public class ReconNodeManager extends SCMNodeManager {
iterator = nodeDB.iterator();
while (iterator.hasNext()) {
DatanodeDetails datanodeDetails = iterator.next().getValue();
- register(datanodeDetails, null, null);
+ register(datanodeDetails, null, null, null);
nodeCount++;
}
LOG.info("Loaded {} nodes from node DB.", nodeCount);
diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconStorageContainerManagerFacade.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconStorageContainerManagerFacade.java
index 3a0342e..15ac8e6 100644
--- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconStorageContainerManagerFacade.java
+++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconStorageContainerManagerFacade.java
@@ -50,6 +50,7 @@ import org.apache.hadoop.hdds.scm.safemode.SafeModeManager;
import org.apache.hadoop.hdds.scm.server.OzoneStorageContainerManager;
import org.apache.hadoop.hdds.scm.server.SCMStorageConfig;
import org.apache.hadoop.hdds.server.events.EventQueue;
+import org.apache.hadoop.hdds.upgrade.HDDSLayoutVersionManager;
import org.apache.hadoop.hdds.utils.db.DBStore;
import org.apache.hadoop.hdds.utils.db.DBStoreBuilder;
import org.apache.hadoop.io.IOUtils;
@@ -87,6 +88,7 @@ public class ReconStorageContainerManagerFacade
private Set<ReconScmTask> reconScmTasks = new HashSet<>();
private SCMContainerPlacementMetrics placementMetrics;
private PlacementPolicy containerPlacementPolicy;
+ private HDDSLayoutVersionManager scmLayoutVersionManager;
@Inject
public ReconStorageContainerManagerFacade(OzoneConfiguration conf,
@@ -102,9 +104,12 @@ public class ReconStorageContainerManagerFacade
dbStore = DBStoreBuilder
.createDBStore(ozoneConfiguration, new ReconSCMDBDefinition());
+ this.scmLayoutVersionManager = HDDSLayoutVersionManager
+ .initialize(this.scmStorageConfig);
this.nodeManager =
new ReconNodeManager(conf, scmStorageConfig, eventQueue, clusterMap,
- ReconSCMDBDefinition.NODES.getTable(dbStore));
+ ReconSCMDBDefinition.NODES.getTable(dbStore),
+ this.scmLayoutVersionManager);
placementMetrics = SCMContainerPlacementMetrics.create();
this.containerPlacementPolicy =
ContainerPlacementPolicyFactory.getPolicy(conf, nodeManager,
diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestEndpoints.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestEndpoints.java
index bbfcade..aaed73b 100644
--- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestEndpoints.java
+++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestEndpoints.java
@@ -284,11 +284,11 @@ public class TestEndpoints extends AbstractReconSqlDBTest {
try {
reconScm.getDatanodeProtocolServer()
.register(datanodeDetailsProto, nodeReportProto,
- containerReportsProto, pipelineReportsProto, layoutInfo);
+ containerReportsProto, pipelineReportsProto, null);
reconScm.getDatanodeProtocolServer()
.register(datanodeDetailsProto2, nodeReportProto2,
ContainerReportsProto.newBuilder().build(),
- PipelineReportsProto.newBuilder().build(), layoutInfo);
+ PipelineReportsProto.newBuilder().build(), null);
// Process all events in the event queue
reconScm.getEventQueue().processAll(1000);
} catch (Exception ex) {
diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/scm/AbstractReconContainerManagerTest.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/scm/AbstractReconContainerManagerTest.java
index 783f42c..365ab5f 100644
--- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/scm/AbstractReconContainerManagerTest.java
+++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/scm/AbstractReconContainerManagerTest.java
@@ -32,6 +32,7 @@ import org.apache.hadoop.hdds.scm.node.SCMNodeManager;
import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
import org.apache.hadoop.hdds.scm.server.SCMStorageConfig;
import org.apache.hadoop.hdds.server.events.EventQueue;
+import org.apache.hadoop.hdds.upgrade.HDDSLayoutVersionManager;
import org.apache.hadoop.hdds.utils.db.DBStore;
import org.apache.hadoop.hdds.utils.db.DBStoreBuilder;
import org.apache.hadoop.hdds.utils.db.Table;
@@ -65,6 +66,9 @@ public class AbstractReconContainerManagerTest {
private ReconPipelineManager pipelineManager;
private ReconContainerManager containerManager;
private DBStore store;
+ private HDDSLayoutVersionManager layoutVersionManager;
+ public static final int SOFTWARE_LAYOUT_VERSION = 1;
+ public static final int METADATA_LAYOUT_VERSION = 1;
@Before
public void setUp() throws Exception {
@@ -76,8 +80,14 @@ public class AbstractReconContainerManagerTest {
scmStorageConfig = new ReconStorageConfig(conf);
NetworkTopology clusterMap = new NetworkTopologyImpl(conf);
EventQueue eventQueue = new EventQueue();
+ layoutVersionManager = mock(HDDSLayoutVersionManager.class);
+ when(layoutVersionManager.getSoftwareLayoutVersion())
+ .thenReturn(SOFTWARE_LAYOUT_VERSION);
+ when(layoutVersionManager.getMetadataLayoutVersion())
+ .thenReturn(METADATA_LAYOUT_VERSION);
NodeManager nodeManager =
- new SCMNodeManager(conf, scmStorageConfig, eventQueue, clusterMap);
+ new SCMNodeManager(conf, scmStorageConfig, eventQueue, clusterMap,
+ layoutVersionManager);
pipelineManager = new ReconPipelineManager(conf, nodeManager,
ReconSCMDBDefinition.PIPELINES.getTable(store), eventQueue);
containerManager = new ReconContainerManager(
diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/scm/TestReconIncrementalContainerReportHandler.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/scm/TestReconIncrementalContainerReportHandler.java
index 1b42f21..d0eacc6 100644
--- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/scm/TestReconIncrementalContainerReportHandler.java
+++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/scm/TestReconIncrementalContainerReportHandler.java
@@ -50,14 +50,17 @@ import org.apache.hadoop.hdds.scm.server.SCMDatanodeHeartbeatDispatcher.Incremen
import org.apache.hadoop.hdds.scm.server.SCMStorageConfig;
import org.apache.hadoop.hdds.server.events.EventPublisher;
import org.apache.hadoop.hdds.server.events.EventQueue;
+import org.apache.hadoop.hdds.upgrade.HDDSLayoutVersionManager;
import org.apache.hadoop.test.GenericTestUtils;
import org.junit.Test;
+import org.mockito.Mockito;
/**
* Test Recon ICR handler.
*/
public class TestReconIncrementalContainerReportHandler
extends AbstractReconContainerManagerTest {
+ private HDDSLayoutVersionManager versionManager;
@Test
public void testProcessICR() throws IOException, NodeNotFoundException {
@@ -81,9 +84,17 @@ public class TestReconIncrementalContainerReportHandler
NetworkTopology clusterMap = new NetworkTopologyImpl(conf);
EventQueue eventQueue = new EventQueue();
SCMStorageConfig storageConfig = new SCMStorageConfig(conf);
+ this.versionManager =
+ Mockito.mock(HDDSLayoutVersionManager.class);
+ Mockito.when(versionManager.getMetadataLayoutVersion())
+ .thenReturn(METADATA_LAYOUT_VERSION);
+ Mockito.when(versionManager.getSoftwareLayoutVersion())
+ .thenReturn(SOFTWARE_LAYOUT_VERSION);
+
NodeManager nodeManager =
- new SCMNodeManager(conf, storageConfig, eventQueue, clusterMap);
- nodeManager.register(datanodeDetails, null, null);
+ new SCMNodeManager(conf, storageConfig, eventQueue, clusterMap,
+ versionManager);
+ nodeManager.register(datanodeDetails, null, null, null);
ReconContainerManager containerManager = getContainerManager();
ReconIncrementalContainerReportHandler reconIcr =
diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/scm/TestReconNodeManager.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/scm/TestReconNodeManager.java
index c934cae..c0c973b 100644
--- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/scm/TestReconNodeManager.java
+++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/scm/TestReconNodeManager.java
@@ -33,6 +33,7 @@ import org.apache.hadoop.hdds.protocol.DatanodeDetails;
import org.apache.hadoop.hdds.scm.net.NetworkTopology;
import org.apache.hadoop.hdds.scm.net.NetworkTopologyImpl;
import org.apache.hadoop.hdds.server.events.EventQueue;
+import org.apache.hadoop.hdds.upgrade.HDDSLayoutVersionManager;
import org.apache.hadoop.hdds.utils.db.DBStore;
import org.apache.hadoop.hdds.utils.db.DBStoreBuilder;
import org.apache.hadoop.hdds.utils.db.Table;
@@ -52,6 +53,8 @@ public class TestReconNodeManager {
private OzoneConfiguration conf;
private DBStore store;
+ private ReconStorageConfig reconStorageConfig;
+ private HDDSLayoutVersionManager versionManager;
@Before
public void setUp() throws Exception {
@@ -59,6 +62,8 @@ public class TestReconNodeManager {
conf.set(OZONE_METADATA_DIRS,
temporaryFolder.newFolder().getAbsolutePath());
conf.set(OZONE_SCM_NAMES, "localhost");
+ reconStorageConfig = new ReconStorageConfig(conf);
+ versionManager = HDDSLayoutVersionManager.initialize(reconStorageConfig);
store = DBStoreBuilder.createDBStore(conf, new ReconSCMDBDefinition());
}
@@ -75,7 +80,7 @@ public class TestReconNodeManager {
Table<UUID, DatanodeDetails> nodeTable =
ReconSCMDBDefinition.NODES.getTable(store);
ReconNodeManager reconNodeManager = new ReconNodeManager(conf,
- scmStorageConfig, eventQueue, clusterMap, nodeTable);
+ scmStorageConfig, eventQueue, clusterMap, nodeTable, versionManager);
ReconNewNodeHandler reconNewNodeHandler =
new ReconNewNodeHandler(reconNodeManager);
assertTrue(reconNodeManager.getAllNodes().isEmpty());
@@ -84,7 +89,7 @@ public class TestReconNodeManager {
String uuidString = datanodeDetails.getUuidString();
// Register a random datanode.
- reconNodeManager.register(datanodeDetails, null, null);
+ reconNodeManager.register(datanodeDetails, null, null, null);
reconNewNodeHandler.onMessage(reconNodeManager.getNodeByUuid(uuidString),
null);
@@ -95,7 +100,7 @@ public class TestReconNodeManager {
eventQueue.close();
reconNodeManager.close();
reconNodeManager = new ReconNodeManager(conf, scmStorageConfig, eventQueue,
- clusterMap, nodeTable);
+ clusterMap, nodeTable, versionManager);
// Verify that the node information was persisted and loaded back.
assertEquals(1, reconNodeManager.getAllNodes().size());
diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/scm/TestReconPipelineManager.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/scm/TestReconPipelineManager.java
index b190810..a670717 100644
--- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/scm/TestReconPipelineManager.java
+++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/scm/TestReconPipelineManager.java
@@ -36,6 +36,7 @@ import org.apache.hadoop.hdds.scm.pipeline.PipelineID;
import org.apache.hadoop.hdds.scm.pipeline.PipelineProvider;
import org.apache.hadoop.hdds.scm.server.SCMStorageConfig;
import org.apache.hadoop.hdds.server.events.EventQueue;
+import org.apache.hadoop.hdds.upgrade.HDDSLayoutVersionManager;
import org.apache.hadoop.hdds.utils.db.DBStore;
import org.apache.hadoop.hdds.utils.db.DBStoreBuilder;
import org.apache.hadoop.ozone.recon.scm.ReconPipelineFactory.ReconPipelineProvider;
@@ -52,6 +53,8 @@ import org.junit.Before;
import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.TemporaryFolder;
+import org.mockito.Mockito;
+
import static org.mockito.Mockito.mock;
/**
@@ -59,12 +62,16 @@ import static org.mockito.Mockito.mock;
*/
public class TestReconPipelineManager {
+ private static final Integer SOFTWARE_LAYOUT_VERSION = 1;
+ private static final Integer METADATA_LAYOUT_VERSION = 1;
+
@Rule
public TemporaryFolder temporaryFolder = new TemporaryFolder();
private OzoneConfiguration conf;
private SCMStorageConfig scmStorageConfig;
private DBStore store;
+ private HDDSLayoutVersionManager versionManager;
@Before
public void setup() throws IOException {
@@ -109,8 +116,16 @@ public class TestReconPipelineManager {
NetworkTopology clusterMap = new NetworkTopologyImpl(conf);
EventQueue eventQueue = new EventQueue();
+
+ this.versionManager =
+ Mockito.mock(HDDSLayoutVersionManager.class);
+ Mockito.when(versionManager.getMetadataLayoutVersion())
+ .thenReturn(METADATA_LAYOUT_VERSION);
+ Mockito.when(versionManager.getSoftwareLayoutVersion())
+ .thenReturn(SOFTWARE_LAYOUT_VERSION);
NodeManager nodeManager =
- new SCMNodeManager(conf, scmStorageConfig, eventQueue, clusterMap);
+ new SCMNodeManager(conf, scmStorageConfig, eventQueue, clusterMap,
+ versionManager);
try (ReconPipelineManager reconPipelineManager =
new ReconPipelineManager(conf, nodeManager,
@@ -145,8 +160,15 @@ public class TestReconPipelineManager {
Pipeline pipeline = getRandomPipeline();
NetworkTopology clusterMap = new NetworkTopologyImpl(conf);
EventQueue eventQueue = new EventQueue();
+ this.versionManager =
+ Mockito.mock(HDDSLayoutVersionManager.class);
+ Mockito.when(versionManager.getMetadataLayoutVersion())
+ .thenReturn(METADATA_LAYOUT_VERSION);
+ Mockito.when(versionManager.getSoftwareLayoutVersion())
+ .thenReturn(SOFTWARE_LAYOUT_VERSION);
NodeManager nodeManager =
- new SCMNodeManager(conf, scmStorageConfig, eventQueue, clusterMap);
+ new SCMNodeManager(conf, scmStorageConfig, eventQueue, clusterMap,
+ versionManager);
ReconPipelineManager reconPipelineManager =
new ReconPipelineManager(conf, nodeManager,
---------------------------------------------------------------------
To unsubscribe, e-mail: ozone-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: ozone-commits-help@hadoop.apache.org