You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@ozone.apache.org by ja...@apache.org on 2022/07/05 01:40:29 UTC
[ozone] branch master updated: HDDS-6960. EC: Implement the Over-replication Handler (#3572)
This is an automated email from the ASF dual-hosted git repository.
jacksonyao pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/ozone.git
The following commit(s) were added to refs/heads/master by this push:
new fac23c9f3a HDDS-6960. EC: Implement the Over-replication Handler (#3572)
fac23c9f3a is described below
commit fac23c9f3a562478d521df89722e3c00e7aa7f71
Author: Jie Yao <ja...@tencent.com>
AuthorDate: Tue Jul 5 09:40:23 2022 +0800
HDDS-6960. EC: Implement the Over-replication Handler (#3572)
---
.../scm/container/RatisContainerReplicaCount.java | 1 +
.../AbstractOverReplicationHandler.java | 110 ++++++++++++++
.../{ => replication}/ContainerReplicaCount.java | 5 +-
.../replication/ECContainerHealthCheck.java | 1 -
.../{ => replication}/ECContainerReplicaCount.java | 5 +-
.../replication/ECOverReplicationHandler.java | 165 +++++++++++++++++++++
.../replication/ECUnderReplicationHandler.java | 3 +-
.../replication/LegacyReplicationManager.java | 1 -
.../container/replication/ReplicationManager.java | 3 +-
...ndler.java => UnhealthyReplicationHandler.java} | 2 +-
.../hdds/scm/node/DatanodeAdminMonitorImpl.java | 2 +-
.../container/replication/ReplicationTestUtil.java | 30 ++++
.../TestECContainerReplicaCount.java | 5 +-
.../replication/TestECOverReplicationHandler.java | 159 ++++++++++++++++++++
.../TestECUnderReplicationHandler.java | 30 +---
.../TestRatisContainerReplicaCount.java | 24 ++-
.../hdds/scm/node/TestDatanodeAdminMonitor.java | 2 +-
.../scm/node/TestDecommissionAndMaintenance.java | 2 +-
18 files changed, 490 insertions(+), 60 deletions(-)
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/RatisContainerReplicaCount.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/RatisContainerReplicaCount.java
index f7c5b28f00..f25423d4ec 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/RatisContainerReplicaCount.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/RatisContainerReplicaCount.java
@@ -18,6 +18,7 @@
package org.apache.hadoop.hdds.scm.container;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
+import org.apache.hadoop.hdds.scm.container.replication.ContainerReplicaCount;
import java.util.Set;
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/AbstractOverReplicationHandler.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/AbstractOverReplicationHandler.java
new file mode 100644
index 0000000000..da8d52bb1e
--- /dev/null
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/AbstractOverReplicationHandler.java
@@ -0,0 +1,110 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hdds.scm.container.replication;
+
+import org.apache.hadoop.hdds.protocol.DatanodeDetails;
+import org.apache.hadoop.hdds.scm.ContainerPlacementStatus;
+import org.apache.hadoop.hdds.scm.PlacementPolicy;
+import org.apache.hadoop.hdds.scm.container.ContainerReplica;
+import org.apache.hadoop.ozone.protocol.commands.SCMCommand;
+
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.stream.Collectors;
+
+/**
+ * This class holds some common methods that will be shared among
+ * different kinds of implementation of OverReplicationHandler.
+ * */
+public abstract class AbstractOverReplicationHandler
+ implements UnhealthyReplicationHandler {
+ private final PlacementPolicy placementPolicy;
+
+ protected AbstractOverReplicationHandler(PlacementPolicy placementPolicy) {
+ this.placementPolicy = placementPolicy;
+ }
+
+ /**
+ * Identify a new set of datanode(s) to delete the container
+ * and form the SCM commands to send it to DN.
+ *
+ * @param replicas - Set of available container replicas.
+ * @param pendingOps - Inflight replications and deletion ops.
+ * @param result - Health check result.
+ * @param remainingMaintenanceRedundancy - represents how many redundant
+ *                                       replicas to keep for nodes going
+ *                                       into maintenance.
+ * @return Returns the key value pair of destination dn where the command gets
+ * executed and the command itself.
+ */
+ public abstract Map<DatanodeDetails, SCMCommand<?>> processAndCreateCommands(
+ Set<ContainerReplica> replicas, List<ContainerReplicaOp> pendingOps,
+ ContainerHealthResult result, int remainingMaintenanceRedundancy);
+
+ /**
+ * Identify whether the placement status is actually equal for a
+ * replica set after removing those filtered replicas.
+ *
+ * @param replicas the original set of replicas
+ * @param replica the replica to be removed
+ * @param replicationFactor the expected replication factor of the container
+ */
+ public boolean isPlacementStatusActuallyEqualAfterRemove(
+ final Set<ContainerReplica> replicas,
+ final ContainerReplica replica,
+ final int replicationFactor) {
+ ContainerPlacementStatus currentCPS =
+ getPlacementStatus(replicas, replicationFactor);
+ replicas.remove(replica);
+ ContainerPlacementStatus newCPS =
+ getPlacementStatus(replicas, replicationFactor);
+ replicas.add(replica);
+ return isPlacementStatusActuallyEqual(currentCPS, newCPS);
+ }
+
+ /**
+ * Given a set of ContainerReplica, transform it to a list of DatanodeDetails
+ * and then check if the list meets the container placement policy.
+ * @param replicas List of containerReplica
+ * @param replicationFactor Expected Replication Factor of the container
+ * @return ContainerPlacementStatus indicating if the policy is met or not
+ */
+ private ContainerPlacementStatus getPlacementStatus(
+ Set<ContainerReplica> replicas, int replicationFactor) {
+ List<DatanodeDetails> replicaDns = replicas.stream()
+ .map(ContainerReplica::getDatanodeDetails)
+ .collect(Collectors.toList());
+ return placementPolicy.validateContainerPlacement(
+ replicaDns, replicationFactor);
+ }
+
+ /**
+ * whether the given two ContainerPlacementStatus are actually equal.
+ *
+ * @param cps1 ContainerPlacementStatus
+ * @param cps2 ContainerPlacementStatus
+ */
+ private boolean isPlacementStatusActuallyEqual(
+ ContainerPlacementStatus cps1,
+ ContainerPlacementStatus cps2) {
+ return (!cps1.isPolicySatisfied() &&
+ cps1.actualPlacementCount() == cps2.actualPlacementCount()) ||
+ cps1.isPolicySatisfied() && cps2.isPolicySatisfied();
+ }
+}
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerReplicaCount.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ContainerReplicaCount.java
similarity index 92%
rename from hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerReplicaCount.java
rename to hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ContainerReplicaCount.java
index e23c4e691e..a3160208e3 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerReplicaCount.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ContainerReplicaCount.java
@@ -15,10 +15,11 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
-package org.apache.hadoop.hdds.scm.container;
+package org.apache.hadoop.hdds.scm.container.replication;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
-import org.apache.hadoop.hdds.scm.container.replication.LegacyReplicationManager;
+import org.apache.hadoop.hdds.scm.container.ContainerInfo;
+import org.apache.hadoop.hdds.scm.container.ContainerReplica;
import java.util.Set;
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ECContainerHealthCheck.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ECContainerHealthCheck.java
index b79fc03d07..092cc13574 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ECContainerHealthCheck.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ECContainerHealthCheck.java
@@ -19,7 +19,6 @@ package org.apache.hadoop.hdds.scm.container.replication;
import org.apache.hadoop.hdds.client.ECReplicationConfig;
import org.apache.hadoop.hdds.scm.container.ContainerInfo;
import org.apache.hadoop.hdds.scm.container.ContainerReplica;
-import org.apache.hadoop.hdds.scm.container.ECContainerReplicaCount;
import java.util.List;
import java.util.Set;
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ECContainerReplicaCount.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ECContainerReplicaCount.java
similarity index 98%
rename from hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ECContainerReplicaCount.java
rename to hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ECContainerReplicaCount.java
index 822fb3a548..ff0a75053a 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ECContainerReplicaCount.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ECContainerReplicaCount.java
@@ -15,11 +15,12 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
-package org.apache.hadoop.hdds.scm.container;
+package org.apache.hadoop.hdds.scm.container.replication;
import org.apache.hadoop.hdds.client.ECReplicationConfig;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
-import org.apache.hadoop.hdds.scm.container.replication.ContainerReplicaOp;
+import org.apache.hadoop.hdds.scm.container.ContainerInfo;
+import org.apache.hadoop.hdds.scm.container.ContainerReplica;
import java.util.ArrayList;
import java.util.Collections;
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ECOverReplicationHandler.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ECOverReplicationHandler.java
new file mode 100644
index 0000000000..8c9434b295
--- /dev/null
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ECOverReplicationHandler.java
@@ -0,0 +1,165 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdds.scm.container.replication;
+
+import org.apache.hadoop.hdds.protocol.DatanodeDetails;
+import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos;
+import org.apache.hadoop.hdds.scm.PlacementPolicy;
+import org.apache.hadoop.hdds.scm.container.ContainerInfo;
+import org.apache.hadoop.hdds.scm.container.ContainerReplica;
+import org.apache.hadoop.hdds.scm.node.NodeManager;
+import org.apache.hadoop.ozone.protocol.commands.DeleteContainerCommand;
+import org.apache.hadoop.ozone.protocol.commands.SCMCommand;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Iterator;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+
+import static java.util.Collections.emptyMap;
+
+/**
+ * Handles the EC Over replication processing and forming the respective SCM
+ * commands.
+ */
+public class ECOverReplicationHandler extends AbstractOverReplicationHandler {
+ public static final Logger LOG =
+ LoggerFactory.getLogger(ECOverReplicationHandler.class);
+
+ private final ECContainerHealthCheck ecContainerHealthCheck =
+ new ECContainerHealthCheck();
+ private final NodeManager nodeManager;
+
+ public ECOverReplicationHandler(PlacementPolicy placementPolicy,
+ NodeManager nodeManager) {
+ super(placementPolicy);
+ this.nodeManager = nodeManager;
+ }
+
+ /**
+ * Identify a new set of datanode(s) to delete the container
+ * and form the SCM commands to send it to DN.
+ *
+ * @param replicas - Set of available container replicas.
+ * @param pendingOps - Inflight replications and deletion ops.
+ * @param result - Health check result.
+ * @param remainingMaintenanceRedundancy - represents how many redundant
+ *                                       replicas to keep for nodes going
+ *                                       into maintenance.
+ * @return Returns the key value pair of destination dn where the command gets
+ * executed and the command itself.
+ */
+ @Override
+ public Map<DatanodeDetails, SCMCommand<?>> processAndCreateCommands(
+ Set<ContainerReplica> replicas, List<ContainerReplicaOp> pendingOps,
+ ContainerHealthResult result, int remainingMaintenanceRedundancy) {
+ ContainerInfo container = result.getContainerInfo();
+ ContainerHealthResult currentUnderRepRes = ecContainerHealthCheck
+ .checkHealth(container, replicas, pendingOps,
+ remainingMaintenanceRedundancy);
+ LOG.debug("Handling over-replicated EC container: {}", container);
+
+ //sanity check
+ if (currentUnderRepRes.getHealthState() !=
+ ContainerHealthResult.HealthState.OVER_REPLICATED) {
+ LOG.info("The container {} state changed and it's not in over"
+ + " replication any more. Current state is: {}",
+ container.getContainerID(), currentUnderRepRes);
+ return emptyMap();
+ }
+
+ ContainerHealthResult.OverReplicatedHealthResult containerHealthResult =
+ ((ContainerHealthResult.OverReplicatedHealthResult)
+ currentUnderRepRes);
+ if (containerHealthResult.isSufficientlyReplicatedAfterPending()) {
+ LOG.info("The container {} with replicas {} will be corrected " +
+ "by the pending delete", container.getContainerID(), replicas);
+ return emptyMap();
+ }
+
+ // We don't support a hybrid state (both under- and over-replicated) for
+ // EC containers, and we always handle under-replication first for now. It
+ // means that when reaching here, we have all the replica indexes and some
+ // of them appear more than once.
+ // TODO: support hybrid state if needed.
+ final ECContainerReplicaCount replicaCount =
+ new ECContainerReplicaCount(container, replicas, pendingOps,
+ remainingMaintenanceRedundancy);
+
+ List<Integer> overReplicatedIndexes =
+ replicaCount.overReplicatedIndexes(true);
+ //sanity check
+ if (overReplicatedIndexes.size() == 0) {
+ LOG.warn("The container {} with replicas {} is found over replicated " +
+ "by ContainerHealthCheck, but found not over replicated by " +
+ "ECContainerReplicaCount",
+ container.getContainerID(), replicas);
+ return emptyMap();
+ }
+
+ final List<DatanodeDetails> deletionInFlight = new ArrayList<>();
+ for (ContainerReplicaOp op : pendingOps) {
+ if (op.getOpType() == ContainerReplicaOp.PendingOpType.DELETE) {
+ deletionInFlight.add(op.getTarget());
+ }
+ }
+ Map<Integer, List<ContainerReplica>> index2replicas = new HashMap<>();
+ replicas.stream()
+ .filter(r -> overReplicatedIndexes.contains(r.getReplicaIndex()))
+ .filter(r -> r
+ .getState() == StorageContainerDatanodeProtocolProtos
+ .ContainerReplicaProto.State.CLOSED)
+ .filter(r -> ReplicationManager
+ .getNodeStatus(r.getDatanodeDetails(), nodeManager).isHealthy())
+ .filter(r -> !deletionInFlight.contains(r.getDatanodeDetails()))
+ .forEach(r -> {
+ int index = r.getReplicaIndex();
+ index2replicas.computeIfAbsent(index, k -> new LinkedList<>());
+ index2replicas.get(index).add(r);
+ });
+
+ if (index2replicas.size() > 0) {
+ final Map<DatanodeDetails, SCMCommand<?>> commands = new HashMap<>();
+ final int replicationFactor =
+ container.getReplicationConfig().getRequiredNodes();
+ index2replicas.values().forEach(l -> {
+ Iterator<ContainerReplica> it = l.iterator();
+ Set<ContainerReplica> tempReplicaSet = new HashSet<>(replicas);
+ while (it.hasNext() && l.size() > 1) {
+ ContainerReplica r = it.next();
+ if (isPlacementStatusActuallyEqualAfterRemove(
+ tempReplicaSet, r, replicationFactor)) {
+ DeleteContainerCommand deleteCommand =
+ new DeleteContainerCommand(container.getContainerID(), true);
+ commands.put(r.getDatanodeDetails(), deleteCommand);
+ it.remove();
+ tempReplicaSet.remove(r);
+ }
+ }
+ });
+ return commands;
+ }
+
+ return emptyMap();
+ }
+}
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ECUnderReplicationHandler.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ECUnderReplicationHandler.java
index 64cdadf266..0f6806e68b 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ECUnderReplicationHandler.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ECUnderReplicationHandler.java
@@ -28,7 +28,6 @@ import org.apache.hadoop.hdds.scm.ScmConfigKeys;
import org.apache.hadoop.hdds.scm.container.ContainerID;
import org.apache.hadoop.hdds.scm.container.ContainerInfo;
import org.apache.hadoop.hdds.scm.container.ContainerReplica;
-import org.apache.hadoop.hdds.scm.container.ECContainerReplicaCount;
import org.apache.hadoop.hdds.scm.node.NodeManager;
import org.apache.hadoop.ozone.protocol.commands.ReconstructECContainersCommand;
import org.apache.hadoop.ozone.protocol.commands.ReplicateContainerCommand;
@@ -51,7 +50,7 @@ import static java.util.Collections.emptyMap;
* Handles the EC Under replication processing and forming the respective SCM
* commands.
*/
-public class ECUnderReplicationHandler implements UnderReplicationHandler {
+public class ECUnderReplicationHandler implements UnhealthyReplicationHandler {
public static final Logger LOG =
LoggerFactory.getLogger(ECUnderReplicationHandler.class);
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/LegacyReplicationManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/LegacyReplicationManager.java
index 33e697867c..536c21a37d 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/LegacyReplicationManager.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/LegacyReplicationManager.java
@@ -39,7 +39,6 @@ import org.apache.hadoop.hdds.scm.container.ContainerInfo;
import org.apache.hadoop.hdds.scm.container.ContainerManager;
import org.apache.hadoop.hdds.scm.container.ContainerNotFoundException;
import org.apache.hadoop.hdds.scm.container.ContainerReplica;
-import org.apache.hadoop.hdds.scm.container.ContainerReplicaCount;
import org.apache.hadoop.hdds.scm.container.ReplicationManagerReport;
import org.apache.hadoop.hdds.scm.container.ReplicationManagerReport.HealthState;
import org.apache.hadoop.hdds.scm.container.common.helpers.MoveDataNodePair;
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ReplicationManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ReplicationManager.java
index 3e125d0783..59585b4d79 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ReplicationManager.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/ReplicationManager.java
@@ -31,8 +31,7 @@ import org.apache.hadoop.hdds.scm.container.ContainerInfo;
import org.apache.hadoop.hdds.scm.container.ContainerManager;
import org.apache.hadoop.hdds.scm.container.ContainerNotFoundException;
import org.apache.hadoop.hdds.scm.container.ContainerReplica;
-import org.apache.hadoop.hdds.scm.container.ContainerReplicaCount;
-import org.apache.hadoop.hdds.scm.container.ECContainerReplicaCount;
+
import org.apache.hadoop.hdds.scm.container.ReplicationManagerReport;
import org.apache.hadoop.hdds.scm.container.ReplicationManagerReport.HealthState;
import org.apache.hadoop.hdds.scm.ha.SCMContext;
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/UnderReplicationHandler.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/UnhealthyReplicationHandler.java
similarity index 97%
rename from hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/UnderReplicationHandler.java
rename to hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/UnhealthyReplicationHandler.java
index e0b71d5f65..cf27fcb51c 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/UnderReplicationHandler.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/UnhealthyReplicationHandler.java
@@ -29,7 +29,7 @@ import java.util.Set;
* This interface to create respective commands after processing the replicas
* with pending ops and health check results.
*/
-public interface UnderReplicationHandler {
+public interface UnhealthyReplicationHandler {
/**
* Identify a new set of datanode(s) to replicate/reconstruct the container
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/DatanodeAdminMonitorImpl.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/DatanodeAdminMonitorImpl.java
index fc5e2c7306..e676fc1ced 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/DatanodeAdminMonitorImpl.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/DatanodeAdminMonitorImpl.java
@@ -24,7 +24,7 @@ import org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeOperationalState;
import org.apache.hadoop.hdds.scm.container.ContainerID;
import org.apache.hadoop.hdds.scm.container.ContainerNotFoundException;
import org.apache.hadoop.hdds.scm.container.ContainerReplica;
-import org.apache.hadoop.hdds.scm.container.ContainerReplicaCount;
+import org.apache.hadoop.hdds.scm.container.replication.ContainerReplicaCount;
import org.apache.hadoop.hdds.scm.container.replication.ReplicationManager;
import org.apache.hadoop.hdds.scm.events.SCMEvents;
import org.apache.hadoop.hdds.scm.node.states.NodeNotFoundException;
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/ReplicationTestUtil.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/ReplicationTestUtil.java
index d616ad482b..c220223805 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/ReplicationTestUtil.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/ReplicationTestUtil.java
@@ -19,16 +19,23 @@ package org.apache.hadoop.hdds.scm.container.replication;
import org.apache.commons.lang3.tuple.Pair;
import org.apache.hadoop.hdds.client.ReplicationConfig;
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.hdds.protocol.DatanodeDetails;
import org.apache.hadoop.hdds.protocol.MockDatanodeDetails;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos;
+import org.apache.hadoop.hdds.scm.PlacementPolicy;
+import org.apache.hadoop.hdds.scm.SCMCommonPlacementPolicy;
import org.apache.hadoop.hdds.scm.container.ContainerID;
import org.apache.hadoop.hdds.scm.container.ContainerInfo;
import org.apache.hadoop.hdds.scm.container.ContainerReplica;
+import org.apache.hadoop.hdds.scm.exceptions.SCMException;
+import org.apache.hadoop.hdds.scm.node.NodeManager;
import org.apache.hadoop.hdds.scm.pipeline.PipelineID;
+import java.util.ArrayList;
import java.util.HashSet;
+import java.util.List;
import java.util.Set;
import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeOperationalState.IN_SERVICE;
@@ -112,4 +119,27 @@ public final class ReplicationTestUtil {
}
return replica;
}
+
+ public static PlacementPolicy getSimpleTestPlacementPolicy(
+ final NodeManager nodeManager, final OzoneConfiguration conf) {
+ return new SCMCommonPlacementPolicy(nodeManager, conf) {
+ @Override
+ public List<DatanodeDetails> chooseDatanodes(
+ List<DatanodeDetails> excludedNodes,
+ List<DatanodeDetails> favoredNodes, int nodesRequiredToChoose,
+ long metadataSizeRequired, long dataSizeRequired)
+ throws SCMException {
+ List<DatanodeDetails> dns = new ArrayList<>();
+ for (int i = 0; i < nodesRequiredToChoose; i++) {
+ dns.add(MockDatanodeDetails.randomDatanodeDetails());
+ }
+ return dns;
+ }
+
+ @Override
+ public DatanodeDetails chooseNode(List<DatanodeDetails> healthyNodes) {
+ return null;
+ }
+ };
+ }
}
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/states/TestECContainerReplicaCount.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestECContainerReplicaCount.java
similarity index 98%
rename from hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/states/TestECContainerReplicaCount.java
rename to hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestECContainerReplicaCount.java
index 37e105fbee..8b1ac8035a 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/states/TestECContainerReplicaCount.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestECContainerReplicaCount.java
@@ -15,7 +15,7 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
-package org.apache.hadoop.hdds.scm.container.states;
+package org.apache.hadoop.hdds.scm.container.replication;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableSet;
@@ -25,9 +25,6 @@ import org.apache.hadoop.hdds.protocol.MockDatanodeDetails;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
import org.apache.hadoop.hdds.scm.container.ContainerInfo;
import org.apache.hadoop.hdds.scm.container.ContainerReplica;
-import org.apache.hadoop.hdds.scm.container.ECContainerReplicaCount;
-import org.apache.hadoop.hdds.scm.container.replication.ContainerReplicaOp;
-import org.apache.hadoop.hdds.scm.container.replication.ReplicationTestUtil;
import org.jetbrains.annotations.NotNull;
import org.junit.jupiter.api.Assertions;
import org.junit.jupiter.api.BeforeEach;
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestECOverReplicationHandler.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestECOverReplicationHandler.java
new file mode 100644
index 0000000000..7b0560d2f2
--- /dev/null
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestECOverReplicationHandler.java
@@ -0,0 +1,159 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdds.scm.container.replication;
+
+import com.google.common.collect.ImmutableList;
+import com.google.common.collect.ImmutableMap;
+import org.apache.commons.lang3.tuple.Pair;
+import org.apache.hadoop.hdds.client.ECReplicationConfig;
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.protocol.DatanodeDetails;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
+import org.apache.hadoop.hdds.scm.PlacementPolicy;
+import org.apache.hadoop.hdds.scm.container.ContainerInfo;
+import org.apache.hadoop.hdds.scm.container.ContainerReplica;
+import org.apache.hadoop.hdds.scm.container.MockNodeManager;
+import org.apache.hadoop.hdds.scm.net.NodeSchema;
+import org.apache.hadoop.hdds.scm.net.NodeSchemaManager;
+import org.apache.hadoop.hdds.scm.node.NodeManager;
+import org.apache.hadoop.hdds.scm.node.NodeStatus;
+import org.apache.hadoop.hdds.scm.node.states.NodeNotFoundException;
+import org.apache.hadoop.ozone.container.common.SCMTestUtils;
+import org.apache.hadoop.ozone.protocol.commands.SCMCommand;
+import org.junit.Assert;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
+import org.mockito.Mockito;
+
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.Set;
+import java.util.stream.Collectors;
+
+import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeOperationalState.IN_SERVICE;
+import static org.apache.hadoop.hdds.scm.net.NetConstants.LEAF_SCHEMA;
+import static org.apache.hadoop.hdds.scm.net.NetConstants.RACK_SCHEMA;
+import static org.apache.hadoop.hdds.scm.net.NetConstants.ROOT_SCHEMA;
+
+/**
+ * Tests the ECOverReplicationHandling functionality.
+ */
+public class TestECOverReplicationHandler {
+ private ECReplicationConfig repConfig;
+ private ContainerInfo container;
+ private NodeManager nodeManager;
+ private OzoneConfiguration conf;
+ private PlacementPolicy policy;
+
+ @BeforeEach
+ public void setup() {
+ nodeManager = new MockNodeManager(true, 10) {
+ @Override
+ public NodeStatus getNodeStatus(DatanodeDetails dd)
+ throws NodeNotFoundException {
+ return NodeStatus.inServiceHealthy();
+ }
+ };
+ conf = SCMTestUtils.getConf();
+ repConfig = new ECReplicationConfig(3, 2);
+ container = ReplicationTestUtil
+ .createContainer(HddsProtos.LifeCycleState.CLOSED, repConfig);
+ policy = ReplicationTestUtil
+ .getSimpleTestPlacementPolicy(nodeManager, conf);
+ NodeSchema[] schemas =
+ new NodeSchema[] {ROOT_SCHEMA, RACK_SCHEMA, LEAF_SCHEMA};
+ NodeSchemaManager.getInstance().init(schemas, true);
+ }
+
+ @Test
+ public void testNoOverReplication() {
+ Set<ContainerReplica> availableReplicas = ReplicationTestUtil
+ .createReplicas(Pair.of(IN_SERVICE, 1),
+ Pair.of(IN_SERVICE, 2), Pair.of(IN_SERVICE, 3),
+ Pair.of(IN_SERVICE, 4), Pair.of(IN_SERVICE, 5));
+ testOverReplicationWithIndexes(availableReplicas, Collections.emptyMap());
+ }
+
+ @Test
+ public void testOverReplicationWithOneSameIndexes() {
+ Set<ContainerReplica> availableReplicas = ReplicationTestUtil
+ .createReplicas(Pair.of(IN_SERVICE, 1),
+ Pair.of(IN_SERVICE, 1), Pair.of(IN_SERVICE, 1),
+ Pair.of(IN_SERVICE, 2), Pair.of(IN_SERVICE, 3),
+ Pair.of(IN_SERVICE, 4), Pair.of(IN_SERVICE, 5));
+
+ testOverReplicationWithIndexes(availableReplicas,
+ //num of index 1 is 3, but it should be 1, so 2 excess
+ new ImmutableMap.Builder<Integer, Integer>().put(1, 2).build());
+ }
+
+ @Test
+ public void testOverReplicationWithMultiSameIndexes() {
+ Set<ContainerReplica> availableReplicas = ReplicationTestUtil
+ .createReplicas(Pair.of(IN_SERVICE, 1),
+ Pair.of(IN_SERVICE, 1), Pair.of(IN_SERVICE, 1),
+ Pair.of(IN_SERVICE, 2), Pair.of(IN_SERVICE, 2),
+ Pair.of(IN_SERVICE, 2), Pair.of(IN_SERVICE, 3),
+ Pair.of(IN_SERVICE, 3), Pair.of(IN_SERVICE, 3),
+ Pair.of(IN_SERVICE, 4), Pair.of(IN_SERVICE, 4),
+ Pair.of(IN_SERVICE, 5), Pair.of(IN_SERVICE, 5));
+
+ testOverReplicationWithIndexes(availableReplicas,
+ //each index should keep exactly 1 replica; the rest are excess
+ new ImmutableMap.Builder<Integer, Integer>()
+ .put(1, 2).put(2, 2).put(3, 2).put(4, 1)
+ .put(5, 1).build());
+ }
+
+ private void testOverReplicationWithIndexes(
+ Set<ContainerReplica> availableReplicas,
+ Map<Integer, Integer> index2excessNum) {
+ ECOverReplicationHandler ecORH =
+ new ECOverReplicationHandler(policy, nodeManager);
+ ContainerHealthResult.OverReplicatedHealthResult result =
+ Mockito.mock(ContainerHealthResult.OverReplicatedHealthResult.class);
+ Mockito.when(result.getContainerInfo()).thenReturn(container);
+
+ Map<DatanodeDetails, SCMCommand<?>> commands = ecORH
+ .processAndCreateCommands(availableReplicas, ImmutableList.of(),
+ result, 1);
+
+ // total commands send out should be equal to the sum of all
+ // the excess nums
+ int totalDeleteCommandNum =
+ index2excessNum.values().stream().reduce(0, Integer::sum);
+ Assert.assertEquals(totalDeleteCommandNum, commands.size());
+
+ // command num of each index should be equal to the excess num
+ // of this index
+ Map<DatanodeDetails, Integer> datanodeDetails2Index =
+ availableReplicas.stream().collect(Collectors.toMap(
+ ContainerReplica::getDatanodeDetails,
+ ContainerReplica::getReplicaIndex));
+ Map<Integer, Integer> index2commandNum = new HashMap<>();
+ commands.keySet().forEach(dd ->
+ index2commandNum.merge(datanodeDetails2Index.get(dd), 1, Integer::sum)
+ );
+
+ index2commandNum.keySet().forEach(i -> {
+ Assert.assertTrue(index2excessNum.containsKey(i));
+ Assert.assertEquals(index2commandNum.get(i), index2excessNum.get(i));
+ });
+ }
+}
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/states/TestECUnderReplicationHandler.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestECUnderReplicationHandler.java
similarity index 86%
rename from hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/states/TestECUnderReplicationHandler.java
rename to hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestECUnderReplicationHandler.java
index bdadf78091..014306af27 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/states/TestECUnderReplicationHandler.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestECUnderReplicationHandler.java
@@ -15,24 +15,18 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
-package org.apache.hadoop.hdds.scm.container.states;
+package org.apache.hadoop.hdds.scm.container.replication;
import com.google.common.collect.ImmutableList;
import org.apache.commons.lang3.tuple.Pair;
import org.apache.hadoop.hdds.client.ECReplicationConfig;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.hdds.protocol.DatanodeDetails;
-import org.apache.hadoop.hdds.protocol.MockDatanodeDetails;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
import org.apache.hadoop.hdds.scm.PlacementPolicy;
-import org.apache.hadoop.hdds.scm.SCMCommonPlacementPolicy;
import org.apache.hadoop.hdds.scm.container.ContainerInfo;
import org.apache.hadoop.hdds.scm.container.ContainerReplica;
import org.apache.hadoop.hdds.scm.container.MockNodeManager;
-import org.apache.hadoop.hdds.scm.container.replication.ContainerHealthResult;
-import org.apache.hadoop.hdds.scm.container.replication.ECUnderReplicationHandler;
-import org.apache.hadoop.hdds.scm.container.replication.ReplicationTestUtil;
-import org.apache.hadoop.hdds.scm.exceptions.SCMException;
import org.apache.hadoop.hdds.scm.net.NodeSchema;
import org.apache.hadoop.hdds.scm.net.NodeSchemaManager;
import org.apache.hadoop.hdds.scm.node.NodeManager;
@@ -48,7 +42,6 @@ import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
import org.mockito.Mockito;
-import java.util.ArrayList;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
@@ -83,25 +76,8 @@ public class TestECUnderReplicationHandler {
repConfig = new ECReplicationConfig(3, 2);
container = ReplicationTestUtil
.createContainer(HddsProtos.LifeCycleState.CLOSED, repConfig);
- policy = new SCMCommonPlacementPolicy(nodeManager, conf) {
- @Override
- public List<DatanodeDetails> chooseDatanodes(
- List<DatanodeDetails> excludedNodes,
- List<DatanodeDetails> favoredNodes, int nodesRequiredToChoose,
- long metadataSizeRequired, long dataSizeRequired)
- throws SCMException {
- List<DatanodeDetails> dns = new ArrayList<>();
- for (int i = 0; i < nodesRequiredToChoose; i++) {
- dns.add(MockDatanodeDetails.randomDatanodeDetails());
- }
- return dns;
- }
-
- @Override
- public DatanodeDetails chooseNode(List<DatanodeDetails> healthyNodes) {
- return null;
- }
- };
+ policy = ReplicationTestUtil
+ .getSimpleTestPlacementPolicy(nodeManager, conf);
NodeSchema[] schemas =
new NodeSchema[] {ROOT_SCHEMA, RACK_SCHEMA, LEAF_SCHEMA};
NodeSchemaManager.getInstance().init(schemas, true);
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/states/TestRatisContainerReplicaCount.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestRatisContainerReplicaCount.java
similarity index 96%
rename from hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/states/TestRatisContainerReplicaCount.java
rename to hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestRatisContainerReplicaCount.java
index 5e5767c8ec..5ceaec39bf 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/states/TestRatisContainerReplicaCount.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestRatisContainerReplicaCount.java
@@ -15,7 +15,7 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
-package org.apache.hadoop.hdds.scm.container.states;
+package org.apache.hadoop.hdds.scm.container.replication;
import org.apache.hadoop.hdds.protocol.DatanodeDetails;
import org.apache.hadoop.hdds.protocol.MockDatanodeDetails;
@@ -29,22 +29,16 @@ import org.junit.jupiter.api.Test;
import java.util.HashSet;
import java.util.Set;
-import static org.junit.jupiter.api.Assertions.assertEquals;
-import static org.junit.jupiter.api.Assertions.assertTrue;
-import static org.apache.hadoop.hdds.protocol.proto.HddsProtos
- .NodeOperationalState.DECOMMISSIONED;
-import static org.apache.hadoop.hdds.protocol.proto.HddsProtos
- .NodeOperationalState.DECOMMISSIONING;
-import static org.apache.hadoop.hdds.protocol.proto.HddsProtos
- .NodeOperationalState.ENTERING_MAINTENANCE;
-import static org.apache.hadoop.hdds.protocol.proto.HddsProtos
- .NodeOperationalState.IN_MAINTENANCE;
-import static org.apache.hadoop.hdds.protocol.proto.HddsProtos
- .NodeOperationalState.IN_SERVICE;
-import static org.apache.hadoop.hdds.protocol.proto
- .StorageContainerDatanodeProtocolProtos.ContainerReplicaProto.State.CLOSED;
+import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeOperationalState.DECOMMISSIONED;
+import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeOperationalState.DECOMMISSIONING;
+import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeOperationalState.ENTERING_MAINTENANCE;
+import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeOperationalState.IN_MAINTENANCE;
+import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeOperationalState.IN_SERVICE;
+import static org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ContainerReplicaProto.State.CLOSED;
import static org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ContainerReplicaProto.State.OPEN;
+import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertFalse;
+import static org.junit.jupiter.api.Assertions.assertTrue;
/**
* Class used to test the RatisContainerReplicaCount class.
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestDatanodeAdminMonitor.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestDatanodeAdminMonitor.java
index 15f62c3aa3..8afe2a1810 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestDatanodeAdminMonitor.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestDatanodeAdminMonitor.java
@@ -28,7 +28,7 @@ import org.apache.hadoop.hdds.scm.container.RatisContainerReplicaCount;
import org.apache.hadoop.hdds.scm.container.ContainerInfo;
import org.apache.hadoop.hdds.scm.container.ContainerNotFoundException;
import org.apache.hadoop.hdds.scm.container.ContainerReplica;
-import org.apache.hadoop.hdds.scm.container.ContainerReplicaCount;
+import org.apache.hadoop.hdds.scm.container.replication.ContainerReplicaCount;
import org.apache.hadoop.hdds.scm.container.replication.ReplicationManager;
import org.apache.hadoop.hdds.scm.container.SimpleMockNodeManager;
import org.apache.hadoop.hdds.scm.events.SCMEvents;
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/node/TestDecommissionAndMaintenance.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/node/TestDecommissionAndMaintenance.java
index 9c40c84339..b2c4336c7a 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/node/TestDecommissionAndMaintenance.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/node/TestDecommissionAndMaintenance.java
@@ -28,7 +28,7 @@ import org.apache.hadoop.hdds.scm.container.ContainerInfo;
import org.apache.hadoop.hdds.scm.container.ContainerManager;
import org.apache.hadoop.hdds.scm.container.ContainerNotFoundException;
import org.apache.hadoop.hdds.scm.container.ContainerReplica;
-import org.apache.hadoop.hdds.scm.container.ContainerReplicaCount;
+import org.apache.hadoop.hdds.scm.container.replication.ContainerReplicaCount;
import org.apache.hadoop.hdds.scm.container.replication.ReplicationManager.ReplicationManagerConfiguration;
import org.apache.hadoop.hdds.scm.node.NodeManager;
import org.apache.hadoop.hdds.scm.node.NodeStatus;
---------------------------------------------------------------------
To unsubscribe, e-mail: commits-unsubscribe@ozone.apache.org
For additional commands, e-mail: commits-help@ozone.apache.org