Posted to commits@ozone.apache.org by el...@apache.org on 2020/10/05 13:42:53 UTC

[hadoop-ozone] branch HDDS-1880-Decom updated: HDDS-4300. Removed unneeded class DatanodeAdminNodeDetails (#1465)

This is an automated email from the ASF dual-hosted git repository.

elek pushed a commit to branch HDDS-1880-Decom
in repository https://gitbox.apache.org/repos/asf/hadoop-ozone.git


The following commit(s) were added to refs/heads/HDDS-1880-Decom by this push:
     new f43a370  HDDS-4300. Removed unneeded class DatanodeAdminNodeDetails (#1465)
f43a370 is described below

commit f43a370169f2d8cc2b8635ac9e026278157b16db
Author: Stephen O'Donnell <st...@gmail.com>
AuthorDate: Mon Oct 5 14:42:41 2020 +0100

    HDDS-4300. Removed unneeded class DatanodeAdminNodeDetails (#1465)
---
 .../hdds/scm/node/DatanodeAdminMonitorImpl.java    | 105 ++++++++--------
 .../hdds/scm/node/DatanodeAdminNodeDetails.java    | 137 ---------------------
 .../hdds/scm/node/TestDatanodeAdminMonitor.java    |  43 +++----
 .../scm/node/TestDatanodeAdminNodeDetails.java     |  81 ------------
 4 files changed, 67 insertions(+), 299 deletions(-)
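
In outline, this patch removes the DatanodeAdminNodeDetails wrapper so the admin monitor queues and tracks DatanodeDetails objects directly, and it reads the maintenance expiry from NodeStatus rather than from the wrapper. A minimal sketch of the resulting call pattern follows, with the monitor and nodeManager wiring assumed to be set up as in the test changes further down (identifiers are the ones visible in this diff):

    // Sketch only, not part of the patch: the monitor is handed the
    // DatanodeDetails itself, and expiry state lives on NodeStatus.
    DatanodeDetails dn = MockDatanodeDetails.randomDatanodeDetails();
    monitor.startMonitoring(dn, 1);     // start maintenance, ending in 1 hour
    monitor.run();                      // process pending/cancelled/tracked nodes

    NodeStatus status = nodeManager.getNodeStatus(dn);
    if (status.isMaintenance() && status.operationalStateExpired()) {
      // maintenance window has passed; the monitor puts the node back IN_SERVICE
    }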

diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/DatanodeAdminMonitorImpl.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/DatanodeAdminMonitorImpl.java
index f9d1a32..0bbd13d 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/DatanodeAdminMonitorImpl.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/DatanodeAdminMonitorImpl.java
@@ -64,9 +64,9 @@ public class DatanodeAdminMonitorImpl implements DatanodeAdminMonitor {
   private EventPublisher eventQueue;
   private NodeManager nodeManager;
   private ReplicationManager replicationManager;
-  private Queue<DatanodeAdminNodeDetails> pendingNodes = new ArrayDeque();
-  private Queue<DatanodeAdminNodeDetails> cancelledNodes = new ArrayDeque();
-  private Set<DatanodeAdminNodeDetails> trackedNodes = new HashSet<>();
+  private Queue<DatanodeDetails> pendingNodes = new ArrayDeque();
+  private Queue<DatanodeDetails> cancelledNodes = new ArrayDeque();
+  private Set<DatanodeDetails> trackedNodes = new HashSet<>();
 
   private static final Logger LOG =
       LoggerFactory.getLogger(DatanodeAdminMonitorImpl.class);
@@ -93,10 +93,8 @@ public class DatanodeAdminMonitorImpl implements DatanodeAdminMonitor {
    */
   @Override
   public synchronized void startMonitoring(DatanodeDetails dn, int endInHours) {
-    DatanodeAdminNodeDetails nodeDetails =
-        new DatanodeAdminNodeDetails(dn, endInHours);
-    cancelledNodes.remove(nodeDetails);
-    pendingNodes.add(nodeDetails);
+    cancelledNodes.remove(dn);
+    pendingNodes.add(dn);
   }
 
   /**
@@ -108,9 +106,8 @@ public class DatanodeAdminMonitorImpl implements DatanodeAdminMonitor {
    */
   @Override
   public synchronized void stopMonitoring(DatanodeDetails dn) {
-    DatanodeAdminNodeDetails nodeDetails = new DatanodeAdminNodeDetails(dn, 0);
-    pendingNodes.remove(nodeDetails);
-    cancelledNodes.add(nodeDetails);
+    pendingNodes.remove(dn);
+    cancelledNodes.add(dn);
   }
 
   /**
@@ -155,20 +152,19 @@ public class DatanodeAdminMonitorImpl implements DatanodeAdminMonitor {
   }
 
   @VisibleForTesting
-  public Set<DatanodeAdminNodeDetails> getTrackedNodes() {
+  public Set<DatanodeDetails> getTrackedNodes() {
     return trackedNodes;
   }
 
   private void processCancelledNodes() {
     while (!cancelledNodes.isEmpty()) {
-      DatanodeAdminNodeDetails dn = cancelledNodes.poll();
+      DatanodeDetails dn = cancelledNodes.poll();
       try {
         stopTrackingNode(dn);
         putNodeBackInService(dn);
-        LOG.info("Recommissioned node {}", dn.getDatanodeDetails());
+        LOG.info("Recommissioned node {}", dn);
       } catch (NodeNotFoundException e) {
-        LOG.warn("Failed processing the cancel admin request for {}",
-            dn.getDatanodeDetails(), e);
+        LOG.warn("Failed processing the cancel admin request for {}", dn, e);
       }
     }
   }
@@ -180,11 +176,11 @@ public class DatanodeAdminMonitorImpl implements DatanodeAdminMonitor {
   }
 
   private void processTransitioningNodes() {
-    Iterator<DatanodeAdminNodeDetails> iterator = trackedNodes.iterator();
+    Iterator<DatanodeDetails> iterator = trackedNodes.iterator();
     while (iterator.hasNext()) {
-      DatanodeAdminNodeDetails dn = iterator.next();
+      DatanodeDetails dn = iterator.next();
       try {
-        NodeStatus status = getNodeStatus(dn.getDatanodeDetails());
+        NodeStatus status = getNodeStatus(dn);
 
         if (!shouldContinueWorkflow(dn, status)) {
           abortWorkflow(dn);
@@ -193,7 +189,7 @@ public class DatanodeAdminMonitorImpl implements DatanodeAdminMonitor {
         }
 
         if (status.isMaintenance()) {
-          if (dn.shouldMaintenanceEnd()) {
+          if (status.operationalStateExpired()) {
             completeMaintenance(dn);
             iterator.remove();
             continue;
@@ -205,12 +201,12 @@ public class DatanodeAdminMonitorImpl implements DatanodeAdminMonitor {
               // Ensure the DN has received and persisted the current maint
               // state.
               && status.getOperationalState()
-                  == dn.getDatanodeDetails().getPersistedOpState()
+                  == dn.getPersistedOpState()
               && checkContainersReplicatedOnNode(dn)) {
             // CheckContainersReplicatedOnNode may take a short time to run
             // so after it completes, re-get the nodestatus to check the health
             // and ensure the state is still good to continue
-            status = getNodeStatus(dn.getDatanodeDetails());
+            status = getNodeStatus(dn);
             if (status.isDead()) {
               LOG.warn("Datanode {} is dead and the admin workflow cannot " +
                   "continue. The node will be put back to IN_SERVICE and " +
@@ -228,7 +224,7 @@ public class DatanodeAdminMonitorImpl implements DatanodeAdminMonitor {
 
       } catch (NodeNotFoundException e) {
         LOG.error("An unexpected error occurred processing datanode {}. " +
-            "Aborting the admin workflow", dn.getDatanodeDetails(), e);
+            "Aborting the admin workflow", dn, e);
         abortWorkflow(dn);
         iterator.remove();
       }
@@ -244,43 +240,43 @@ public class DatanodeAdminMonitorImpl implements DatanodeAdminMonitor {
    * @param nodeStatus The current NodeStatus for the datanode
    * @return True if admin can continue, false otherwise
    */
-  private boolean shouldContinueWorkflow(DatanodeAdminNodeDetails dn,
+  private boolean shouldContinueWorkflow(DatanodeDetails dn,
       NodeStatus nodeStatus) {
     if (!nodeStatus.isDecommission() && !nodeStatus.isMaintenance()) {
       LOG.warn("Datanode {} has an operational state of {} when it should " +
               "be undergoing decommission or maintenance. Aborting admin for " +
-              "this node.",
-          dn.getDatanodeDetails(), nodeStatus.getOperationalState());
+              "this node.", dn, nodeStatus.getOperationalState());
       return false;
     }
     if (nodeStatus.isDead() && !nodeStatus.isInMaintenance()) {
       LOG.error("Datanode {} is dead but is not IN_MAINTENANCE. Aborting the " +
-          "admin workflow for this node", dn.getDatanodeDetails());
+          "admin workflow for this node", dn);
       return false;
     }
     return true;
   }
 
-  private boolean checkPipelinesClosedOnNode(DatanodeAdminNodeDetails dn) {
-    DatanodeDetails dnd = dn.getDatanodeDetails();
-    Set<PipelineID> pipelines = nodeManager.getPipelines(dnd);
+  private boolean checkPipelinesClosedOnNode(DatanodeDetails dn)
+      throws NodeNotFoundException {
+    Set<PipelineID> pipelines = nodeManager.getPipelines(dn);
+    NodeStatus status = nodeManager.getNodeStatus(dn);
     if (pipelines == null || pipelines.size() == 0
-        || dn.shouldMaintenanceEnd()) {
+        || status.operationalStateExpired()) {
       return true;
     } else {
       LOG.info("Waiting for pipelines to close for {}. There are {} " +
-          "pipelines", dnd, pipelines.size());
+          "pipelines", dn, pipelines.size());
       return false;
     }
   }
 
-  private boolean checkContainersReplicatedOnNode(DatanodeAdminNodeDetails dn)
+  private boolean checkContainersReplicatedOnNode(DatanodeDetails dn)
       throws NodeNotFoundException {
     int sufficientlyReplicated = 0;
     int underReplicated = 0;
     int unhealthy = 0;
     Set<ContainerID> containers =
-        nodeManager.getContainers(dn.getDatanodeDetails());
+        nodeManager.getContainers(dn);
     for (ContainerID cid : containers) {
       try {
         ContainerReplicaCount replicaSet =
@@ -295,46 +291,40 @@ public class DatanodeAdminMonitorImpl implements DatanodeAdminMonitor {
         }
       } catch (ContainerNotFoundException e) {
         LOG.warn("ContainerID {} present in node list for {} but not found " +
-            "in containerManager", cid, dn.getDatanodeDetails());
+            "in containerManager", cid, dn);
       }
     }
-    dn.setSufficientlyReplicatedContainers(sufficientlyReplicated);
-    dn.setUnderReplicatedContainers(underReplicated);
-    dn.setUnHealthyContainers(unhealthy);
-
     return underReplicated == 0 && unhealthy == 0;
   }
 
-  private void completeDecommission(DatanodeAdminNodeDetails dn)
+  private void completeDecommission(DatanodeDetails dn)
       throws NodeNotFoundException {
     setNodeOpState(dn, NodeOperationalState.DECOMMISSIONED);
     LOG.info("Datanode {} has completed the admin workflow. The operational " +
-            "state has been set to {}", dn.getDatanodeDetails(),
+            "state has been set to {}", dn,
         NodeOperationalState.DECOMMISSIONED);
   }
 
-  private void putIntoMaintenance(DatanodeAdminNodeDetails dn)
+  private void putIntoMaintenance(DatanodeDetails dn)
       throws NodeNotFoundException {
-    LOG.info("Datanode {} has entered maintenance", dn.getDatanodeDetails());
+    LOG.info("Datanode {} has entered maintenance", dn);
     setNodeOpState(dn, NodeOperationalState.IN_MAINTENANCE);
   }
 
-  private void completeMaintenance(DatanodeAdminNodeDetails dn)
+  private void completeMaintenance(DatanodeDetails dn)
       throws NodeNotFoundException {
     // The end state of Maintenance is to put the node back IN_SERVICE, whether
     // it is dead or not.
-    LOG.info("Datanode {} has ended maintenance automatically",
-        dn.getDatanodeDetails());
+    LOG.info("Datanode {} has ended maintenance automatically", dn);
     putNodeBackInService(dn);
   }
 
-  private void startTrackingNode(DatanodeAdminNodeDetails dn) {
-    eventQueue.fireEvent(SCMEvents.START_ADMIN_ON_NODE,
-        dn.getDatanodeDetails());
+  private void startTrackingNode(DatanodeDetails dn) {
+    eventQueue.fireEvent(SCMEvents.START_ADMIN_ON_NODE, dn);
     trackedNodes.add(dn);
   }
 
-  private void stopTrackingNode(DatanodeAdminNodeDetails dn) {
+  private void stopTrackingNode(DatanodeDetails dn) {
     trackedNodes.remove(dn);
   }
 
@@ -345,24 +335,29 @@ public class DatanodeAdminMonitorImpl implements DatanodeAdminMonitor {
    *
    * @param dn The datanode for which to abort tracking
    */
-  private void abortWorkflow(DatanodeAdminNodeDetails dn) {
+  private void abortWorkflow(DatanodeDetails dn) {
     try {
       putNodeBackInService(dn);
     } catch (NodeNotFoundException e) {
       LOG.error("Unable to set the node OperationalState for {} while " +
-          "aborting the datanode admin workflow", dn.getDatanodeDetails());
+          "aborting the datanode admin workflow", dn);
     }
   }
 
-  private void putNodeBackInService(DatanodeAdminNodeDetails dn)
+  private void putNodeBackInService(DatanodeDetails dn)
       throws NodeNotFoundException {
     setNodeOpState(dn, NodeOperationalState.IN_SERVICE);
   }
 
-  private void setNodeOpState(DatanodeAdminNodeDetails dn,
+  private void setNodeOpState(DatanodeDetails dn,
       HddsProtos.NodeOperationalState state) throws NodeNotFoundException {
-    nodeManager.setNodeOperationalState(dn.getDatanodeDetails(), state,
-        dn.getMaintenanceEnd() / 1000);
+    long expiry = 0;
+    if ((state == NodeOperationalState.IN_MAINTENANCE)
+        || (state == NodeOperationalState.ENTERING_MAINTENANCE)) {
+      NodeStatus status = nodeManager.getNodeStatus(dn);
+      expiry = status.getOpStateExpiryEpochSeconds();
+    }
+    nodeManager.setNodeOperationalState(dn, state, expiry);
   }
 
   private NodeStatus getNodeStatus(DatanodeDetails dnd)
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/DatanodeAdminNodeDetails.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/DatanodeAdminNodeDetails.java
deleted file mode 100644
index 9c8a905..0000000
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/DatanodeAdminNodeDetails.java
+++ /dev/null
@@ -1,137 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdds.scm.node;
-
-import com.google.common.annotations.VisibleForTesting;
-import org.apache.hadoop.hdds.protocol.DatanodeDetails;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- * This class is used by the DatanodeAdminMonitor to track the state and
- * details for Datanode decommission and maintenance. It provides a wrapper
- * around a DatanodeDetails object adding some additional states and helper
- * methods related to the admin workflow.
- */
-public class DatanodeAdminNodeDetails {
-  private DatanodeDetails datanodeDetails;
-  private long maintenanceEndTime;
-  private long enteredStateAt = 0;
-  private int unHealthyContainers = 0;
-  private int underReplicatedContainers = 0;
-  private int sufficientlyReplicatedContainers = 0;
-
-  private static final Logger LOG =
-      LoggerFactory.getLogger(DatanodeAdminNodeDetails.class);
-
-
-  /**
-   * Create a new object given the DatanodeDetails and the maintenance endtime.
-   * @param dn The datanode going through the admin workflow
-   * @param maintenanceEnd The number of hours from 'now', when maintenance
- *                       should end automatically. Passing zero indicates
- *                       maintenance will never end automatically.
-   */
-  DatanodeAdminNodeDetails(DatanodeDetails dn, long maintenanceEnd) {
-    datanodeDetails = dn;
-    setMaintenanceEnd(maintenanceEnd);
-    enteredStateAt = System.currentTimeMillis();
-  }
-
-  public boolean shouldMaintenanceEnd() {
-    if (0 == maintenanceEndTime) {
-      return false;
-    }
-    return System.currentTimeMillis() >= maintenanceEndTime;
-  }
-
-  public DatanodeDetails getDatanodeDetails() {
-    return datanodeDetails;
-  }
-
-  public void setUnHealthyContainers(int val) {
-    this.unHealthyContainers = val;
-  }
-
-  public void setUnderReplicatedContainers(int val) {
-    this.underReplicatedContainers = val;
-  }
-
-  public void setSufficientlyReplicatedContainers(int val) {
-    this.sufficientlyReplicatedContainers = val;
-  }
-
-  public int getUnHealthyContainers()  {
-    return unHealthyContainers;
-  }
-
-  public int getUnderReplicatedContainers() {
-    return underReplicatedContainers;
-  }
-
-  public int getSufficientlyReplicatedContainers() {
-    return sufficientlyReplicatedContainers;
-  }
-
-  /**
-   * Set the number of hours after which maintenance should end. Passing zero
-   * indicates maintenance will never end automatically. A negative number of
-   * hours can be passed for testing purposes.
-   * @param hoursFromNow The number of hours from now when maintenance should
-   *                     end, or zero for it to never end.
-   */
-  @VisibleForTesting
-  public void setMaintenanceEnd(long hoursFromNow) {
-    if (0 == hoursFromNow) {
-      maintenanceEndTime = 0;
-      return;
-    }
-    // Convert hours to ms
-    long msFromNow = hoursFromNow * 60L * 60L * 1000L;
-    maintenanceEndTime = System.currentTimeMillis() + msFromNow;
-  }
-
-  /**
-   * Returns the maintenance end time as milliseconds from the epoch.
-   * @return The maintenance end time, or zero if no end time is set.
-   */
-  public long getMaintenanceEnd() {
-    return maintenanceEndTime;
-  }
-
-  /**
-   * Matches only on the DatanodeDetails field, which compares only the UUID
-   * of the node to determine if they are the same object or not.
-   *
-   * @param o The object to compare this with
-   * @return True if the objects match, otherwise false
-   *
-   */
-  @Override
-  public boolean equals(Object o) {
-    return o instanceof DatanodeAdminNodeDetails &&
-        datanodeDetails.equals(
-            ((DatanodeAdminNodeDetails) o).getDatanodeDetails());
-  }
-
-  @Override
-  public int hashCode() {
-    return datanodeDetails.hashCode();
-  }
-
-}
\ No newline at end of file
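
The timing check deleted above is the piece the monitor now delegates to NodeStatus. Assuming operationalStateExpired() mirrors the removed shouldMaintenanceEnd(), but works against the epoch-seconds expiry exposed by getOpStateExpiryEpochSeconds() (both names appear in the monitor diff above), the equivalent check is roughly:

    // Hedged sketch of the assumed NodeStatus behaviour; the field name
    // opStateExpiryEpochSeconds is inferred from the getter used in the patch
    // and is not confirmed by this diff.
    public boolean operationalStateExpired() {
      if (opStateExpiryEpochSeconds == 0) {
        return false;                            // zero means "never expires"
      }
      return System.currentTimeMillis() / 1000 >= opStateExpiryEpochSeconds;
    }
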
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestDatanodeAdminMonitor.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestDatanodeAdminMonitor.java
index 0592f94..e0f23ae 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestDatanodeAdminMonitor.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestDatanodeAdminMonitor.java
@@ -181,7 +181,7 @@ public class TestDatanodeAdminMonitor {
     // REPLICATE_CONTAINERS as there are no pipelines to close.
     monitor.startMonitoring(dn1, 0);
     monitor.run();
-    DatanodeAdminNodeDetails node = getFirstTrackedNode();
+    DatanodeDetails node = getFirstTrackedNode();
     assertEquals(1, monitor.getTrackedNodeCount());
     assertEquals(HddsProtos.NodeOperationalState.DECOMMISSIONING,
         nodeManager.getNodeStatus(dn1).getOperationalState());
@@ -191,9 +191,6 @@ public class TestDatanodeAdminMonitor {
     monitor.run();
     assertEquals(HddsProtos.NodeOperationalState.DECOMMISSIONING,
         nodeManager.getNodeStatus(dn1).getOperationalState());
-    assertEquals(0, node.getSufficientlyReplicatedContainers());
-    assertEquals(0, node.getUnHealthyContainers());
-    assertEquals(3, node.getUnderReplicatedContainers());
 
     // Now change the replicationManager mock to return 3 CLOSED replicas
     // and the node should complete the REPLICATE_CONTAINERS step, moving to
@@ -207,10 +204,7 @@ public class TestDatanodeAdminMonitor {
     monitor.run();
 
     assertEquals(0, monitor.getTrackedNodeCount());
-    assertEquals(3, node.getSufficientlyReplicatedContainers());
-    assertEquals(0, node.getUnHealthyContainers());
-    assertEquals(0, node.getUnderReplicatedContainers());
-    assertEquals(DECOMMISSIONED,
+    assertEquals(HddsProtos.NodeOperationalState.DECOMMISSIONED,
         nodeManager.getNodeStatus(dn1).getOperationalState());
   }
 
@@ -234,10 +228,9 @@ public class TestDatanodeAdminMonitor {
     monitor.startMonitoring(dn1, 0);
     monitor.run();
     assertEquals(1, monitor.getTrackedNodeCount());
-    DatanodeAdminNodeDetails node = getFirstTrackedNode();
+    DatanodeDetails node = getFirstTrackedNode();
     assertEquals(HddsProtos.NodeOperationalState.DECOMMISSIONING,
         nodeManager.getNodeStatus(dn1).getOperationalState());
-    assertEquals(3, node.getUnderReplicatedContainers());
 
     // Set the node to dead, and then the workflow should get aborted, setting
     // the node state back to IN_SERVICE on the next run.
@@ -268,10 +261,9 @@ public class TestDatanodeAdminMonitor {
     monitor.startMonitoring(dn1, 0);
     monitor.run();
     assertEquals(1, monitor.getTrackedNodeCount());
-    DatanodeAdminNodeDetails node = getFirstTrackedNode();
+    DatanodeDetails node = getFirstTrackedNode();
     assertEquals(HddsProtos.NodeOperationalState.DECOMMISSIONING,
         nodeManager.getNodeStatus(dn1).getOperationalState());
-    assertEquals(3, node.getUnderReplicatedContainers());
 
     // Set the node to dead, and then the workflow should get aborted, setting
     // the node state back to IN_SERVICE.
@@ -297,8 +289,7 @@ public class TestDatanodeAdminMonitor {
     monitor.startMonitoring(dn1, 1);
     monitor.run();
     assertEquals(1, monitor.getTrackedNodeCount());
-    DatanodeAdminNodeDetails node = getFirstTrackedNode();
-    assertEquals(0, node.getUnderReplicatedContainers());
+    DatanodeDetails node = getFirstTrackedNode();
     assertTrue(nodeManager.getNodeStatus(dn1).isInMaintenance());
 
     // Running the monitor again causes the node to remain in maintenance
@@ -308,7 +299,8 @@ public class TestDatanodeAdminMonitor {
 
     // Set the maintenance end time to a time in the past and then the node
     // should complete the workflow and transition to IN_SERVICE
-    node.setMaintenanceEnd(-1);
+    nodeManager.setNodeOperationalState(node,
+        HddsProtos.NodeOperationalState.IN_MAINTENANCE, -1);
     monitor.run();
     assertEquals(0, monitor.getTrackedNodeCount());
     assertEquals(IN_SERVICE,
@@ -327,13 +319,14 @@ public class TestDatanodeAdminMonitor {
     // Add the node to the monitor
     monitor.startMonitoring(dn1, 1);
     monitor.run();
-    DatanodeAdminNodeDetails node = getFirstTrackedNode();
+    DatanodeDetails node = getFirstTrackedNode();
     assertEquals(1, monitor.getTrackedNodeCount());
     assertTrue(nodeManager.getNodeStatus(dn1).isEnteringMaintenance());
 
     // Set the maintenance end time to the past and the node should complete
     // the workflow and return to IN_SERVICE
-    node.setMaintenanceEnd(-1);
+    nodeManager.setNodeOperationalState(node,
+        HddsProtos.NodeOperationalState.ENTERING_MAINTENANCE, -1);
     monitor.run();
     assertEquals(0, monitor.getTrackedNodeCount());
     assertEquals(IN_SERVICE,
@@ -361,11 +354,11 @@ public class TestDatanodeAdminMonitor {
     monitor.startMonitoring(dn1, 1);
     monitor.run();
     assertEquals(1, monitor.getTrackedNodeCount());
-    DatanodeAdminNodeDetails node = getFirstTrackedNode();
+    DatanodeDetails node = getFirstTrackedNode();
     assertTrue(nodeManager.getNodeStatus(dn1).isEnteringMaintenance());
-    assertEquals(3, node.getUnderReplicatedContainers());
 
-    node.setMaintenanceEnd(-1);
+    nodeManager.setNodeOperationalState(node,
+        HddsProtos.NodeOperationalState.ENTERING_MAINTENANCE, -1);
     monitor.run();
     assertEquals(0, monitor.getTrackedNodeCount());
     assertEquals(IN_SERVICE,
@@ -385,9 +378,8 @@ public class TestDatanodeAdminMonitor {
     monitor.startMonitoring(dn1, 1);
     monitor.run();
     assertEquals(1, monitor.getTrackedNodeCount());
-    DatanodeAdminNodeDetails node = getFirstTrackedNode();
+    DatanodeDetails node = getFirstTrackedNode();
     assertTrue(nodeManager.getNodeStatus(dn1).isInMaintenance());
-    assertEquals(0, node.getUnderReplicatedContainers());
 
     // Set the node dead and ensure the workflow does not end
     NodeStatus status = nodeManager.getNodeStatus(dn1);
@@ -413,9 +405,8 @@ public class TestDatanodeAdminMonitor {
     monitor.startMonitoring(dn1, 1);
     monitor.run();
     assertEquals(1, monitor.getTrackedNodeCount());
-    DatanodeAdminNodeDetails node = getFirstTrackedNode();
+    DatanodeDetails node = getFirstTrackedNode();
     assertTrue(nodeManager.getNodeStatus(dn1).isInMaintenance());
-    assertEquals(0, node.getUnderReplicatedContainers());
 
     // Now cancel the node and run the monitor, the node should be IN_SERVICE
     monitor.stopMonitoring(dn1);
@@ -489,9 +480,9 @@ public class TestDatanodeAdminMonitor {
    * the monitor.
    * @return DatanodeAdminNodeDetails for the first tracked node found.
    */
-  private DatanodeAdminNodeDetails getFirstTrackedNode() {
+  private DatanodeDetails getFirstTrackedNode() {
     return
-        monitor.getTrackedNodes().toArray(new DatanodeAdminNodeDetails[0])[0];
+        monitor.getTrackedNodes().toArray(new DatanodeDetails[0])[0];
   }
 
   /**
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestDatanodeAdminNodeDetails.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestDatanodeAdminNodeDetails.java
deleted file mode 100644
index 7a813a9..0000000
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestDatanodeAdminNodeDetails.java
+++ /dev/null
@@ -1,81 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdds.scm.node;
-
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdds.protocol.DatanodeDetails;
-import org.apache.hadoop.hdds.protocol.MockDatanodeDetails;
-
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Test;
-
-import static junit.framework.TestCase.*;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertNotEquals;
-
-/**
- * Tests to validate the DatanodeAdminNodeDetails class.
- */
-public class TestDatanodeAdminNodeDetails {
-
-  private OzoneConfiguration conf;
-
-  @Before
-  public void setup() {
-    conf = new OzoneConfiguration();
-  }
-
-  @After
-  public void teardown() {
-  }
-
-  @Test
-  public void testEqualityBasedOnDatanodeDetails() {
-    DatanodeDetails dn1 = MockDatanodeDetails.randomDatanodeDetails();
-    DatanodeDetails dn2 = MockDatanodeDetails.randomDatanodeDetails();
-    DatanodeAdminNodeDetails details1 =
-        new DatanodeAdminNodeDetails(dn1, 0);
-    DatanodeAdminNodeDetails details2 =
-        new DatanodeAdminNodeDetails(dn2, 0);
-
-    assertNotEquals(details1, details2);
-    assertEquals(details1,
-        new DatanodeAdminNodeDetails(dn1, 0));
-    assertNotEquals(details1, dn1);
-  }
-
-
-
-  @Test
-  public void testMaintenanceEnd() {
-    DatanodeDetails dn = MockDatanodeDetails.randomDatanodeDetails();
-    // End in zero hours - should never end.
-    DatanodeAdminNodeDetails details = new DatanodeAdminNodeDetails(dn, 0);
-    assertFalse(details.shouldMaintenanceEnd());
-
-    // End 1 hour - maintenance should not end yet.
-    details.setMaintenanceEnd(1);
-    assertFalse(details.shouldMaintenanceEnd());
-
-    // End 1 hour ago - maintenance should end.
-    details.setMaintenanceEnd(-1);
-    assertTrue(details.shouldMaintenanceEnd());
-  }
-
-}


---------------------------------------------------------------------
To unsubscribe, e-mail: ozone-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: ozone-commits-help@hadoop.apache.org