You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@ozone.apache.org by si...@apache.org on 2021/06/22 08:09:53 UTC

[ozone] branch master updated: HDDS-5314. Show number of Open containers per Node in Recon UI (#2321)

This is an automated email from the ASF dual-hosted git repository.

siyao pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/ozone.git


The following commit(s) were added to refs/heads/master by this push:
     new eb6c1b6  HDDS-5314. Show number of Open containers per Node in Recon UI (#2321)
eb6c1b6 is described below

commit eb6c1b6c1cbaca0d6dd01ef6e641501580ceda8c
Author: Yuan Gu <53...@users.noreply.github.com>
AuthorDate: Tue Jun 22 04:09:38 2021 -0400

    HDDS-5314. Show number of Open containers per Node in Recon UI (#2321)
    
    Contributed by Yuan Gu
---
 .../hadoop/ozone/recon/api/NodeEndpoint.java       |  17 +-
 .../ozone/recon/api/types/DatanodeMetadata.java    |  15 +
 .../ozone/recon/scm/ReconContainerManager.java     |  18 +
 .../webapps/recon/ozone-recon-web/api/db.json      |  20 +
 .../src/views/datanodes/datanodes.tsx              |  11 +
 .../hadoop/ozone/recon/api/TestEndpoints.java      |   3 +-
 .../ozone/recon/api/TestOpenContainerCount.java    | 424 +++++++++++++++++++++
 7 files changed, 505 insertions(+), 3 deletions(-)

diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/NodeEndpoint.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/NodeEndpoint.java
index 9d3734f..c2fb050 100644
--- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/NodeEndpoint.java
+++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/NodeEndpoint.java
@@ -28,11 +28,13 @@ import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
 import org.apache.hadoop.hdds.scm.pipeline.PipelineID;
 import org.apache.hadoop.hdds.scm.pipeline.PipelineNotFoundException;
 import org.apache.hadoop.hdds.scm.server.OzoneStorageContainerManager;
+import org.apache.hadoop.hdds.scm.container.ContainerID;
 import org.apache.hadoop.ozone.recon.api.types.DatanodeMetadata;
 import org.apache.hadoop.ozone.recon.api.types.DatanodePipeline;
 import org.apache.hadoop.ozone.recon.api.types.DatanodeStorageReport;
 import org.apache.hadoop.ozone.recon.api.types.DatanodesResponse;
 import org.apache.hadoop.ozone.recon.scm.ReconNodeManager;
+import org.apache.hadoop.ozone.recon.scm.ReconContainerManager;
 
 import javax.inject.Inject;
 import javax.ws.rs.GET;
@@ -62,11 +64,14 @@ public class NodeEndpoint {
 
   private ReconNodeManager nodeManager;
   private ReconPipelineManager pipelineManager;
+  private ReconContainerManager reconContainerManager;
 
   @Inject
   NodeEndpoint(OzoneStorageContainerManager reconSCM) {
     this.nodeManager =
         (ReconNodeManager) reconSCM.getScmNodeManager();
+    this.reconContainerManager = 
+        (ReconContainerManager) reconSCM.getContainerManager();
     this.pipelineManager = (ReconPipelineManager) reconSCM.getPipelineManager();
   }
 
@@ -92,7 +97,9 @@ public class NodeEndpoint {
       Set<PipelineID> pipelineIDs = nodeManager.getPipelines(datanode);
       List<DatanodePipeline> pipelines = new ArrayList<>();
       AtomicInteger leaderCount = new AtomicInteger();
+      AtomicInteger openContainers = new AtomicInteger();
       DatanodeMetadata.Builder builder = DatanodeMetadata.newBuilder();
+
       pipelineIDs.forEach(pipelineID -> {
         try {
           Pipeline pipeline = pipelineManager.getPipeline(pipelineID);
@@ -108,6 +115,10 @@ public class NodeEndpoint {
           if (datanode.getUuid().equals(pipeline.getLeaderId())) {
             leaderCount.getAndIncrement();
           }
+          int openContainerPerPipeline =
+                  reconContainerManager.getPipelineToOpenContainer()
+                  .getOrDefault(pipelineID, 0);
+          openContainers.getAndAdd(openContainerPerPipeline);
         } catch (PipelineNotFoundException ex) {
           LOG.warn("Cannot get pipeline {} for datanode {}, pipeline not found",
               pipelineID.getId(), hostname, ex);
@@ -117,8 +128,10 @@ public class NodeEndpoint {
         }
       });
       try {
-        int containers = nodeManager.getContainers(datanode).size();
-        builder.withContainers(containers);
+        Set<ContainerID> allContainers = nodeManager.getContainers(datanode);
+
+        builder.withContainers(allContainers.size());
+        builder.withOpenContainers(openContainers.get());
       } catch (NodeNotFoundException ex) {
         LOG.warn("Cannot get containers, datanode {} not found.",
             datanode.getUuid(), ex);
diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/DatanodeMetadata.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/DatanodeMetadata.java
index bb8bf29..77c96b4 100644
--- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/DatanodeMetadata.java
+++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/types/DatanodeMetadata.java
@@ -56,6 +56,9 @@ public final class DatanodeMetadata {
   @XmlElement(name = "containers")
   private int containers;
 
+  @XmlElement(name = "openContainers")
+  private int openContainers;
+
   @XmlElement(name = "leaderCount")
   private int leaderCount;
 
@@ -80,6 +83,7 @@ public final class DatanodeMetadata {
     this.datanodeStorageReport = builder.datanodeStorageReport;
     this.pipelines = builder.pipelines;
     this.containers = builder.containers;
+    this.openContainers = builder.openContainers;
     this.leaderCount = builder.leaderCount;
     this.version = builder.version;
     this.setupTime = builder.setupTime;
@@ -115,6 +119,10 @@ public final class DatanodeMetadata {
     return containers;
   }
 
+  public int getOpenContainers() {
+    return openContainers;
+  }
+
   public int getLeaderCount() {
     return leaderCount;
   }
@@ -161,6 +169,7 @@ public final class DatanodeMetadata {
     private DatanodeStorageReport datanodeStorageReport;
     private List<DatanodePipeline> pipelines;
     private int containers;
+    private int openContainers;
     private int leaderCount;
     private String version;
     private long setupTime;
@@ -169,6 +178,7 @@ public final class DatanodeMetadata {
 
     public Builder() {
       this.containers = 0;
+      this.openContainers = 0;
       this.leaderCount = 0;
     }
 
@@ -208,6 +218,11 @@ public final class DatanodeMetadata {
       return this;
     }
 
+    public Builder withOpenContainers(int openContainers) {
+      this.openContainers = openContainers;
+      return this;
+    }
+
     public Builder withLeaderCount(int leaderCount) {
       this.leaderCount = leaderCount;
       return this;
diff --git a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconContainerManager.java b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconContainerManager.java
index 5868d93..e295893 100644
--- a/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconContainerManager.java
+++ b/hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/scm/ReconContainerManager.java
@@ -70,6 +70,8 @@ public class ReconContainerManager extends ContainerManagerImpl {
   private final Table<UUID, DatanodeDetails> nodeDB;
   // Container ID -> Datanode UUID -> Timestamp
   private final Map<Long, Map<UUID, ContainerReplicaHistory>> replicaHistoryMap;
+  // Pipeline -> # of open containers
+  private final Map<PipelineID, Integer> pipelineToOpenContainer;
 
   /**
    * Constructs a mapping class that creates mapping between container names
@@ -101,6 +103,7 @@ public class ReconContainerManager extends ContainerManagerImpl {
     // batchHandler = scmDBStore
     this.nodeDB = ReconSCMDBDefinition.NODES.getTable(store);
     this.replicaHistoryMap = new ConcurrentHashMap<>();
+    this.pipelineToOpenContainer = new ConcurrentHashMap<>();
   }
 
   /**
@@ -203,6 +206,14 @@ public class ReconContainerManager extends ContainerManagerImpl {
         && isHealthy(state)) {
       LOG.info("Container {} has state OPEN, but given state is {}.",
           containerID, state);
+      final PipelineID pipelineID = containerInfo.getPipelineID();
+      // subtract open container count from the map
+      int curCnt = pipelineToOpenContainer.getOrDefault(pipelineID, 0);
+      if (curCnt == 1) {
+        pipelineToOpenContainer.remove(pipelineID);
+      } else if (curCnt > 0) {
+        pipelineToOpenContainer.put(pipelineID, curCnt - 1);
+      }
       updateContainerState(containerID, FINALIZE);
     }
   }
@@ -229,6 +240,9 @@ public class ReconContainerManager extends ContainerManagerImpl {
           pipelineManager.addContainerToPipeline(
               containerWithPipeline.getPipeline().getId(),
               containerInfo.containerID());
+          // update open container count on all datanodes on this pipeline
+          pipelineToOpenContainer.put(pipelineID,
+                    pipelineToOpenContainer.getOrDefault(pipelineID, 0) + 1);
           LOG.info("Successfully added container {} to Recon.",
               containerInfo.containerID());
         } else {
@@ -433,4 +447,8 @@ public class ReconContainerManager extends ContainerManagerImpl {
     return nodeDB;
   }
 
+  public Map<PipelineID, Integer> getPipelineToOpenContainer() {
+    return pipelineToOpenContainer;
+  }
+
 }
diff --git a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/api/db.json b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/api/db.json
index eb17c9c..241e6bd 100644
--- a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/api/db.json
+++ b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/api/db.json
@@ -9,6 +9,7 @@
       "remaining": 12094627905536
     },
     "containers": 3230,
+    "openContainers": 3210,
     "volumes": 5,
     "buckets": 156,
     "keys": 253000
@@ -42,6 +43,7 @@
           }
         ],
         "containers": 80,
+        "openContainers": 79,
         "leaderCount": 2,
         "version": "0.6.0-SNAPSHOT",
         "setupTime": 1574728775759,
@@ -74,6 +76,7 @@
           }
         ],
         "containers": 8192,
+        "openContainers": 8182,
         "leaderCount": 1,
         "version": "0.6.0-SNAPSHOT",
         "setupTime": 1574724805059,
@@ -106,6 +109,7 @@
           }
         ],
         "containers": 8192,
+        "openContainers": 8182,
         "leaderCount": 1,
         "version": "0.6.0-SNAPSHOT",
         "setupTime": 1574724805059,
@@ -138,6 +142,7 @@
           }
         ],
         "containers": 8192,
+        "openContainers": 8182,
         "leaderCount": 1,
         "version": "0.6.0-SNAPSHOT",
         "setupTime": 1574724805059,
@@ -170,6 +175,7 @@
           }
         ],
         "containers": 8192,
+        "openContainers": 8182,
         "leaderCount": 1,
         "version": "0.6.0-SNAPSHOT",
         "setupTime": 1574724805059,
@@ -208,6 +214,7 @@
           }
         ],
         "containers": 43,
+        "openContainers": 42,
         "leaderCount": 2,
         "version": "0.6.0-SNAPSHOT",
         "setupTime": 1343544679543,
@@ -246,6 +253,7 @@
           }
         ],
         "containers": 43,
+        "openContainers": 42,
         "leaderCount": 2,
         "version": "0.6.0-SNAPSHOT",
         "setupTime": 1343544679543,
@@ -284,6 +292,7 @@
           }
         ],
         "containers": 43,
+        "openContainers": 42,
         "leaderCount": 2,
         "version": "0.6.0-SNAPSHOT",
         "setupTime": 1343544679543,
@@ -322,6 +331,7 @@
           }
         ],
         "containers": 43,
+        "openContainers": 42,
         "leaderCount": 2,
         "version": "0.6.0-SNAPSHOT",
         "setupTime": 1343544679543,
@@ -360,6 +370,7 @@
           }
         ],
         "containers": 43,
+        "openContainers": 42,
         "leaderCount": 2,
         "version": "0.6.0-SNAPSHOT",
         "setupTime": 1343544679543,
@@ -379,6 +390,7 @@
         },
         "pipelines": [],
         "containers": 0,
+        "openContainers": 0,
         "leaderCount": 0,
         "version": "0.6.0-SNAPSHOT",
         "setupTime": 1074724802059,
@@ -411,6 +423,7 @@
           }
         ],
         "containers": 643,
+        "openContainers": 632,
         "leaderCount": 2,
         "version": "0.6.0-SNAPSHOT",
         "setupTime": 1574724816029,
@@ -443,6 +456,7 @@
           }
         ],
         "containers": 5,
+        "openContainers": 4,
         "leaderCount": 1,
         "version": "0.6.0-SNAPSHOT",
         "setupTime": 1574724802059,
@@ -481,6 +495,7 @@
           }
         ],
         "containers": 64,
+        "openContainers": 63,
         "leaderCount": 2,
         "version": "0.6.0-SNAPSHOT",
         "setupTime": 1574724676009,
@@ -513,6 +528,7 @@
           }
         ],
         "containers": 21,
+        "openContainers": 20,
         "leaderCount": 1,
         "version": "0.6.0-SNAPSHOT",
         "setupTime": 1574724276050,
@@ -545,6 +561,7 @@
           }
         ],
         "containers": 897,
+        "openContainers": 896,
         "leaderCount": 1,
         "version": "0.6.0-SNAPSHOT",
         "setupTime": 1574724573011,
@@ -583,6 +600,7 @@
           }
         ],
         "containers": 6754,
+        "openContainers": 6744,
         "leaderCount": 2,
         "version": "0.6.0-SNAPSHOT",
         "setupTime": 1574723756059,
@@ -615,6 +633,7 @@
           }
         ],
         "containers": 78,
+        "openContainers": 77,
         "leaderCount": 2,
         "version": "0.6.0-SNAPSHOT",
         "setupTime": 1474724705783,
@@ -647,6 +666,7 @@
           }
         ],
         "containers": 543,
+        "openContainers": 540,
         "leaderCount": 1,
         "version": "0.6.0-SNAPSHOT",
         "setupTime": 1574724706232,
diff --git a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/views/datanodes/datanodes.tsx b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/views/datanodes/datanodes.tsx
index e08b7d9..962eefc 100644
--- a/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/views/datanodes/datanodes.tsx
+++ b/hadoop-ozone/recon/src/main/resources/webapps/recon/ozone-recon-web/src/views/datanodes/datanodes.tsx
@@ -46,6 +46,7 @@ interface IDatanodeResponse {
   storageReport: IStorageReport;
   pipelines: IPipeline[];
   containers: number;
+  openContainers: number;
   leaderCount: number;
   uuid: string;
   version: string;
@@ -69,6 +70,7 @@ interface IDatanode {
   storageRemaining: number;
   pipelines: IPipeline[];
   containers: number;
+  openContainers: number;
   leaderCount: number;
   uuid: string;
   version: string;
@@ -227,6 +229,14 @@ const COLUMNS = [
     sorter: (a: IDatanode, b: IDatanode) => a.containers - b.containers
   },
   {
+    title: 'Open Containers',
+    dataIndex: 'openContainers',
+    key: 'openContainers',
+    isVisible: true,
+    isSearchable: true,
+    sorter: (a: IDatanode, b: IDatanode) => a.openContainers - b.openContainers
+  },
+  {
     title: 'Version',
     dataIndex: 'version',
     key: 'version',
@@ -327,6 +337,7 @@ export class Datanodes extends React.Component<Record<string, object>, IDatanode
           storageRemaining: datanode.storageReport.remaining,
           pipelines: datanode.pipelines,
           containers: datanode.containers,
+          openContainers: datanode.openContainers,
           leaderCount: datanode.leaderCount,
           version: datanode.version,
           setupTime: datanode.setupTime,
diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestEndpoints.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestEndpoints.java
index 9ba500c..03aa184 100644
--- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestEndpoints.java
+++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestEndpoints.java
@@ -165,6 +165,7 @@ public class TestEndpoints extends AbstractReconSqlDBTest {
         .setOwner("test")
         .setPipelineID(pipeline.getId())
         .build();
+
     ContainerWithPipeline containerWithPipeline =
         new ContainerWithPipeline(containerInfo, pipeline);
 
@@ -457,6 +458,7 @@ public class TestEndpoints extends AbstractReconSqlDBTest {
               .findFirst().orElse(null);
       return (datanodeMetadata1 != null &&
           datanodeMetadata1.getContainers() == 1 &&
+          datanodeMetadata1.getOpenContainers() == 1 &&
           reconScm.getPipelineManager()
               .getContainersInPipeline(pipeline.getId()).size() == 1);
     });
@@ -689,7 +691,6 @@ public class TestEndpoints extends AbstractReconSqlDBTest {
     assertEquals(0, resultSet.size());
   }
 
-
   private void waitAndCheckConditionAfterHeartbeat(Callable<Boolean> check)
       throws Exception {
     // if container report is processed first, and pipeline does not exist
diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestOpenContainerCount.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestOpenContainerCount.java
new file mode 100644
index 0000000..2647c9a
--- /dev/null
+++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestOpenContainerCount.java
@@ -0,0 +1,424 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.ozone.recon.api;
+
+import org.apache.hadoop.ozone.OzoneConsts;
+import org.apache.hadoop.hdds.client.RatisReplicationConfig;
+import org.apache.hadoop.hdds.protocol.DatanodeDetails;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos
+        .ExtendedDatanodeDetailsProto;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleState;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos.PipelineID;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor;
+import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ContainerReplicaProto;
+import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.PipelineReport;
+import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMHeartbeatRequestProto;
+import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.StorageTypeProto;
+import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ContainerReportsProto.Builder;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos.DatanodeDetailsProto;
+import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.StorageReportProto;
+import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.NodeReportProto;
+import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ContainerReportsProto;
+import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.PipelineReportsProto;
+import org.apache.hadoop.hdds.scm.container.ContainerInfo;
+import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline;
+import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
+import org.apache.hadoop.hdds.scm.protocol.StorageContainerLocationProtocol;
+import org.apache.hadoop.hdds.scm.server.OzoneStorageContainerManager;
+import org.apache.hadoop.hdfs.web.URLConnectionFactory;
+import org.apache.hadoop.ozone.recon.MetricsServiceProviderFactory;
+import org.apache.hadoop.ozone.recon.ReconTestInjector;
+import org.apache.hadoop.ozone.recon.ReconUtils;
+import org.apache.hadoop.ozone.recon.api.types.DatanodeMetadata;
+import org.apache.hadoop.ozone.recon.api.types.DatanodesResponse;
+import org.apache.hadoop.ozone.recon.persistence.ContainerHealthSchemaManager;
+import org.apache.hadoop.ozone.recon.recovery.ReconOMMetadataManager;
+import org.apache.hadoop.ozone.recon.scm.ReconStorageContainerManagerFacade;
+import org.apache.hadoop.ozone.recon.spi.StorageContainerServiceProvider;
+import org.apache.hadoop.ozone.recon.spi.impl.OzoneManagerServiceProviderImpl;
+import org.apache.hadoop.ozone.recon.spi.impl.StorageContainerServiceProviderImpl;
+import org.apache.ozone.test.LambdaTestUtils;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.TemporaryFolder;
+
+import static org.apache.hadoop.hdds.protocol.MockDatanodeDetails.randomDatanodeDetails;
+import static org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.getRandomPipeline;
+import static org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.getTestReconOmMetadataManager;
+import static org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.initializeNewOmMetadataManager;
+import static org.mockito.ArgumentMatchers.any;
+import static org.mockito.ArgumentMatchers.anyBoolean;
+import static org.mockito.ArgumentMatchers.anyString;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.when;
+
+import javax.servlet.http.HttpServletResponse;
+import javax.ws.rs.core.Response;
+
+import java.io.IOException;
+import java.net.HttpURLConnection;
+import java.util.*;
+import java.util.concurrent.Callable;
+
+/**
+ * Test for Open Container count per Datanode.
+ */
+public class TestOpenContainerCount {
+  @Rule
+  public TemporaryFolder temporaryFolder = new TemporaryFolder();
+
+  private NodeEndpoint nodeEndpoint;
+  private ReconOMMetadataManager reconOMMetadataManager;
+  private ReconStorageContainerManagerFacade reconScm;
+  private boolean isSetupDone = false;
+  private String pipelineId;
+  private String pipelineId2;
+  private DatanodeDetails datanodeDetails;
+  private String datanodeId;
+  private ContainerReportsProto containerReportsProto;
+  private Builder builder;
+  private ExtendedDatanodeDetailsProto extendedDatanodeDetailsProto;
+  private NodeReportProto nodeReportProto;
+  private PipelineReportsProto pipelineReportsProto;
+  private Pipeline pipeline;
+  private Pipeline pipeline2;
+  private static final String HOST1 = "host1.datanode";
+  private static final String IP1 = "1.1.1.1";
+  private ReconUtils reconUtilsMock;
+  private StorageContainerServiceProvider mockScmServiceProvider;
+
+  private List<Long> containerIDs;
+
+  private List<ContainerWithPipeline> cpw;
+
+  private void initializeInjector() throws Exception {
+    reconOMMetadataManager = getTestReconOmMetadataManager(
+            initializeNewOmMetadataManager(temporaryFolder.newFolder()),
+            temporaryFolder.newFolder());
+    datanodeDetails = randomDatanodeDetails();
+    datanodeDetails.setHostName(HOST1);
+    datanodeDetails.setIpAddress(IP1);
+    pipeline = getRandomPipeline(datanodeDetails);
+    pipelineId = pipeline.getId().getId().toString();
+
+    pipeline2 = getRandomPipeline(datanodeDetails);
+    pipelineId2 = pipeline2.getId().getId().toString();
+
+    StorageContainerLocationProtocol mockScmClient = mock(
+            StorageContainerLocationProtocol.class);
+    mockScmServiceProvider = mock(
+            StorageContainerServiceProviderImpl.class);
+
+    when(mockScmServiceProvider.getPipeline(
+            pipeline.getId().getProtobuf())).thenReturn(pipeline);
+    when(mockScmServiceProvider.getPipeline(
+            pipeline2.getId().getProtobuf())).thenReturn(pipeline2);
+
+    // Open 5 containers on pipeline 1
+    containerIDs = new LinkedList<>();
+    cpw = new LinkedList<>();
+    for (long i = 1L; i <= 5L; ++i) {
+      ContainerInfo containerInfo = new ContainerInfo.Builder()
+              .setContainerID(i)
+              .setReplicationConfig(
+                      new RatisReplicationConfig(ReplicationFactor.ONE))
+              .setState(LifeCycleState.OPEN)
+              .setOwner("test")
+              .setPipelineID(pipeline.getId())
+              .build();
+      ContainerWithPipeline containerWithPipeline =
+              new ContainerWithPipeline(containerInfo, pipeline);
+      when(mockScmServiceProvider.getContainerWithPipeline(i))
+              .thenReturn(containerWithPipeline);
+      containerIDs.add(i);
+      cpw.add(containerWithPipeline);
+    }
+
+    // Open 5 containers on pipeline 2
+    for (long i = 6L; i <= 10L; ++i) {
+      ContainerInfo containerInfo = new ContainerInfo.Builder()
+              .setContainerID(i)
+              .setReplicationConfig(
+                      new RatisReplicationConfig(ReplicationFactor.ONE))
+              .setState(LifeCycleState.OPEN)
+              .setOwner("test")
+              .setPipelineID(pipeline2.getId())
+              .build();
+      ContainerWithPipeline containerWithPipeline =
+              new ContainerWithPipeline(containerInfo, pipeline2);
+      when(mockScmServiceProvider.getContainerWithPipeline(i))
+              .thenReturn(containerWithPipeline);
+      containerIDs.add(i);
+      cpw.add(containerWithPipeline);
+    }
+
+    when(mockScmServiceProvider
+            .getExistContainerWithPipelinesInBatch(containerIDs))
+            .thenReturn(cpw);
+
+    reconUtilsMock = mock(ReconUtils.class);
+    HttpURLConnection urlConnectionMock = mock(HttpURLConnection.class);
+    when(urlConnectionMock.getResponseCode())
+            .thenReturn(HttpServletResponse.SC_OK);
+    when(reconUtilsMock.makeHttpCall(any(URLConnectionFactory.class),
+            anyString(), anyBoolean())).thenReturn(urlConnectionMock);
+
+    ReconTestInjector reconTestInjector =
+            new ReconTestInjector.Builder(temporaryFolder)
+                    .withReconSqlDb()
+                    .withReconOm(reconOMMetadataManager)
+                    .withOmServiceProvider(
+                            mock(OzoneManagerServiceProviderImpl.class))
+                    .addBinding(StorageContainerServiceProvider.class,
+                            mockScmServiceProvider)
+                    .addBinding(OzoneStorageContainerManager.class,
+                            ReconStorageContainerManagerFacade.class)
+                    .withContainerDB()
+                    .addBinding(NodeEndpoint.class)
+                    .addBinding(MetricsServiceProviderFactory.class)
+                    .addBinding(ContainerHealthSchemaManager.class)
+                    .addBinding(ReconUtils.class, reconUtilsMock)
+                    .addBinding(StorageContainerLocationProtocol.class,
+                            mockScmClient)
+                    .build();
+
+    nodeEndpoint = reconTestInjector.getInstance(NodeEndpoint.class);
+    reconScm = (ReconStorageContainerManagerFacade)
+            reconTestInjector.getInstance(OzoneStorageContainerManager.class);
+  }
+
+  @Before
+  public void setUp() throws Exception {
+    // The following setup runs only once
+    if (!isSetupDone) {
+      initializeInjector();
+      isSetupDone = true;
+    }
+    datanodeId = datanodeDetails.getUuid().toString();
+
+    // initialize container report
+    builder = ContainerReportsProto.newBuilder();
+    for (long i = 1L; i <= 10L; i++) {
+      builder.addReports(
+              ContainerReplicaProto.newBuilder()
+                      .setContainerID(i)
+                      .setState(ContainerReplicaProto.State.OPEN)
+                      .setOriginNodeId(datanodeId)
+                      .build()
+      );
+    }
+    containerReportsProto = builder.build();
+
+    UUID pipelineUuid = UUID.fromString(pipelineId);
+    HddsProtos.UUID uuid128 = HddsProtos.UUID.newBuilder()
+            .setMostSigBits(pipelineUuid.getMostSignificantBits())
+            .setLeastSigBits(pipelineUuid.getLeastSignificantBits())
+            .build();
+
+    UUID pipelineUuid2 = UUID.fromString(pipelineId2);
+    HddsProtos.UUID uuid1282 = HddsProtos.UUID.newBuilder()
+            .setMostSigBits(pipelineUuid2.getMostSignificantBits())
+            .setLeastSigBits(pipelineUuid2.getLeastSignificantBits())
+            .build();
+
+    PipelineReport pipelineReport = PipelineReport.newBuilder()
+            .setPipelineID(
+                    PipelineID.newBuilder()
+                            .setId(pipelineId)
+                            .setUuid128(uuid128)
+                            .build())
+            .setIsLeader(true)
+            .build();
+
+    PipelineReport pipelineReport2 = PipelineReport.newBuilder()
+            .setPipelineID(
+                    PipelineID
+                            .newBuilder()
+                            .setId(pipelineId2)
+                            .setUuid128(uuid1282)
+                            .build())
+            .setIsLeader(false)
+            .build();
+
+    pipelineReportsProto =
+            PipelineReportsProto.newBuilder()
+                    .addPipelineReport(pipelineReport)
+                    .addPipelineReport(pipelineReport2)
+                    .build();
+
+    DatanodeDetailsProto datanodeDetailsProto =
+            DatanodeDetailsProto.newBuilder()
+                    .setHostName(HOST1)
+                    .setUuid(datanodeId)
+                    .setIpAddress(IP1)
+                    .build();
+
+    extendedDatanodeDetailsProto =
+            HddsProtos.ExtendedDatanodeDetailsProto.newBuilder()
+                    .setDatanodeDetails(datanodeDetailsProto)
+                    .setVersion("0.6.0")
+                    .setSetupTime(1596347628802L)
+                    .setBuildDate("2020-08-01T08:50Z")
+                    .setRevision("3346f493fa1690358add7bb9f3e5b52545993f36")
+                    .build();
+
+    StorageReportProto storageReportProto1 =
+            StorageReportProto.newBuilder()
+                    .setStorageType(StorageTypeProto.DISK)
+                    .setStorageLocation("/disk1")
+                    .setScmUsed(10 * OzoneConsts.GB)
+                    .setRemaining(90 * OzoneConsts.GB)
+                    .setCapacity(100 * OzoneConsts.GB)
+                    .setStorageUuid(UUID.randomUUID().toString())
+                    .setFailed(false).build();
+
+    StorageReportProto storageReportProto2 =
+            StorageReportProto.newBuilder()
+                    .setStorageType(StorageTypeProto.DISK)
+                    .setStorageLocation("/disk2")
+                    .setScmUsed(10 * OzoneConsts.GB)
+                    .setRemaining(90 * OzoneConsts.GB)
+                    .setCapacity(100 * OzoneConsts.GB)
+                    .setStorageUuid(UUID.randomUUID().toString())
+                    .setFailed(false).build();
+
+    nodeReportProto =
+            NodeReportProto.newBuilder()
+                    .addStorageReport(storageReportProto1)
+                    .addStorageReport(storageReportProto2).build();
+
+    try {
+      reconScm.getDatanodeProtocolServer()
+              .register(extendedDatanodeDetailsProto, nodeReportProto,
+                      containerReportsProto, pipelineReportsProto);
+      // Process all events in the event queue
+      reconScm.getEventQueue().processAll(1000);
+    } catch (Exception ex) {
+      Assert.fail(ex.getMessage());
+    }
+  }
+
+  @Test
+  public void testOpenContainerCount() throws Exception {
+    // The pipeline may not exist yet when the first container report is
+    // processed; wait until the node reports all containers and pipelines.
+    waitAndCheckConditionAfterHeartbeat(() -> {
+      DatanodeMetadata metadata = getDatanodeMetadata();
+      return metadata.getContainers() == 10
+              && metadata.getPipelines().size() == 2;
+    });
+
+    int openCount = getDatanodeMetadata().getOpenContainers();
+
+    // Closing the containers one by one should decrement the node's
+    // open-container count by exactly one each time.
+    for (long containerId = 1L; containerId <= 10L; ++containerId) {
+      closeContainer(containerId);
+      --openCount;
+      Assert.assertEquals(openCount,
+              getDatanodeMetadata().getOpenContainers());
+    }
+  }
+
+  /**
+   * Queries the node endpoint and returns the metadata entry for
+   * "host1.datanode".
+   *
+   * @return metadata for host1.datanode; fails the test if the node is
+   *         not present in the response
+   */
+  private DatanodeMetadata getDatanodeMetadata() {
+    Response response = nodeEndpoint.getDatanodes();
+    DatanodesResponse datanodesResponse =
+            (DatanodesResponse) response.getEntity();
+
+    DatanodeMetadata datanodeMetadata =
+            datanodesResponse.getDatanodes().stream().filter(metadata ->
+                    metadata.getHostname().equals("host1.datanode"))
+                    .findFirst().orElse(null);
+    // Fail fast with a clear message instead of letting callers hit an
+    // opaque NullPointerException on the returned value.
+    Assert.assertNotNull("host1.datanode not found in datanodes response",
+            datanodeMetadata);
+    return datanodeMetadata;
+  }
+
+  /**
+   * Marks the given container as CLOSED in the mocked SCM service provider
+   * and sends an updated container report so Recon processes the change.
+   * Containers 1-5 belong to the first pipeline, 6-10 to the second; any
+   * other id only refreshes the batch lookup and report.
+   *
+   * @param containerID id of the container to close (1-10 expected)
+   * @throws IOException if stubbing the mocked SCM call fails
+   */
+  private void closeContainer(long containerID) throws IOException {
+    // Pick the pipeline that owns this container; the two branches in the
+    // original code differed only in this choice.
+    Pipeline targetPipeline = null;
+    if (containerID >= 1L && containerID <= 5L) {
+      targetPipeline = pipeline;
+    } else if (containerID >= 6L && containerID <= 10L) {
+      targetPipeline = pipeline2;
+    }
+
+    if (targetPipeline != null) {
+      ContainerInfo closedContainer = new ContainerInfo.Builder()
+              .setContainerID(containerID)
+              .setReplicationConfig(
+                      new RatisReplicationConfig(ReplicationFactor.ONE))
+              .setState(LifeCycleState.CLOSED)
+              .setOwner("test")
+              .setPipelineID(targetPipeline.getId())
+              .build();
+      ContainerWithPipeline containerWithPipeline =
+              new ContainerWithPipeline(closedContainer, targetPipeline);
+      when(mockScmServiceProvider.getContainerWithPipeline(containerID))
+              .thenReturn(containerWithPipeline);
+      cpw.set((int) containerID - 1, containerWithPipeline);
+    }
+    when(mockScmServiceProvider
+            .getExistContainerWithPipelinesInBatch(containerIDs))
+            .thenReturn(cpw);
+    updateContainerReport(containerID);
+  }
+
+  /**
+   * Re-registers the datanode with a container report in which the given
+   * container is marked CLOSED, then drains the SCM event queue so the
+   * report is fully processed before returning.
+   *
+   * @param containerId id of the container whose replica state is updated
+   */
+  private void updateContainerReport(long containerId) {
+    ContainerReplicaProto closedReplica =
+            ContainerReplicaProto.newBuilder()
+                    .setContainerID(containerId)
+                    .setState(ContainerReplicaProto.State.CLOSED)
+                    .setOriginNodeId(datanodeId)
+                    .build();
+    // Replace the replica entry at index (containerId - 1) in the report.
+    containerReportsProto =
+            builder.setReports((int) containerId - 1, closedReplica).build();
+    try {
+      reconScm.getDatanodeProtocolServer()
+              .register(extendedDatanodeDetailsProto, nodeReportProto,
+                      containerReportsProto, pipelineReportsProto);
+      // Drain all pending events so the updated report takes effect.
+      reconScm.getEventQueue().processAll(1000);
+    } catch (Exception ex) {
+      Assert.fail(ex.getMessage());
+    }
+  }
+
+  /**
+   * Sends a heartbeat carrying the current container report and polls the
+   * given condition every second until it holds or 30 seconds elapse.
+   *
+   * @param check condition to await after the heartbeat is delivered
+   * @throws Exception if the condition never holds within the timeout
+   */
+  private void waitAndCheckConditionAfterHeartbeat(Callable<Boolean> check)
+          throws Exception {
+    // If the container report is processed before the pipeline exists, the
+    // containers are only registered once a later report is processed.
+    SCMHeartbeatRequestProto heartbeat =
+            SCMHeartbeatRequestProto.newBuilder()
+                    .setDatanodeDetails(extendedDatanodeDetailsProto
+                            .getDatanodeDetails())
+                    .setContainerReport(containerReportsProto)
+                    .build();
+
+    reconScm.getDatanodeProtocolServer().sendHeartbeat(heartbeat);
+    LambdaTestUtils.await(30000, 1000, check);
+  }
+}

---------------------------------------------------------------------
To unsubscribe, e-mail: commits-unsubscribe@ozone.apache.org
For additional commands, e-mail: commits-help@ozone.apache.org