You are viewing a plain text version of this content. The canonical link for it is here.
Posted to common-commits@hadoop.apache.org by xy...@apache.org on 2018/10/18 17:44:54 UTC

[29/50] [abbrv] hadoop git commit: HDDS-662. Introduce ContainerReplicaState in StorageContainerManager. Contributed by Nanda kumar.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/50715c06/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMClientProtocolServer.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMClientProtocolServer.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMClientProtocolServer.java
index 3523499..badcec7 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMClientProtocolServer.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMClientProtocolServer.java
@@ -33,8 +33,10 @@ import org.apache.hadoop.hdds.protocol.proto
 import org.apache.hadoop.hdds.scm.HddsServerUtil;
 import org.apache.hadoop.hdds.scm.ScmInfo;
 import org.apache.hadoop.hdds.scm.ScmUtils;
+import org.apache.hadoop.hdds.scm.container.ContainerID;
+import org.apache.hadoop.hdds.scm.container.ContainerNotFoundException;
 import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline;
-import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerInfo;
+import org.apache.hadoop.hdds.scm.container.ContainerInfo;
 import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline;
 import org.apache.hadoop.hdds.scm.exceptions.SCMException;
 import org.apache.hadoop.hdds.scm.exceptions.SCMException.ResultCodes;
@@ -169,7 +171,7 @@ public class SCMClientProtocolServer implements
     String remoteUser = getRpcRemoteUsername();
     getScm().checkAdminAccess(remoteUser);
     return scm.getContainerManager()
-        .getContainer(containerID);
+        .getContainer(ContainerID.valueof(containerID));
   }
 
   @Override
@@ -177,8 +179,8 @@ public class SCMClientProtocolServer implements
       throws IOException {
     if (chillModePrecheck.isInChillMode()) {
       ContainerInfo contInfo = scm.getContainerManager()
-          .getContainer(containerID);
-      if (contInfo.isContainerOpen()) {
+          .getContainer(ContainerID.valueof(containerID));
+      if (contInfo.isOpen()) {
         if (!hasRequiredReplicas(contInfo)) {
           throw new SCMException("Open container " + containerID + " doesn't"
               + " have enough replicas to service this operation in "
@@ -189,7 +191,7 @@ public class SCMClientProtocolServer implements
     String remoteUser = getRpcRemoteUsername();
     getScm().checkAdminAccess(null);
     return scm.getContainerManager()
-        .getContainerWithPipeline(containerID);
+        .getContainerWithPipeline(ContainerID.valueof(containerID));
   }
 
   /**
@@ -198,10 +200,10 @@ public class SCMClientProtocolServer implements
    */
   private boolean hasRequiredReplicas(ContainerInfo contInfo) {
     try{
-      return getScm().getContainerManager().getStateManager()
+      return getScm().getContainerManager()
           .getContainerReplicas(contInfo.containerID())
           .size() >= contInfo.getReplicationFactor().getNumber();
-    } catch (SCMException ex) {
+    } catch (ContainerNotFoundException ex) {
       // getContainerReplicas throws exception if no replica's exist for given
       // container.
       return false;
@@ -212,14 +214,14 @@ public class SCMClientProtocolServer implements
   public List<ContainerInfo> listContainer(long startContainerID,
       int count) throws IOException {
     return scm.getContainerManager().
-        listContainer(startContainerID, count);
+        listContainer(ContainerID.valueof(startContainerID), count);
   }
 
   @Override
   public void deleteContainer(long containerID) throws IOException {
     String remoteUser = getRpcRemoteUsername();
     getScm().checkAdminAccess(remoteUser);
-    scm.getContainerManager().deleteContainer(containerID);
+    scm.getContainerManager().deleteContainer(ContainerID.valueof(containerID));
 
   }
 
@@ -257,10 +259,12 @@ public class SCMClientProtocolServer implements
           .ObjectStageChangeRequestProto.Op.create) {
         if (stage == StorageContainerLocationProtocolProtos
             .ObjectStageChangeRequestProto.Stage.begin) {
-          scm.getContainerManager().updateContainerState(id, HddsProtos
+          scm.getContainerManager().updateContainerState(
+              ContainerID.valueof(id), HddsProtos
               .LifeCycleEvent.CREATE);
         } else {
-          scm.getContainerManager().updateContainerState(id, HddsProtos
+          scm.getContainerManager().updateContainerState(
+              ContainerID.valueof(id), HddsProtos
               .LifeCycleEvent.CREATED);
         }
       } else {
@@ -268,10 +272,12 @@ public class SCMClientProtocolServer implements
             .ObjectStageChangeRequestProto.Op.close) {
           if (stage == StorageContainerLocationProtocolProtos
               .ObjectStageChangeRequestProto.Stage.begin) {
-            scm.getContainerManager().updateContainerState(id, HddsProtos
+            scm.getContainerManager().updateContainerState(
+                ContainerID.valueof(id), HddsProtos
                 .LifeCycleEvent.FINALIZE);
           } else {
-            scm.getContainerManager().updateContainerState(id, HddsProtos
+            scm.getContainerManager().updateContainerState(
+                ContainerID.valueof(id), HddsProtos
                 .LifeCycleEvent.CLOSE);
           }
         }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/50715c06/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMDatanodeProtocolServer.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMDatanodeProtocolServer.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMDatanodeProtocolServer.java
index 2c96856..37c7386 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMDatanodeProtocolServer.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMDatanodeProtocolServer.java
@@ -197,7 +197,7 @@ public class SCMDatanodeProtocolServer implements
     if (registeredCommand.getError()
         == SCMRegisteredResponseProto.ErrorCode.success) {
       scm.getContainerManager().processContainerReports(datanodeDetails,
-          containerReportsProto, true);
+          containerReportsProto);
       eventPublisher.fireEvent(SCMEvents.NODE_REGISTRATION_CONT_REPORT,
           new NodeRegistrationContainerReport(datanodeDetails,
               containerReportsProto));

http://git-wip-us.apache.org/repos/asf/hadoop/blob/50715c06/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java
index 9626105..22039e5 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java
@@ -40,13 +40,14 @@ import org.apache.hadoop.hdds.scm.command.CommandStatusReportHandler;
 import org.apache.hadoop.hdds.scm.container.CloseContainerEventHandler;
 import org.apache.hadoop.hdds.scm.container.CloseContainerWatcher;
 import org.apache.hadoop.hdds.scm.container.ContainerActionsHandler;
+import org.apache.hadoop.hdds.scm.container.ContainerID;
 import org.apache.hadoop.hdds.scm.container.ContainerManager;
 import org.apache.hadoop.hdds.scm.container.SCMContainerManager;
 import org.apache.hadoop.hdds.scm.container.ContainerReportHandler;
 import org.apache.hadoop.hdds.scm.container.replication
     .ReplicationActivityStatus;
 import org.apache.hadoop.hdds.scm.container.replication.ReplicationManager;
-import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerInfo;
+import org.apache.hadoop.hdds.scm.container.ContainerInfo;
 import org.apache.hadoop.hdds.scm.container.placement.algorithms
     .ContainerPlacementPolicy;
 import org.apache.hadoop.hdds.scm.container.placement.algorithms
@@ -97,9 +98,6 @@ import java.util.Map;
 import java.util.concurrent.ConcurrentMap;
 import java.util.concurrent.TimeUnit;
 
-import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_DB_CACHE_SIZE_DEFAULT;
-import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_DB_CACHE_SIZE_MB;
-
 import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ENABLED;
 import static org.apache.hadoop.util.ExitUtil.terminate;
 
@@ -190,9 +188,6 @@ public final class StorageContainerManager extends ServiceRuntimeInfoImpl
    */
   private StorageContainerManager(OzoneConfiguration conf) throws IOException {
 
-    final int cacheSize = conf.getInt(OZONE_SCM_DB_CACHE_SIZE_MB,
-        OZONE_SCM_DB_CACHE_SIZE_DEFAULT);
-
     StorageContainerManager.initMetrics();
     initContainerReportCache(conf);
 
@@ -207,9 +202,9 @@ public final class StorageContainerManager extends ServiceRuntimeInfoImpl
     scmNodeManager = new SCMNodeManager(
         conf, scmStorage.getClusterID(), this, eventQueue);
     containerManager = new SCMContainerManager(
-        conf, getScmNodeManager(), cacheSize, eventQueue);
+        conf, scmNodeManager, eventQueue);
     scmBlockManager = new BlockManagerImpl(
-        conf, getScmNodeManager(), containerManager, eventQueue);
+        conf, scmNodeManager, containerManager, eventQueue);
 
     replicationStatus = new ReplicationActivityStatus();
 
@@ -227,7 +222,7 @@ public final class StorageContainerManager extends ServiceRuntimeInfoImpl
     StaleNodeHandler staleNodeHandler =
         new StaleNodeHandler(containerManager.getPipelineSelector());
     DeadNodeHandler deadNodeHandler = new DeadNodeHandler(scmNodeManager,
-        getContainerManager().getStateManager());
+        containerManager);
     ContainerActionsHandler actionsHandler = new ContainerActionsHandler();
     PendingDeleteHandler pendingDeleteHandler =
         new PendingDeleteHandler(scmBlockManager.getSCMBlockDeletingService());
@@ -236,7 +231,7 @@ public final class StorageContainerManager extends ServiceRuntimeInfoImpl
         new ContainerReportHandler(containerManager, scmNodeManager,
             replicationStatus);
     scmChillModeManager = new SCMChillModeManager(conf,
-        getContainerManager().getStateManager().getAllContainers(),
+        containerManager.getContainers(),
         eventQueue);
     PipelineActionEventHandler pipelineActionEventHandler =
         new PipelineActionEventHandler();
@@ -263,8 +258,7 @@ public final class StorageContainerManager extends ServiceRuntimeInfoImpl
         new SCMContainerPlacementCapacity(scmNodeManager, conf);
 
     replicationManager = new ReplicationManager(containerPlacementPolicy,
-        containerManager.getStateManager(), eventQueue,
-        commandWatcherLeaseManager);
+        containerManager, eventQueue, commandWatcherLeaseManager);
 
     // setup CloseContainer watcher
     CloseContainerWatcher closeContainerWatcher =
@@ -632,7 +626,7 @@ public final class StorageContainerManager extends ServiceRuntimeInfoImpl
   @VisibleForTesting
   public ContainerInfo getContainerInfo(long containerID) throws
       IOException {
-    return containerManager.getContainer(containerID);
+    return containerManager.getContainer(ContainerID.valueof(containerID));
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/hadoop/blob/50715c06/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/HddsTestUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/HddsTestUtils.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/HddsTestUtils.java
index 50d1eed..9b28e1e 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/HddsTestUtils.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/HddsTestUtils.java
@@ -22,7 +22,7 @@ import java.util.List;
 import org.apache.commons.lang3.RandomUtils;
 import org.apache.hadoop.hdds.protocol.proto
     .StorageContainerDatanodeProtocolProtos;
-import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerInfo;
+import org.apache.hadoop.hdds.scm.container.ContainerInfo;
 import org.apache.hadoop.hdds.scm.server.SCMDatanodeProtocolServer
     .NodeRegistrationContainerReport;
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/50715c06/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/TestUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/TestUtils.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/TestUtils.java
index 21f00cd..9d3ec10 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/TestUtils.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/TestUtils.java
@@ -23,11 +23,10 @@ import org.apache.hadoop.hdds.protocol.proto
         .StorageContainerDatanodeProtocolProtos.PipelineReportsProto;
 import org.apache.hadoop.hdds.scm.server
     .SCMDatanodeHeartbeatDispatcher.PipelineReportFromDatanode;
-import org.mockito.Mockito;
-import static org.mockito.Mockito.when;
+import org.apache.hadoop.hdds.scm.container.ContainerID;
+import org.apache.hadoop.hdds.scm.container.ContainerManager;
 
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleState;
 import org.apache.hadoop.hdds.protocol.proto
     .StorageContainerDatanodeProtocolProtos.ContainerInfo;
 import org.apache.hadoop.hdds.protocol.proto
@@ -42,13 +41,8 @@ import org.apache.hadoop.hdds.protocol.proto
         .StorageContainerDatanodeProtocolProtos.StorageReportProto;
 import org.apache.hadoop.hdds.protocol.proto
     .StorageContainerDatanodeProtocolProtos.StorageTypeProto;
-import org.apache.hadoop.hdds.scm.container.ContainerStateManager;
-import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline;
-import org.apache.hadoop.hdds.scm.container.common.helpers.PipelineID;
-import org.apache.hadoop.hdds.scm.exceptions.SCMException;
 import org.apache.hadoop.hdds.scm.node.SCMNodeManager;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
-import org.apache.hadoop.hdds.scm.pipelines.PipelineSelector;
 import org.apache.hadoop.ozone.OzoneConsts;
 import org.apache.hadoop.ozone.protocol.commands.RegisteredCommand;
 
@@ -412,39 +406,21 @@ public final class TestUtils {
     return report.build();
   }
 
-  public static
-      org.apache.hadoop.hdds.scm.container.common.helpers.ContainerInfo
-      allocateContainer(ContainerStateManager containerStateManager)
+  public static org.apache.hadoop.hdds.scm.container.ContainerInfo
+      allocateContainer(ContainerManager containerManager)
       throws IOException {
-
-    PipelineSelector pipelineSelector = Mockito.mock(PipelineSelector.class);
-
-    Pipeline pipeline = new Pipeline("leader", HddsProtos.LifeCycleState.CLOSED,
-        HddsProtos.ReplicationType.STAND_ALONE,
-        HddsProtos.ReplicationFactor.THREE,
-        PipelineID.randomId());
-
-    when(pipelineSelector
-        .getReplicationPipeline(HddsProtos.ReplicationType.STAND_ALONE,
-            HddsProtos.ReplicationFactor.THREE)).thenReturn(pipeline);
-
-    return containerStateManager
-        .allocateContainer(pipelineSelector,
-            HddsProtos.ReplicationType.STAND_ALONE,
+    return containerManager
+        .allocateContainer(HddsProtos.ReplicationType.STAND_ALONE,
             HddsProtos.ReplicationFactor.THREE, "root").getContainerInfo();
 
   }
 
-  public static void closeContainer(ContainerStateManager containerStateManager,
-      org.apache.hadoop.hdds.scm.container.common.helpers.ContainerInfo
-          container)
-      throws SCMException {
-
-    containerStateManager.getContainerStateMap()
-        .updateState(container, container.getState(), LifeCycleState.CLOSING);
-
-    containerStateManager.getContainerStateMap()
-        .updateState(container, container.getState(), LifeCycleState.CLOSED);
+  public static void closeContainer(ContainerManager containerManager,
+      ContainerID id) throws IOException {
+    containerManager.updateContainerState(
+        id, HddsProtos.LifeCycleEvent.FINALIZE);
+    containerManager.updateContainerState(
+        id, HddsProtos.LifeCycleEvent.CLOSE);
 
   }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/50715c06/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/TestBlockManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/TestBlockManager.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/TestBlockManager.java
index 25f6ae3..a9c6906 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/TestBlockManager.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/TestBlockManager.java
@@ -83,7 +83,7 @@ public class TestBlockManager implements EventHandler<Boolean> {
       throw new IOException("Unable to create test directory path");
     }
     nodeManager = new MockNodeManager(true, 10);
-    mapping = new SCMContainerManager(conf, nodeManager, 128, eventQueue);
+    mapping = new SCMContainerManager(conf, nodeManager, eventQueue);
     blockManager = new BlockManagerImpl(conf,
         nodeManager, mapping, eventQueue);
     eventQueue.addHandler(SCMEvents.CHILL_MODE_STATUS, blockManager);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/50715c06/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/TestDeletedBlockLog.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/TestDeletedBlockLog.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/TestDeletedBlockLog.java
index 0812027..5b76137 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/TestDeletedBlockLog.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/TestDeletedBlockLog.java
@@ -19,9 +19,10 @@ package org.apache.hadoop.hdds.scm.block;
 
 import org.apache.commons.io.FileUtils;
 import org.apache.commons.lang3.RandomUtils;
+import org.apache.hadoop.hdds.scm.container.ContainerID;
 import org.apache.hadoop.hdds.scm.container.SCMContainerManager;
 import org.apache.hadoop.hdds.scm.container.ContainerManager;
-import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerInfo;
+import org.apache.hadoop.hdds.scm.container.ContainerInfo;
 import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline;
 import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline;
 import org.apache.hadoop.hdds.scm.container.common.helpers.PipelineID;
@@ -61,7 +62,7 @@ import java.util.stream.Collectors;
 import static org.apache.hadoop.hdds.scm.ScmConfigKeys
     .OZONE_SCM_BLOCK_DELETION_MAX_RETRY;
 import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_METADATA_DIRS;
-import static org.mockito.Matchers.anyLong;
+import static org.mockito.Matchers.anyObject;
 import static org.mockito.Mockito.when;
 
 /**
@@ -109,9 +110,10 @@ public class TestDeletedBlockLog {
     pipeline.addMember(dnList.get(2));
     ContainerWithPipeline containerWithPipeline =
         new ContainerWithPipeline(containerInfo, pipeline);
-    when(containerManager.getContainerWithPipeline(anyLong()))
+    when(containerManager.getContainerWithPipeline(anyObject()))
         .thenReturn(containerWithPipeline);
-    when(containerManager.getContainer(anyLong())).thenReturn(containerInfo);
+    when(containerManager.getContainer(anyObject()))
+        .thenReturn(containerInfo);
   }
 
   @After
@@ -396,8 +398,8 @@ public class TestDeletedBlockLog {
     ContainerWithPipeline containerWithPipeline = new ContainerWithPipeline(
         containerInfo, pipeline);
     Mockito.doReturn(containerInfo).when(containerManager)
-        .getContainer(containerID);
+        .getContainer(ContainerID.valueof(containerID));
     Mockito.doReturn(containerWithPipeline).when(containerManager)
-        .getContainerWithPipeline(containerID);
+        .getContainerWithPipeline(ContainerID.valueof(containerID));
   }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/50715c06/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestCloseContainerEventHandler.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestCloseContainerEventHandler.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestCloseContainerEventHandler.java
index 3917d39..517bc67 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestCloseContainerEventHandler.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestCloseContainerEventHandler.java
@@ -50,7 +50,7 @@ public class TestCloseContainerEventHandler {
 
   private static Configuration configuration;
   private static MockNodeManager nodeManager;
-  private static SCMContainerManager mapping;
+  private static SCMContainerManager containerManager;
   private static long size;
   private static File testDir;
   private static EventQueue eventQueue;
@@ -65,18 +65,18 @@ public class TestCloseContainerEventHandler {
     configuration
         .set(OzoneConfigKeys.OZONE_METADATA_DIRS, testDir.getAbsolutePath());
     nodeManager = new MockNodeManager(true, 10);
-    mapping = new SCMContainerManager(configuration, nodeManager, 128,
+    containerManager = new SCMContainerManager(configuration, nodeManager,
         new EventQueue());
     eventQueue = new EventQueue();
     eventQueue.addHandler(CLOSE_CONTAINER,
-        new CloseContainerEventHandler(mapping));
+        new CloseContainerEventHandler(containerManager));
     eventQueue.addHandler(DATANODE_COMMAND, nodeManager);
   }
 
   @AfterClass
   public static void tearDown() throws Exception {
-    if (mapping != null) {
-      mapping.close();
+    if (containerManager != null) {
+      containerManager.close();
     }
     FileUtil.fullyDelete(testDir);
   }
@@ -109,7 +109,7 @@ public class TestCloseContainerEventHandler {
 
     GenericTestUtils.LogCapturer logCapturer = GenericTestUtils.LogCapturer
         .captureLogs(CloseContainerEventHandler.LOG);
-    ContainerWithPipeline containerWithPipeline = mapping
+    ContainerWithPipeline containerWithPipeline = containerManager
         .allocateContainer(HddsProtos.ReplicationType.STAND_ALONE,
             HddsProtos.ReplicationFactor.ONE, "ozone");
     ContainerID id = new ContainerID(
@@ -123,7 +123,7 @@ public class TestCloseContainerEventHandler {
     // command in the Datanode
     Assert.assertEquals(0, nodeManager.getCommandCount(datanode));
     //Execute these state transitions so that we can close the container.
-    mapping.updateContainerState(id.getId(), CREATED);
+    containerManager.updateContainerState(id, CREATED);
     eventQueue.fireEvent(CLOSE_CONTAINER,
         new ContainerID(
             containerWithPipeline.getContainerInfo().getContainerID()));
@@ -131,7 +131,7 @@ public class TestCloseContainerEventHandler {
     Assert.assertEquals(closeCount + 1,
         nodeManager.getCommandCount(datanode));
     Assert.assertEquals(HddsProtos.LifeCycleState.CLOSING,
-        mapping.getStateManager().getContainer(id).getState());
+        containerManager.getContainer(id).getState());
   }
 
   @Test
@@ -139,7 +139,7 @@ public class TestCloseContainerEventHandler {
 
     GenericTestUtils.LogCapturer logCapturer = GenericTestUtils.LogCapturer
         .captureLogs(CloseContainerEventHandler.LOG);
-    ContainerWithPipeline containerWithPipeline = mapping
+    ContainerWithPipeline containerWithPipeline = containerManager
         .allocateContainer(HddsProtos.ReplicationType.RATIS,
             HddsProtos.ReplicationFactor.THREE, "ozone");
     ContainerID id = new ContainerID(
@@ -160,7 +160,7 @@ public class TestCloseContainerEventHandler {
       i++;
     }
     //Execute these state transitions so that we can close the container.
-    mapping.updateContainerState(id.getId(), CREATED);
+    containerManager.updateContainerState(id, CREATED);
     eventQueue.fireEvent(CLOSE_CONTAINER, id);
     eventQueue.processAll(1000);
     i = 0;
@@ -170,7 +170,7 @@ public class TestCloseContainerEventHandler {
       Assert.assertEquals(closeCount[i] + 1,
           nodeManager.getCommandCount(details));
       Assert.assertEquals(HddsProtos.LifeCycleState.CLOSING,
-          mapping.getStateManager().getContainer(id).getState());
+          containerManager.getContainer(id).getState());
       i++;
     }
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/50715c06/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerReportHandler.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerReportHandler.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerReportHandler.java
index 7f32be5..7135173 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerReportHandler.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerReportHandler.java
@@ -23,34 +23,28 @@ import java.util.List;
 
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleState;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType;
 import org.apache.hadoop.hdds.protocol.proto
     .StorageContainerDatanodeProtocolProtos.ContainerReportsProto;
 import org.apache.hadoop.hdds.scm.TestUtils;
-import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerInfo;
-import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerInfo
-    .Builder;
-import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline;
-import org.apache.hadoop.hdds.scm.container.common.helpers.PipelineID;
 import org.apache.hadoop.hdds.scm.container.replication
     .ReplicationActivityStatus;
 import org.apache.hadoop.hdds.scm.container.replication.ReplicationRequest;
 import org.apache.hadoop.hdds.scm.node.NodeManager;
-import org.apache.hadoop.hdds.scm.pipelines.PipelineSelector;
 import org.apache.hadoop.hdds.scm.server.SCMDatanodeHeartbeatDispatcher
     .ContainerReportFromDatanode;
 import org.apache.hadoop.hdds.server.events.Event;
 import org.apache.hadoop.hdds.server.events.EventPublisher;
 
+import org.apache.hadoop.hdds.server.events.EventQueue;
+import org.apache.hadoop.ozone.OzoneConfigKeys;
+import org.apache.hadoop.test.GenericTestUtils;
 import org.junit.Assert;
 import org.junit.Before;
+import org.junit.Ignore;
 import org.junit.Test;
-import static org.mockito.Matchers.anyLong;
-import org.mockito.Mockito;
-import static org.mockito.Mockito.when;
-import org.mockito.stubbing.Answer;
+
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -60,7 +54,7 @@ import org.slf4j.LoggerFactory;
 public class TestContainerReportHandler implements EventPublisher {
 
   private List<Object> publishedEvents = new ArrayList<>();
-  private final NodeManager nodeManager = new MockNodeManager(true, 1);
+  private final NodeManager nodeManager = new MockNodeManager(true, 15);
 
   private static final Logger LOG =
       LoggerFactory.getLogger(TestContainerReportHandler.class);
@@ -70,27 +64,17 @@ public class TestContainerReportHandler implements EventPublisher {
     publishedEvents.clear();
   }
 
+  //TODO: Rewrite it
+  @Ignore
   @Test
   public void test() throws IOException {
+    String testDir = GenericTestUtils.getTempPath(
+        this.getClass().getSimpleName());
     //GIVEN
     OzoneConfiguration conf = new OzoneConfiguration();
-    ContainerManager containerManager = Mockito.mock(ContainerManager.class);
-    PipelineSelector selector = Mockito.mock(PipelineSelector.class);
-
-    when(containerManager.getContainer(anyLong()))
-        .thenAnswer(
-            (Answer<ContainerInfo>) invocation ->
-                new Builder()
-                    .setReplicationFactor(ReplicationFactor.THREE)
-                    .setContainerID((Long) invocation.getArguments()[0])
-                    .setState(LifeCycleState.CLOSED)
-                    .build()
-      );
-
-    ContainerStateManager containerStateManager =
-        new ContainerStateManager(conf, containerManager, selector);
-
-    when(containerManager.getStateManager()).thenReturn(containerStateManager);
+    conf.set(OzoneConfigKeys.OZONE_METADATA_DIRS, testDir);
+    SCMContainerManager containerManager = new SCMContainerManager(
+        conf, nodeManager, new EventQueue());
 
     ReplicationActivityStatus replicationActivityStatus =
         new ReplicationActivityStatus();
@@ -107,24 +91,16 @@ public class TestContainerReportHandler implements EventPublisher {
     nodeManager.addDatanodeInContainerMap(dn2.getUuid(), new HashSet<>());
     nodeManager.addDatanodeInContainerMap(dn3.getUuid(), new HashSet<>());
     nodeManager.addDatanodeInContainerMap(dn4.getUuid(), new HashSet<>());
-    PipelineSelector pipelineSelector = Mockito.mock(PipelineSelector.class);
-
-    Pipeline pipeline = new Pipeline("leader", LifeCycleState.CLOSED,
-        ReplicationType.STAND_ALONE, ReplicationFactor.THREE,
-        PipelineID.randomId());
-
-    when(pipelineSelector.getReplicationPipeline(ReplicationType.STAND_ALONE,
-        ReplicationFactor.THREE)).thenReturn(pipeline);
 
-    ContainerInfo cont1 = containerStateManager
-        .allocateContainer(pipelineSelector, ReplicationType.STAND_ALONE,
+    ContainerInfo cont1 = containerManager
+        .allocateContainer(ReplicationType.STAND_ALONE,
             ReplicationFactor.THREE, "root").getContainerInfo();
-    ContainerInfo cont2 = containerStateManager
-        .allocateContainer(pipelineSelector, ReplicationType.STAND_ALONE,
+    ContainerInfo cont2 = containerManager
+        .allocateContainer(ReplicationType.STAND_ALONE,
             ReplicationFactor.THREE, "root").getContainerInfo();
     // Open Container
-    ContainerInfo cont3 = containerStateManager
-        .allocateContainer(pipelineSelector, ReplicationType.STAND_ALONE,
+    ContainerInfo cont3 = containerManager
+        .allocateContainer(ReplicationType.STAND_ALONE,
             ReplicationFactor.THREE, "root").getContainerInfo();
 
     long c1 = cont1.getContainerID();
@@ -132,8 +108,8 @@ public class TestContainerReportHandler implements EventPublisher {
     long c3 = cont3.getContainerID();
 
     // Close remaining containers
-    TestUtils.closeContainer(containerStateManager, cont1);
-    TestUtils.closeContainer(containerStateManager, cont2);
+    TestUtils.closeContainer(containerManager, cont1.containerID());
+    TestUtils.closeContainer(containerManager, cont2.containerID());
 
     //when
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/50715c06/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerStateManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerStateManager.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerStateManager.java
index 785753b..69a3b31 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerStateManager.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerStateManager.java
@@ -18,19 +18,23 @@
 package org.apache.hadoop.hdds.scm.container;
 
 import java.io.IOException;
+import java.util.Set;
 
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
 import org.apache.hadoop.hdds.scm.TestUtils;
-import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerInfo;
-import org.apache.hadoop.hdds.scm.container.replication.ReplicationRequest;
 
+import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline;
+import org.apache.hadoop.hdds.scm.container.common.helpers.PipelineID;
 import org.apache.hadoop.hdds.scm.pipelines.PipelineSelector;
 import org.junit.Assert;
 import org.junit.Before;
 import org.junit.Test;
 import org.mockito.Mockito;
 
+import static org.mockito.Mockito.when;
+
 /**
  * Testing ContainerStatemanager.
  */
@@ -41,16 +45,14 @@ public class TestContainerStateManager {
   @Before
   public void init() throws IOException {
     OzoneConfiguration conf = new OzoneConfiguration();
-    ContainerManager mapping = Mockito.mock(ContainerManager.class);
-    PipelineSelector selector =  Mockito.mock(PipelineSelector.class);
-    containerStateManager = new ContainerStateManager(conf, mapping, selector);
+    containerStateManager = new ContainerStateManager(conf);
 
   }
 
   @Test
   public void checkReplicationStateOK() throws IOException {
     //GIVEN
-    ContainerInfo c1 = TestUtils.allocateContainer(containerStateManager);
+    ContainerInfo c1 = allocateContainer();
 
     DatanodeDetails d1 = TestUtils.randomDatanodeDetails();
     DatanodeDetails d2 = TestUtils.randomDatanodeDetails();
@@ -61,18 +63,18 @@ public class TestContainerStateManager {
     addReplica(c1, d3);
 
     //WHEN
-    ReplicationRequest replicationRequest = containerStateManager
-        .checkReplicationState(new ContainerID(c1.getContainerID()));
+    Set<ContainerReplica> replicas = containerStateManager
+        .getContainerReplicas(c1.containerID());
 
     //THEN
-    Assert.assertNull(replicationRequest);
+    Assert.assertEquals(3, replicas.size());
   }
 
   @Test
   public void checkReplicationStateMissingReplica() throws IOException {
     //GIVEN
 
-    ContainerInfo c1 = TestUtils.allocateContainer(containerStateManager);
+    ContainerInfo c1 = allocateContainer();
 
     DatanodeDetails d1 = TestUtils.randomDatanodeDetails();
     DatanodeDetails d2 = TestUtils.randomDatanodeDetails();
@@ -81,18 +83,40 @@ public class TestContainerStateManager {
     addReplica(c1, d2);
 
     //WHEN
-    ReplicationRequest replicationRequest = containerStateManager
-        .checkReplicationState(new ContainerID(c1.getContainerID()));
+    Set<ContainerReplica> replicas = containerStateManager
+        .getContainerReplicas(c1.containerID());
 
-    Assert
-        .assertEquals(c1.getContainerID(), replicationRequest.getContainerId());
-    Assert.assertEquals(2, replicationRequest.getReplicationCount());
-    Assert.assertEquals(3, replicationRequest.getExpecReplicationCount());
+    Assert.assertEquals(2, replicas.size());
+    Assert.assertEquals(3, c1.getReplicationFactor().getNumber());
   }
 
-  private void addReplica(ContainerInfo c1, DatanodeDetails d1) {
+  private void addReplica(ContainerInfo cont, DatanodeDetails node)
+      throws ContainerNotFoundException {
+    ContainerReplica replica = ContainerReplica.newBuilder()
+        .setContainerID(cont.containerID())
+        .setDatanodeDetails(node)
+        .build();
     containerStateManager
-        .addContainerReplica(new ContainerID(c1.getContainerID()), d1);
+        .updateContainerReplica(cont.containerID(), replica);
+  }
+
+  private ContainerInfo allocateContainer() throws IOException {
+
+    PipelineSelector pipelineSelector = Mockito.mock(PipelineSelector.class);
+
+    Pipeline pipeline = new Pipeline("leader", HddsProtos.LifeCycleState.CLOSED,
+        HddsProtos.ReplicationType.STAND_ALONE,
+        HddsProtos.ReplicationFactor.THREE,
+        PipelineID.randomId());
+
+    when(pipelineSelector
+        .getReplicationPipeline(HddsProtos.ReplicationType.STAND_ALONE,
+            HddsProtos.ReplicationFactor.THREE)).thenReturn(pipeline);
+
+    return containerStateManager.allocateContainer(
+        pipelineSelector, HddsProtos.ReplicationType.STAND_ALONE,
+        HddsProtos.ReplicationFactor.THREE, "root");
+
   }
 
 }
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/50715c06/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestSCMContainerManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestSCMContainerManager.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestSCMContainerManager.java
index b067ac9..75f8b8c 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestSCMContainerManager.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestSCMContainerManager.java
@@ -23,7 +23,6 @@ import org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleState;
 import org.apache.hadoop.hdds.scm.ScmConfigKeys;
 import org.apache.hadoop.hdds.scm.TestUtils;
 import org.apache.hadoop.hdds.scm.XceiverClientManager;
-import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerInfo;
 import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline;
 import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
@@ -32,12 +31,10 @@ import org.apache.hadoop.hdds.protocol.proto
     .StorageContainerDatanodeProtocolProtos;
 import org.apache.hadoop.hdds.protocol.proto
     .StorageContainerDatanodeProtocolProtos.ContainerReportsProto;
-import org.apache.hadoop.hdds.scm.exceptions.SCMException;
 import org.apache.hadoop.hdds.server.events.EventQueue;
 import org.apache.hadoop.ozone.OzoneConfigKeys;
 import org.apache.hadoop.ozone.container.common.SCMTestUtils;
 import org.apache.hadoop.test.GenericTestUtils;
-import org.apache.hadoop.test.LambdaTestUtils;
 import org.junit.AfterClass;
 import org.junit.Assert;
 import org.junit.Before;
@@ -50,7 +47,6 @@ import java.io.File;
 import java.io.IOException;
 import java.util.ArrayList;
 import java.util.List;
-import java.util.NavigableSet;
 import java.util.Random;
 import java.util.Set;
 import java.util.TreeSet;
@@ -89,7 +85,7 @@ public class TestSCMContainerManager {
       throw new IOException("Unable to create test directory path");
     }
     nodeManager = new MockNodeManager(true, 10);
-    containerManager = new SCMContainerManager(conf, nodeManager, 128,
+    containerManager = new SCMContainerManager(conf, nodeManager,
         new EventQueue());
     xceiverClientManager = new XceiverClientManager(conf);
     random = new Random();
@@ -169,28 +165,36 @@ public class TestSCMContainerManager {
         .setIpAddress("2.2.2.2")
         .setUuid(UUID.randomUUID().toString()).build();
     containerManager
-        .updateContainerState(contInfo.getContainerID(), LifeCycleEvent.CREATE);
-    containerManager.updateContainerState(contInfo.getContainerID(),
+        .updateContainerState(contInfo.containerID(), LifeCycleEvent.CREATE);
+    containerManager.updateContainerState(contInfo.containerID(),
         LifeCycleEvent.CREATED);
-    containerManager.updateContainerState(contInfo.getContainerID(),
+    containerManager.updateContainerState(contInfo.containerID(),
         LifeCycleEvent.FINALIZE);
     containerManager
-        .updateContainerState(contInfo.getContainerID(), LifeCycleEvent.CLOSE);
+        .updateContainerState(contInfo.containerID(), LifeCycleEvent.CLOSE);
     ContainerInfo finalContInfo = contInfo;
-    LambdaTestUtils.intercept(SCMException.class, "No entry exist for "
-        + "containerId:", () -> containerManager.getContainerWithPipeline(
-        finalContInfo.getContainerID()));
-
-    containerManager.getStateManager().getContainerStateMap()
-        .addContainerReplica(contInfo.containerID(), dn1, dn2);
-
-    contInfo = containerManager.getContainer(contInfo.getContainerID());
+    Assert.assertEquals(0,
+        containerManager.getContainerReplicas(
+            finalContInfo.containerID()).size());
+
+    containerManager.updateContainerReplica(contInfo.containerID(),
+        ContainerReplica.newBuilder().setContainerID(contInfo.containerID())
+        .setDatanodeDetails(dn1).build());
+    containerManager.updateContainerReplica(contInfo.containerID(),
+        ContainerReplica.newBuilder().setContainerID(contInfo.containerID())
+        .setDatanodeDetails(dn2).build());
+
+    Assert.assertEquals(2,
+        containerManager.getContainerReplicas(
+            finalContInfo.containerID()).size());
+
+    contInfo = containerManager.getContainer(contInfo.containerID());
     Assert.assertEquals(contInfo.getState(), LifeCycleState.CLOSED);
     Pipeline pipeline = containerWithPipeline.getPipeline();
     containerManager.getPipelineSelector().finalizePipeline(pipeline);
 
     ContainerWithPipeline containerWithPipeline2 = containerManager
-        .getContainerWithPipeline(contInfo.getContainerID());
+        .getContainerWithPipeline(contInfo.containerID());
     pipeline = containerWithPipeline2.getPipeline();
     Assert.assertNotEquals(containerWithPipeline, containerWithPipeline2);
     Assert.assertNotNull("Pipeline should not be null", pipeline);
@@ -199,9 +203,14 @@ public class TestSCMContainerManager {
   }
 
   @Test
-  public void testgetNoneExistentContainer() throws IOException {
-    thrown.expectMessage("Specified key does not exist.");
-    containerManager.getContainer(random.nextLong());
+  public void testgetNoneExistentContainer() {
+    try {
+      containerManager.getContainer(ContainerID.valueof(
+          random.nextInt() & Integer.MAX_VALUE));
+      Assert.fail();
+    } catch (ContainerNotFoundException ex) {
+      // Success!
+    }
   }
 
   @Test
@@ -213,21 +222,13 @@ public class TestSCMContainerManager {
         xceiverClientManager.getFactor(),
         containerOwner);
     containerManager.updateContainerState(containerInfo.getContainerInfo()
-            .getContainerID(), HddsProtos.LifeCycleEvent.CREATE);
+        .containerID(), HddsProtos.LifeCycleEvent.CREATE);
     Thread.sleep(TIMEOUT + 1000);
 
-    NavigableSet<ContainerID> deleteContainers = containerManager
-        .getStateManager().getMatchingContainerIDs("OZONE",
-            xceiverClientManager.getType(),
-            xceiverClientManager.getFactor(),
-            HddsProtos.LifeCycleState.DELETING);
-    Assert.assertTrue(deleteContainers
-        .contains(containerInfo.getContainerInfo().containerID()));
-
     thrown.expect(IOException.class);
     thrown.expectMessage("Lease Exception");
     containerManager
-        .updateContainerState(containerInfo.getContainerInfo().getContainerID(),
+        .updateContainerState(containerInfo.getContainerInfo().containerID(),
             HddsProtos.LifeCycleEvent.CREATED);
   }
 
@@ -257,26 +258,24 @@ public class TestSCMContainerManager {
     crBuilder.addAllReports(reports);
 
     containerManager.processContainerReports(
-        datanodeDetails, crBuilder.build(), false);
+        datanodeDetails, crBuilder.build());
 
     ContainerInfo updatedContainer =
-        containerManager.getContainer(info.getContainerID());
+        containerManager.getContainer(info.containerID());
     Assert.assertEquals(100000000L,
         updatedContainer.getNumberOfKeys());
     Assert.assertEquals(2000000000L, updatedContainer.getUsedBytes());
 
     for (StorageContainerDatanodeProtocolProtos.ContainerInfo c : reports) {
-      LambdaTestUtils.intercept(SCMException.class, "No entry "
-          + "exist for containerId:", () -> containerManager.getStateManager()
-          .getContainerReplicas(ContainerID.valueof(c.getContainerID())));
+     Assert.assertEquals(containerManager.getContainerReplicas(
+         ContainerID.valueof(c.getContainerID())).size(), 1);
     }
 
     containerManager.processContainerReports(TestUtils.randomDatanodeDetails(),
-        crBuilder.build(), true);
+        crBuilder.build());
     for (StorageContainerDatanodeProtocolProtos.ContainerInfo c : reports) {
-      Assert.assertTrue(containerManager.getStateManager()
-          .getContainerReplicas(
-              ContainerID.valueof(c.getContainerID())).size() > 0);
+      Assert.assertEquals(containerManager.getContainerReplicas(
+              ContainerID.valueof(c.getContainerID())).size(), 2);
     }
   }
 
@@ -314,9 +313,10 @@ public class TestSCMContainerManager {
     crBuilder.addAllReports(reports);
 
     containerManager.processContainerReports(
-        datanodeDetails, crBuilder.build(), false);
+        datanodeDetails, crBuilder.build());
 
-    List<ContainerInfo> list = containerManager.listContainer(0, 50);
+    List<ContainerInfo> list = containerManager.listContainer(
+        ContainerID.valueof(1), 50);
     Assert.assertEquals(2, list.stream().filter(
         x -> x.getContainerID() == cID1 || x.getContainerID() == cID2).count());
     Assert.assertEquals(300000000L, list.stream().filter(
@@ -329,23 +329,13 @@ public class TestSCMContainerManager {
 
   @Test
   public void testCloseContainer() throws IOException {
-    ContainerInfo info = createContainer();
-    containerManager.updateContainerState(info.getContainerID(),
+    ContainerID id = createContainer().containerID();
+    containerManager.updateContainerState(id,
         HddsProtos.LifeCycleEvent.FINALIZE);
-    NavigableSet<ContainerID> pendingCloseContainers = containerManager
-        .getStateManager().getMatchingContainerIDs(containerOwner,
-            xceiverClientManager.getType(),
-            xceiverClientManager.getFactor(),
-            HddsProtos.LifeCycleState.CLOSING);
-    Assert.assertTrue(pendingCloseContainers.contains(info.containerID()));
-    containerManager.updateContainerState(info.getContainerID(),
+    containerManager.updateContainerState(id,
         HddsProtos.LifeCycleEvent.CLOSE);
-    NavigableSet<ContainerID> closeContainers = containerManager
-        .getStateManager().getMatchingContainerIDs(containerOwner,
-            xceiverClientManager.getType(),
-            xceiverClientManager.getFactor(),
-            HddsProtos.LifeCycleState.CLOSED);
-    Assert.assertTrue(closeContainers.contains(info.containerID()));
+   ContainerInfo closedContainer = containerManager.getContainer(id);
+   Assert.assertEquals(LifeCycleState.CLOSED, closedContainer.getState());
   }
 
   /**
@@ -359,20 +349,11 @@ public class TestSCMContainerManager {
         .allocateContainer(xceiverClientManager.getType(),
             xceiverClientManager.getFactor(), containerOwner);
     ContainerInfo containerInfo = containerWithPipeline.getContainerInfo();
-    containerManager.updateContainerState(containerInfo.getContainerID(),
+    containerManager.updateContainerState(containerInfo.containerID(),
         HddsProtos.LifeCycleEvent.CREATE);
-    containerManager.updateContainerState(containerInfo.getContainerID(),
+    containerManager.updateContainerState(containerInfo.containerID(),
         HddsProtos.LifeCycleEvent.CREATED);
     return containerInfo;
   }
 
-  @Test
-  public void testFlushAllContainers() throws IOException {
-    ContainerInfo info = createContainer();
-    List<ContainerInfo> containers = containerManager.getStateManager()
-        .getAllContainers();
-    Assert.assertTrue(containers.size() > 0);
-    containerManager.flushContainerInfo();
-  }
-
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/50715c06/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestReplicationManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestReplicationManager.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestReplicationManager.java
index 06beb7c..b0951c8 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestReplicationManager.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestReplicationManager.java
@@ -23,6 +23,7 @@ import java.util.HashSet;
 import java.util.Iterator;
 import java.util.List;
 import java.util.Objects;
+import java.util.stream.IntStream;
 
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleState;
@@ -31,8 +32,9 @@ import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType;
 import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ReplicateContainerCommandProto;
 import org.apache.hadoop.hdds.scm.TestUtils;
 import org.apache.hadoop.hdds.scm.container.ContainerID;
-import org.apache.hadoop.hdds.scm.container.ContainerStateManager;
-import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerInfo;
+import org.apache.hadoop.hdds.scm.container.ContainerManager;
+import org.apache.hadoop.hdds.scm.container.ContainerReplica;
+import org.apache.hadoop.hdds.scm.container.ContainerInfo;
 import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline;
 import org.apache.hadoop.hdds.scm.container.common.helpers.PipelineID;
 import org.apache.hadoop.hdds.scm.container.placement.algorithms.ContainerPlacementPolicy;
@@ -62,10 +64,11 @@ public class TestReplicationManager {
 
   private List<CommandForDatanode<ReplicateContainerCommandProto>> copyEvents;
 
-  private ContainerStateManager containerStateManager;
+  private ContainerManager containerManager;
 
   private ContainerPlacementPolicy containerPlacementPolicy;
   private List<DatanodeDetails> listOfDatanodeDetails;
+  private List<ContainerReplica> listOfContainerReplica;
   private LeaseManager<Long> leaseManager;
   private ReplicationManager replicationManager;
 
@@ -73,33 +76,36 @@ public class TestReplicationManager {
   public void initReplicationManager() throws IOException {
 
     listOfDatanodeDetails = new ArrayList<>();
-    listOfDatanodeDetails.add(TestUtils.randomDatanodeDetails());
-    listOfDatanodeDetails.add(TestUtils.randomDatanodeDetails());
-    listOfDatanodeDetails.add(TestUtils.randomDatanodeDetails());
-    listOfDatanodeDetails.add(TestUtils.randomDatanodeDetails());
-    listOfDatanodeDetails.add(TestUtils.randomDatanodeDetails());
+    listOfContainerReplica = new ArrayList<>();
+    IntStream.range(1, 6).forEach(i -> {
+      DatanodeDetails dd = TestUtils.randomDatanodeDetails();
+      listOfDatanodeDetails.add(dd);
+      listOfContainerReplica.add(ContainerReplica.newBuilder()
+          .setContainerID(ContainerID.valueof(i))
+          .setDatanodeDetails(dd).build());
+    });
 
     containerPlacementPolicy =
         (excludedNodes, nodesRequired, sizeRequired) -> listOfDatanodeDetails
             .subList(2, 2 + nodesRequired);
 
-    containerStateManager = Mockito.mock(ContainerStateManager.class);
+    containerManager = Mockito.mock(ContainerManager.class);
 
     ContainerInfo containerInfo = new ContainerInfo.Builder()
         .setState(LifeCycleState.CLOSED)
         .build();
 
-    when(containerStateManager.getContainer(anyObject()))
+    when(containerManager.getContainer(anyObject()))
         .thenReturn(containerInfo);
 
-    when(containerStateManager.getContainerReplicas(new ContainerID(1L)))
+    when(containerManager.getContainerReplicas(new ContainerID(1L)))
         .thenReturn(new HashSet<>(Arrays.asList(
-            listOfDatanodeDetails.get(0),
-            listOfDatanodeDetails.get(1)
+            listOfContainerReplica.get(0),
+            listOfContainerReplica.get(1)
         )));
 
 
-    when(containerStateManager.getContainerReplicas(new ContainerID(3L)))
+    when(containerManager.getContainerReplicas(new ContainerID(3L)))
         .thenReturn(new HashSet<>());
 
     queue = new EventQueue();
@@ -115,7 +121,7 @@ public class TestReplicationManager {
     leaseManager = new LeaseManager<>("Test", 100000L);
 
     replicationManager = new ReplicationManager(containerPlacementPolicy,
-        containerStateManager, queue, leaseManager);
+        containerManager, queue, leaseManager);
 
 
 
@@ -182,7 +188,7 @@ public class TestReplicationManager {
         new LeaseManager<>("Test", 1000L);
 
     replicationManager = new ReplicationManager(containerPlacementPolicy,
-        containerStateManager, queue, rapidLeaseManager);
+        containerManager, queue, rapidLeaseManager);
 
     try {
       rapidLeaseManager.start();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/50715c06/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestContainerPlacement.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestContainerPlacement.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestContainerPlacement.java
index 175180a..fb08ad2 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestContainerPlacement.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestContainerPlacement.java
@@ -102,7 +102,7 @@ public class TestContainerPlacement {
     EventQueue eventQueue = new EventQueue();
     final int cacheSize = config.getInt(OZONE_SCM_DB_CACHE_SIZE_MB,
         OZONE_SCM_DB_CACHE_SIZE_DEFAULT);
-    return new SCMContainerManager(config, scmNodeManager, cacheSize,
+    return new SCMContainerManager(config, scmNodeManager,
         eventQueue);
 
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/50715c06/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestDeadNodeHandler.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestDeadNodeHandler.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestDeadNodeHandler.java
index 9d6927d..d971e68 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestDeadNodeHandler.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestDeadNodeHandler.java
@@ -18,38 +18,40 @@
 
 package org.apache.hadoop.hdds.scm.node;
 
+import java.io.File;
 import java.io.IOException;
-import java.util.ArrayList;
 import java.util.Arrays;
-import java.util.List;
 import java.util.Set;
+import java.util.UUID;
 import java.util.stream.Collectors;
 
+import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
 import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.NodeReportProto;
 import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.StorageReportProto;
 import org.apache.hadoop.hdds.scm.TestUtils;
 import org.apache.hadoop.hdds.scm.container.ContainerID;
 import org.apache.hadoop.hdds.scm.container.ContainerManager;
-import org.apache.hadoop.hdds.scm.container.ContainerStateManager;
-import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerInfo;
+import org.apache.hadoop.hdds.scm.container.ContainerNotFoundException;
+import org.apache.hadoop.hdds.scm.container.ContainerReplica;
+import org.apache.hadoop.hdds.scm.container.ContainerInfo;
+import org.apache.hadoop.hdds.scm.container.SCMContainerManager;
 import org.apache.hadoop.hdds.scm.container.placement.metrics.SCMNodeMetric;
-import org.apache.hadoop.hdds.scm.container.replication.ReplicationRequest;
 import org.apache.hadoop.hdds.scm.events.SCMEvents;
 import org.apache.hadoop.hdds.scm.container.placement.metrics.SCMNodeStat;
 import org.apache.hadoop.hdds.scm.exceptions.SCMException;
-import org.apache.hadoop.hdds.scm.pipelines.PipelineSelector;
 import org.apache.hadoop.hdds.scm.server.SCMDatanodeHeartbeatDispatcher.NodeReportFromDatanode;
 import org.apache.hadoop.hdds.server.events.EventPublisher;
 
 import org.apache.hadoop.hdds.server.events.EventQueue;
+import org.apache.hadoop.ozone.OzoneConfigKeys;
 import org.apache.hadoop.test.GenericTestUtils;
+import org.junit.After;
 import org.junit.Assert;
 import org.junit.Before;
 import org.junit.Test;
-import org.mockito.ArgumentCaptor;
-import static org.mockito.Matchers.eq;
 import org.mockito.Mockito;
 
 /**
@@ -57,86 +59,122 @@ import org.mockito.Mockito;
  */
 public class TestDeadNodeHandler {
 
-  private List<ReplicationRequest> sentEvents = new ArrayList<>();
   private SCMNodeManager nodeManager;
-  private ContainerStateManager containerStateManager;
+  private ContainerManager containerManager;
   private NodeReportHandler nodeReportHandler;
   private DeadNodeHandler deadNodeHandler;
   private EventPublisher publisher;
   private EventQueue eventQueue;
+  private String storageDir;
 
   @Before
   public void setup() throws IOException {
     OzoneConfiguration conf = new OzoneConfiguration();
-    containerStateManager = new ContainerStateManager(conf,
-        Mockito.mock(ContainerManager.class),
-        Mockito.mock(PipelineSelector.class));
+    storageDir = GenericTestUtils.getTempPath(
+        TestDeadNodeHandler.class.getSimpleName() + UUID.randomUUID());
+    conf.set(OzoneConfigKeys.OZONE_METADATA_DIRS, storageDir);
     eventQueue = new EventQueue();
     nodeManager = new SCMNodeManager(conf, "cluster1", null, eventQueue);
-    deadNodeHandler = new DeadNodeHandler(nodeManager,
-        containerStateManager);
+    containerManager = new SCMContainerManager(conf, nodeManager, eventQueue);
+    deadNodeHandler = new DeadNodeHandler(nodeManager, containerManager);
     eventQueue.addHandler(SCMEvents.DEAD_NODE, deadNodeHandler);
     publisher = Mockito.mock(EventPublisher.class);
     nodeReportHandler = new NodeReportHandler(nodeManager);
   }
 
+  @After
+  public void teardown() {
+    FileUtil.fullyDelete(new File(storageDir));
+  }
+
   @Test
   public void testOnMessage() throws IOException {
     //GIVEN
     DatanodeDetails datanode1 = TestUtils.randomDatanodeDetails();
     DatanodeDetails datanode2 = TestUtils.randomDatanodeDetails();
+    DatanodeDetails datanode3 = TestUtils.randomDatanodeDetails();
+
+    String storagePath = GenericTestUtils.getRandomizedTempPath()
+        .concat("/" + datanode1.getUuidString());
+
+    StorageReportProto storageOne = TestUtils.createStorageReport(
+        datanode1.getUuid(), storagePath, 100, 10, 90, null);
+
+    // Standalone pipeline now excludes the nodes which are already used,
+    // is the a proper behavior. Adding 9 datanodes for now to make the
+    // test case happy.
+
+    nodeManager.register(datanode1,
+        TestUtils.createNodeReport(storageOne), null);
+    nodeManager.register(datanode2,
+        TestUtils.createNodeReport(storageOne), null);
+    nodeManager.register(datanode3,
+        TestUtils.createNodeReport(storageOne), null);
+
+    nodeManager.register(TestUtils.randomDatanodeDetails(),
+        TestUtils.createNodeReport(storageOne), null);
+    nodeManager.register(TestUtils.randomDatanodeDetails(),
+        TestUtils.createNodeReport(storageOne), null);
+    nodeManager.register(TestUtils.randomDatanodeDetails(),
+        TestUtils.createNodeReport(storageOne), null);
+
+    nodeManager.register(TestUtils.randomDatanodeDetails(),
+        TestUtils.createNodeReport(storageOne), null);
+    nodeManager.register(TestUtils.randomDatanodeDetails(),
+        TestUtils.createNodeReport(storageOne), null);
+    nodeManager.register(TestUtils.randomDatanodeDetails(),
+        TestUtils.createNodeReport(storageOne), null);
 
     ContainerInfo container1 =
-        TestUtils.allocateContainer(containerStateManager);
+        TestUtils.allocateContainer(containerManager);
     ContainerInfo container2 =
-        TestUtils.allocateContainer(containerStateManager);
+        TestUtils.allocateContainer(containerManager);
     ContainerInfo container3 =
-        TestUtils.allocateContainer(containerStateManager);
+        TestUtils.allocateContainer(containerManager);
+
+    containerManager.updateContainerState(
+        container1.containerID(), HddsProtos.LifeCycleEvent.CREATE);
+    containerManager.updateContainerState(
+        container1.containerID(), HddsProtos.LifeCycleEvent.CREATED);
+
+    containerManager.updateContainerState(
+        container2.containerID(), HddsProtos.LifeCycleEvent.CREATE);
+    containerManager.updateContainerState(
+        container2.containerID(), HddsProtos.LifeCycleEvent.CREATED);
+
+    containerManager.updateContainerState(
+        container3.containerID(), HddsProtos.LifeCycleEvent.CREATE);
+    containerManager.updateContainerState(
+        container3.containerID(), HddsProtos.LifeCycleEvent.CREATED);
 
     registerReplicas(datanode1, container1, container2);
     registerReplicas(datanode2, container1, container3);
 
-    registerReplicas(containerStateManager, container1, datanode1, datanode2);
-    registerReplicas(containerStateManager, container2, datanode1);
-    registerReplicas(containerStateManager, container3, datanode2);
+    registerReplicas(containerManager, container1, datanode1, datanode2);
+    registerReplicas(containerManager, container2, datanode1);
+    registerReplicas(containerManager, container3, datanode2);
 
-    TestUtils.closeContainer(containerStateManager, container1);
+    TestUtils.closeContainer(containerManager, container1.containerID());
+    TestUtils.closeContainer(containerManager, container2.containerID());
+    TestUtils.closeContainer(containerManager, container3.containerID());
 
     deadNodeHandler.onMessage(datanode1, publisher);
 
-    Set<DatanodeDetails> container1Replicas =
-        containerStateManager.getContainerStateMap()
-            .getContainerReplicas(new ContainerID(container1.getContainerID()));
+    Set<ContainerReplica> container1Replicas = containerManager
+        .getContainerReplicas(new ContainerID(container1.getContainerID()));
     Assert.assertEquals(1, container1Replicas.size());
-    Assert.assertEquals(datanode2, container1Replicas.iterator().next());
+    Assert.assertEquals(datanode2,
+        container1Replicas.iterator().next().getDatanodeDetails());
 
-    Set<DatanodeDetails> container2Replicas =
-        containerStateManager.getContainerStateMap()
-            .getContainerReplicas(new ContainerID(container2.getContainerID()));
+    Set<ContainerReplica> container2Replicas = containerManager
+        .getContainerReplicas(new ContainerID(container2.getContainerID()));
     Assert.assertEquals(0, container2Replicas.size());
 
-    Set<DatanodeDetails> container3Replicas =
-        containerStateManager.getContainerStateMap()
+    Set<ContainerReplica> container3Replicas = containerManager
             .getContainerReplicas(new ContainerID(container3.getContainerID()));
     Assert.assertEquals(1, container3Replicas.size());
-    Assert.assertEquals(datanode2, container3Replicas.iterator().next());
-
-    ArgumentCaptor<ReplicationRequest> replicationRequestParameter =
-        ArgumentCaptor.forClass(ReplicationRequest.class);
-
-    Mockito.verify(publisher)
-        .fireEvent(eq(SCMEvents.REPLICATE_CONTAINER),
-            replicationRequestParameter.capture());
-
-    Assert
-        .assertEquals(container1.getContainerID(),
-            replicationRequestParameter.getValue().getContainerId());
-    Assert
-        .assertEquals(1,
-            replicationRequestParameter.getValue().getReplicationCount());
-    Assert
-        .assertEquals(3,
-            replicationRequestParameter.getValue().getExpecReplicationCount());
+    Assert.assertEquals(datanode2,
+        container3Replicas.iterator().next().getDatanodeDetails());
   }
 
   @Test
@@ -144,6 +182,7 @@ public class TestDeadNodeHandler {
     //GIVEN
     DatanodeDetails datanode1 = TestUtils.randomDatanodeDetails();
     DatanodeDetails datanode2 = TestUtils.randomDatanodeDetails();
+
     String storagePath1 = GenericTestUtils.getRandomizedTempPath()
         .concat("/" + datanode1.getUuidString());
     String storagePath2 = GenericTestUtils.getRandomizedTempPath()
@@ -153,15 +192,17 @@ public class TestDeadNodeHandler {
         datanode1.getUuid(), storagePath1, 100, 10, 90, null);
     StorageReportProto storageTwo = TestUtils.createStorageReport(
         datanode2.getUuid(), storagePath2, 200, 20, 180, null);
+
+    nodeManager.register(datanode1,
+        TestUtils.createNodeReport(storageOne), null);
+    nodeManager.register(datanode2,
+        TestUtils.createNodeReport(storageTwo), null);
+
     nodeReportHandler.onMessage(getNodeReport(datanode1, storageOne),
         Mockito.mock(EventPublisher.class));
     nodeReportHandler.onMessage(getNodeReport(datanode2, storageTwo),
         Mockito.mock(EventPublisher.class));
 
-    ContainerInfo container1 =
-        TestUtils.allocateContainer(containerStateManager);
-    registerReplicas(datanode1, container1);
-
     SCMNodeStat stat = nodeManager.getStats();
     Assert.assertTrue(stat.getCapacity().get() == 300);
     Assert.assertTrue(stat.getRemaining().get() == 270);
@@ -190,32 +231,56 @@ public class TestDeadNodeHandler {
 
   @Test
   public void testOnMessageReplicaFailure() throws Exception {
+
+    DatanodeDetails datanode1 = TestUtils.randomDatanodeDetails();
+    DatanodeDetails datanode2 = TestUtils.randomDatanodeDetails();
+    DatanodeDetails datanode3 = TestUtils.randomDatanodeDetails();
+
+    String storagePath = GenericTestUtils.getRandomizedTempPath()
+        .concat("/" + datanode1.getUuidString());
+
+    StorageReportProto storageOne = TestUtils.createStorageReport(
+        datanode1.getUuid(), storagePath, 100, 10, 90, null);
+
+    nodeManager.register(datanode1,
+        TestUtils.createNodeReport(storageOne), null);
+    nodeManager.register(datanode2,
+        TestUtils.createNodeReport(storageOne), null);
+    nodeManager.register(datanode3,
+        TestUtils.createNodeReport(storageOne), null);
+
     DatanodeDetails dn1 = TestUtils.randomDatanodeDetails();
     GenericTestUtils.LogCapturer logCapturer = GenericTestUtils.LogCapturer
         .captureLogs(DeadNodeHandler.getLogger());
-    String storagePath1 = GenericTestUtils.getRandomizedTempPath()
-        .concat("/" + dn1.getUuidString());
 
-    StorageReportProto storageOne = TestUtils.createStorageReport(
-        dn1.getUuid(), storagePath1, 100, 10, 90, null);
     nodeReportHandler.onMessage(getNodeReport(dn1, storageOne),
         Mockito.mock(EventPublisher.class));
 
     ContainerInfo container1 =
-        TestUtils.allocateContainer(containerStateManager);
+        TestUtils.allocateContainer(containerManager);
+    containerManager.updateContainerState(
+        container1.containerID(), HddsProtos.LifeCycleEvent.CREATE);
+    containerManager.updateContainerState(
+        container1.containerID(), HddsProtos.LifeCycleEvent.CREATED);
+    TestUtils.closeContainer(containerManager, container1.containerID());
+
     registerReplicas(dn1, container1);
 
     deadNodeHandler.onMessage(dn1, eventQueue);
     Assert.assertTrue(logCapturer.getOutput().contains(
-        "DataNode " + dn1.getUuid() + " doesn't have replica for container "
-            + container1.getContainerID()));
+        "Exception while removing container replica "));
   }
 
-  private void registerReplicas(ContainerStateManager csm,
-      ContainerInfo container, DatanodeDetails... datanodes) {
-    csm.getContainerStateMap()
-        .addContainerReplica(new ContainerID(container.getContainerID()),
-            datanodes);
+  private void registerReplicas(ContainerManager containerManager,
+      ContainerInfo container, DatanodeDetails... datanodes)
+      throws ContainerNotFoundException {
+    for (DatanodeDetails datanode : datanodes) {
+      containerManager.updateContainerReplica(
+          new ContainerID(container.getContainerID()),
+          ContainerReplica.newBuilder()
+              .setContainerID(container.containerID())
+              .setDatanodeDetails(datanode).build());
+    }
   }
 
   private void registerReplicas(DatanodeDetails datanode,

http://git-wip-us.apache.org/repos/asf/hadoop/blob/50715c06/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/TestSCMChillModeManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/TestSCMChillModeManager.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/TestSCMChillModeManager.java
index 41708a4..ef28354 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/TestSCMChillModeManager.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/TestSCMChillModeManager.java
@@ -24,7 +24,7 @@ import org.apache.hadoop.hdds.HddsConfigKeys;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
 import org.apache.hadoop.hdds.scm.HddsTestUtils;
-import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerInfo;
+import org.apache.hadoop.hdds.scm.container.ContainerInfo;
 import org.apache.hadoop.hdds.scm.events.SCMEvents;
 import org.apache.hadoop.hdds.server.events.EventQueue;
 import org.apache.hadoop.test.GenericTestUtils;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/50715c06/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/TestCloseContainerWatcher.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/TestCloseContainerWatcher.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/TestCloseContainerWatcher.java
index 27195a1..ec13534 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/TestCloseContainerWatcher.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/TestCloseContainerWatcher.java
@@ -28,7 +28,7 @@ import org.apache.hadoop.hdds.scm.container.CloseContainerEventHandler
 import org.apache.hadoop.hdds.scm.container.CloseContainerWatcher;
 import org.apache.hadoop.hdds.scm.container.ContainerID;
 import org.apache.hadoop.hdds.scm.container.SCMContainerManager;
-import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerInfo;
+import org.apache.hadoop.hdds.scm.container.ContainerInfo;
 import org.apache.hadoop.hdds.scm.events.SCMEvents;
 import org.apache.hadoop.hdds.server.events.EventHandler;
 import org.apache.hadoop.hdds.server.events.EventPublisher;
@@ -274,10 +274,12 @@ public class TestCloseContainerWatcher implements EventHandler<ContainerID> {
       throws IOException {
     ContainerInfo containerInfo = Mockito.mock(ContainerInfo.class);
     ContainerInfo containerInfo2 = Mockito.mock(ContainerInfo.class);
-    when(containerManager.getContainer(id1)).thenReturn(containerInfo);
-    when(containerManager.getContainer(id2)).thenReturn(containerInfo2);
-    when(containerInfo.isContainerOpen()).thenReturn(true);
-    when(containerInfo2.isContainerOpen()).thenReturn(isOpen);
+    when(containerManager.getContainer(ContainerID.valueof(id1)))
+        .thenReturn(containerInfo);
+    when(containerManager.getContainer(ContainerID.valueof(id2)))
+        .thenReturn(containerInfo2);
+    when(containerInfo.isOpen()).thenReturn(true);
+    when(containerInfo2.isOpen()).thenReturn(isOpen);
   }
 
   @Override

http://git-wip-us.apache.org/repos/asf/hadoop/blob/50715c06/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/SCMCLI.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/SCMCLI.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/SCMCLI.java
index 59cd0ba..5ce0658 100644
--- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/SCMCLI.java
+++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/SCMCLI.java
@@ -34,7 +34,7 @@ import org.apache.hadoop.hdds.scm.cli.container.InfoSubcommand;
 import org.apache.hadoop.hdds.scm.cli.container.ListSubcommand;
 import org.apache.hadoop.hdds.scm.client.ContainerOperationClient;
 import org.apache.hadoop.hdds.scm.client.ScmClient;
-import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerInfo;
+import org.apache.hadoop.hdds.scm.container.ContainerInfo;
 import org.apache.hadoop.hdds.scm.protocolPB
     .StorageContainerLocationProtocolClientSideTranslatorPB;
 import org.apache.hadoop.hdds.scm.protocolPB.StorageContainerLocationProtocolPB;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/50715c06/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/ListSubcommand.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/ListSubcommand.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/ListSubcommand.java
index 0f520fd..431befe 100644
--- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/ListSubcommand.java
+++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/ListSubcommand.java
@@ -24,7 +24,7 @@ import java.util.concurrent.Callable;
 import org.apache.hadoop.hdds.cli.HddsVersionProvider;
 import org.apache.hadoop.hdds.scm.cli.SCMCLI;
 import org.apache.hadoop.hdds.scm.client.ScmClient;
-import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerInfo;
+import org.apache.hadoop.hdds.scm.container.ContainerInfo;
 import org.apache.hadoop.ozone.web.utils.JsonUtils;
 
 import org.slf4j.Logger;
@@ -52,7 +52,7 @@ public class ListSubcommand implements Callable<Void> {
 
   @Option(names = {"-s", "--start"},
       description = "Container id to start the iteration", required = true)
-  private long startId;
+  private long startId = 1;
 
   @Option(names = {"-c", "--count"},
       description = "Maximum number of containers to list",

http://git-wip-us.apache.org/repos/asf/hadoop/blob/50715c06/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/ChunkGroupOutputStream.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/ChunkGroupOutputStream.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/ChunkGroupOutputStream.java
index 0a38a5a..9f46b2d 100644
--- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/ChunkGroupOutputStream.java
+++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/ChunkGroupOutputStream.java
@@ -21,7 +21,7 @@ import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Preconditions;
 import org.apache.hadoop.fs.FSExceptionMessages;
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Result;
-import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerInfo;
+import org.apache.hadoop.hdds.scm.container.ContainerInfo;
 import org.apache.hadoop.hdds.client.BlockID;
 import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline;
 import org.apache.hadoop.io.retry.RetryPolicy;


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org