You are viewing a plain text version of this content. The canonical link for it is here.
Posted to common-commits@hadoop.apache.org by ae...@apache.org on 2018/07/03 21:12:08 UTC
[2/3] hadoop git commit: HDDS-175. Refactor ContainerInfo to remove
Pipeline object from it. Contributed by Ajay Kumar.
http://git-wip-us.apache.org/repos/asf/hadoop/blob/7ca4f0ce/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/TestDeletedBlockLog.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/TestDeletedBlockLog.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/TestDeletedBlockLog.java
index d06d568..9255ec7 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/TestDeletedBlockLog.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/TestDeletedBlockLog.java
@@ -22,6 +22,7 @@ import org.apache.commons.lang3.RandomUtils;
import org.apache.hadoop.hdds.scm.container.ContainerMapping;
import org.apache.hadoop.hdds.scm.container.Mapping;
import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerInfo;
+import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline;
import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline;
import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
@@ -362,10 +363,16 @@ public class TestDeletedBlockLog {
pipeline.addMember(dd);
ContainerInfo.Builder builder = new ContainerInfo.Builder();
- builder.setPipeline(pipeline);
-
- ContainerInfo conatinerInfo = builder.build();
- Mockito.doReturn(conatinerInfo).when(mappingService)
+ builder.setPipelineName(pipeline.getPipelineName())
+ .setReplicationType(pipeline.getType())
+ .setReplicationFactor(pipeline.getFactor());
+
+ ContainerInfo containerInfo = builder.build();
+ ContainerWithPipeline containerWithPipeline = new ContainerWithPipeline(
+ containerInfo, pipeline);
+ Mockito.doReturn(containerInfo).when(mappingService)
.getContainer(containerID);
+ Mockito.doReturn(containerWithPipeline).when(mappingService)
+ .getContainerWithPipeline(containerID);
}
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/7ca4f0ce/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestCloseContainerEventHandler.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestCloseContainerEventHandler.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestCloseContainerEventHandler.java
index 09ade3e..721dbf6 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestCloseContainerEventHandler.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestCloseContainerEventHandler.java
@@ -21,7 +21,7 @@ import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.hdds.protocol.DatanodeDetails;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
-import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerInfo;
+import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline;
import org.apache.hadoop.hdds.server.events.EventQueue;
import org.apache.hadoop.ozone.OzoneConfigKeys;
import org.apache.hadoop.ozone.container.common.SCMTestUtils;
@@ -97,7 +97,7 @@ public class TestCloseContainerEventHandler {
new ContainerID(id));
eventQueue.processAll(1000);
Assert.assertTrue(logCapturer.getOutput()
- .contains("Container with id : " + id + " does not exist"));
+ .contains("Failed to update the container state"));
}
@Test
@@ -105,11 +105,12 @@ public class TestCloseContainerEventHandler {
GenericTestUtils.LogCapturer logCapturer = GenericTestUtils.LogCapturer
.captureLogs(CloseContainerEventHandler.LOG);
- ContainerInfo info = mapping
+ ContainerWithPipeline containerWithPipeline = mapping
.allocateContainer(HddsProtos.ReplicationType.STAND_ALONE,
HddsProtos.ReplicationFactor.ONE, "ozone");
- ContainerID id = new ContainerID(info.getContainerID());
- DatanodeDetails datanode = info.getPipeline().getLeader();
+ ContainerID id = new ContainerID(
+ containerWithPipeline.getContainerInfo().getContainerID());
+ DatanodeDetails datanode = containerWithPipeline.getPipeline().getLeader();
int closeCount = nodeManager.getCommandCount(datanode);
eventQueue.fireEvent(CloseContainerEventHandler.CLOSE_CONTAINER_EVENT, id);
eventQueue.processAll(1000);
@@ -125,7 +126,8 @@ public class TestCloseContainerEventHandler {
mapping.updateContainerState(id.getId(), CREATE);
mapping.updateContainerState(id.getId(), CREATED);
eventQueue.fireEvent(CloseContainerEventHandler.CLOSE_CONTAINER_EVENT,
- new ContainerID(info.getContainerID()));
+ new ContainerID(
+ containerWithPipeline.getContainerInfo().getContainerID()));
eventQueue.processAll(1000);
Assert.assertEquals(closeCount + 1, nodeManager.getCommandCount(datanode));
Assert.assertEquals(HddsProtos.LifeCycleState.CLOSING,
@@ -137,20 +139,23 @@ public class TestCloseContainerEventHandler {
GenericTestUtils.LogCapturer logCapturer = GenericTestUtils.LogCapturer
.captureLogs(CloseContainerEventHandler.LOG);
- ContainerInfo info = mapping
+ ContainerWithPipeline containerWithPipeline = mapping
.allocateContainer(HddsProtos.ReplicationType.RATIS,
HddsProtos.ReplicationFactor.THREE, "ozone");
- ContainerID id = new ContainerID(info.getContainerID());
+ ContainerID id = new ContainerID(
+ containerWithPipeline.getContainerInfo().getContainerID());
int[] closeCount = new int[3];
eventQueue.fireEvent(CloseContainerEventHandler.CLOSE_CONTAINER_EVENT, id);
eventQueue.processAll(1000);
int i = 0;
- for (DatanodeDetails details : info.getPipeline().getMachines()) {
+ for (DatanodeDetails details : containerWithPipeline.getPipeline()
+ .getMachines()) {
closeCount[i] = nodeManager.getCommandCount(details);
i++;
}
i = 0;
- for (DatanodeDetails details : info.getPipeline().getMachines()) {
+ for (DatanodeDetails details : containerWithPipeline.getPipeline()
+ .getMachines()) {
Assert.assertEquals(closeCount[i], nodeManager.getCommandCount(details));
i++;
}
@@ -161,12 +166,12 @@ public class TestCloseContainerEventHandler {
//Execute these state transitions so that we can close the container.
mapping.updateContainerState(id.getId(), CREATE);
mapping.updateContainerState(id.getId(), CREATED);
- eventQueue.fireEvent(CloseContainerEventHandler.CLOSE_CONTAINER_EVENT,
- new ContainerID(info.getContainerID()));
+ eventQueue.fireEvent(CloseContainerEventHandler.CLOSE_CONTAINER_EVENT, id);
eventQueue.processAll(1000);
i = 0;
// Make sure close is queued for each datanode on the pipeline
- for (DatanodeDetails details : info.getPipeline().getMachines()) {
+ for (DatanodeDetails details : containerWithPipeline.getPipeline()
+ .getMachines()) {
Assert.assertEquals(closeCount[i] + 1,
nodeManager.getCommandCount(details));
Assert.assertEquals(HddsProtos.LifeCycleState.CLOSING,
http://git-wip-us.apache.org/repos/asf/hadoop/blob/7ca4f0ce/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerMapping.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerMapping.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerMapping.java
index eefb639..42ab126 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerMapping.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerMapping.java
@@ -22,6 +22,7 @@ import org.apache.hadoop.hdds.scm.ScmConfigKeys;
import org.apache.hadoop.hdds.scm.TestUtils;
import org.apache.hadoop.hdds.scm.XceiverClientManager;
import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerInfo;
+import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline;
import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline;
import org.apache.hadoop.hdds.protocol.DatanodeDetails;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
@@ -103,7 +104,7 @@ public class TestContainerMapping {
@Test
public void testallocateContainer() throws Exception {
- ContainerInfo containerInfo = mapping.allocateContainer(
+ ContainerWithPipeline containerInfo = mapping.allocateContainer(
xceiverClientManager.getType(),
xceiverClientManager.getFactor(),
containerOwner);
@@ -120,7 +121,7 @@ public class TestContainerMapping {
*/
Set<UUID> pipelineList = new TreeSet<>();
for (int x = 0; x < 30; x++) {
- ContainerInfo containerInfo = mapping.allocateContainer(
+ ContainerWithPipeline containerInfo = mapping.allocateContainer(
xceiverClientManager.getType(),
xceiverClientManager.getFactor(),
containerOwner);
@@ -135,14 +136,13 @@ public class TestContainerMapping {
@Test
public void testGetContainer() throws IOException {
- ContainerInfo containerInfo = mapping.allocateContainer(
+ ContainerWithPipeline containerInfo = mapping.allocateContainer(
xceiverClientManager.getType(),
xceiverClientManager.getFactor(),
containerOwner);
Pipeline pipeline = containerInfo.getPipeline();
Assert.assertNotNull(pipeline);
- Pipeline newPipeline = mapping.getContainer(
- containerInfo.getContainerID()).getPipeline();
+ Pipeline newPipeline = containerInfo.getPipeline();
Assert.assertEquals(pipeline.getLeader().getUuid(),
newPipeline.getLeader().getUuid());
}
@@ -165,12 +165,12 @@ public class TestContainerMapping {
public void testContainerCreationLeaseTimeout() throws IOException,
InterruptedException {
nodeManager.setChillmode(false);
- ContainerInfo containerInfo = mapping.allocateContainer(
+ ContainerWithPipeline containerInfo = mapping.allocateContainer(
xceiverClientManager.getType(),
xceiverClientManager.getFactor(),
containerOwner);
- mapping.updateContainerState(containerInfo.getContainerID(),
- HddsProtos.LifeCycleEvent.CREATE);
+ mapping.updateContainerState(containerInfo.getContainerInfo()
+ .getContainerID(), HddsProtos.LifeCycleEvent.CREATE);
Thread.sleep(TIMEOUT + 1000);
NavigableSet<ContainerID> deleteContainers = mapping.getStateManager()
@@ -179,12 +179,14 @@ public class TestContainerMapping {
xceiverClientManager.getType(),
xceiverClientManager.getFactor(),
HddsProtos.LifeCycleState.DELETING);
- Assert.assertTrue(deleteContainers.contains(containerInfo.containerID()));
+ Assert.assertTrue(deleteContainers
+ .contains(containerInfo.getContainerInfo().containerID()));
thrown.expect(IOException.class);
thrown.expectMessage("Lease Exception");
- mapping.updateContainerState(containerInfo.getContainerID(),
- HddsProtos.LifeCycleEvent.CREATED);
+ mapping
+ .updateContainerState(containerInfo.getContainerInfo().getContainerID(),
+ HddsProtos.LifeCycleEvent.CREATED);
}
@Test
@@ -294,10 +296,11 @@ public class TestContainerMapping {
private ContainerInfo createContainer()
throws IOException {
nodeManager.setChillmode(false);
- ContainerInfo containerInfo = mapping.allocateContainer(
+ ContainerWithPipeline containerWithPipeline = mapping.allocateContainer(
xceiverClientManager.getType(),
xceiverClientManager.getFactor(),
containerOwner);
+ ContainerInfo containerInfo = containerWithPipeline.getContainerInfo();
mapping.updateContainerState(containerInfo.getContainerID(),
HddsProtos.LifeCycleEvent.CREATE);
mapping.updateContainerState(containerInfo.getContainerID(),
http://git-wip-us.apache.org/repos/asf/hadoop/blob/7ca4f0ce/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/closer/TestContainerCloser.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/closer/TestContainerCloser.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/closer/TestContainerCloser.java
index 0d7848f..74238a7 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/closer/TestContainerCloser.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/closer/TestContainerCloser.java
@@ -24,6 +24,7 @@ import org.apache.hadoop.hdds.scm.TestUtils;
import org.apache.hadoop.hdds.scm.container.ContainerMapping;
import org.apache.hadoop.hdds.scm.container.MockNodeManager;
import org.apache.hadoop.hdds.scm.container.TestContainerMapping;
+import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline;
import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerInfo;
import org.apache.hadoop.hdds.protocol.DatanodeDetails;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
@@ -91,9 +92,10 @@ public class TestContainerCloser {
@Test
public void testClose() throws IOException {
- ContainerInfo info = mapping.allocateContainer(
+ ContainerWithPipeline containerWithPipeline = mapping.allocateContainer(
HddsProtos.ReplicationType.STAND_ALONE,
HddsProtos.ReplicationFactor.ONE, "ozone");
+ ContainerInfo info = containerWithPipeline.getContainerInfo();
//Execute these state transitions so that we can close the container.
mapping.updateContainerState(info.getContainerID(), CREATE);
@@ -101,7 +103,7 @@ public class TestContainerCloser {
long currentCount = mapping.getCloser().getCloseCount();
long runCount = mapping.getCloser().getThreadRunCount();
- DatanodeDetails datanode = info.getPipeline().getLeader();
+ DatanodeDetails datanode = containerWithPipeline.getPipeline().getLeader();
// Send a container report with used set to 1 GB. This should not close.
sendContainerReport(info, 1 * GIGABYTE);
@@ -138,9 +140,10 @@ public class TestContainerCloser {
configuration.setTimeDuration(OZONE_CONTAINER_REPORT_INTERVAL, 1,
TimeUnit.SECONDS);
- ContainerInfo info = mapping.allocateContainer(
+ ContainerWithPipeline containerWithPipeline = mapping.allocateContainer(
HddsProtos.ReplicationType.STAND_ALONE,
HddsProtos.ReplicationFactor.ONE, "ozone");
+ ContainerInfo info = containerWithPipeline.getContainerInfo();
//Execute these state transitions so that we can close the container.
mapping.updateContainerState(info.getContainerID(), CREATE);
@@ -148,10 +151,10 @@ public class TestContainerCloser {
long currentCount = mapping.getCloser().getCloseCount();
long runCount = mapping.getCloser().getThreadRunCount();
+ DatanodeDetails datanodeDetails = containerWithPipeline.getPipeline()
+ .getLeader();
- DatanodeDetails datanodeDetails = info.getPipeline().getLeader();
-
- // Send this command twice and assert we have only one command in the queue.
+ // Send this command twice and assert we have only one command in queue.
sendContainerReport(info, 5 * GIGABYTE);
sendContainerReport(info, 5 * GIGABYTE);
@@ -183,9 +186,10 @@ public class TestContainerCloser {
long runCount = mapping.getCloser().getThreadRunCount();
for (int x = 0; x < ContainerCloser.getCleanupWaterMark() + 10; x++) {
- ContainerInfo info = mapping.allocateContainer(
+ ContainerWithPipeline containerWithPipeline = mapping.allocateContainer(
HddsProtos.ReplicationType.STAND_ALONE,
HddsProtos.ReplicationFactor.ONE, "ozone");
+ ContainerInfo info = containerWithPipeline.getContainerInfo();
mapping.updateContainerState(info.getContainerID(), CREATE);
mapping.updateContainerState(info.getContainerID(), CREATED);
sendContainerReport(info, 5 * GIGABYTE);
http://git-wip-us.apache.org/repos/asf/hadoop/blob/7ca4f0ce/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestContainerPlacement.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestContainerPlacement.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestContainerPlacement.java
index 5ad28f6..98b0a28 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestContainerPlacement.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestContainerPlacement.java
@@ -25,7 +25,7 @@ import org.apache.hadoop.hdds.scm.ScmConfigKeys;
import org.apache.hadoop.hdds.scm.TestUtils;
import org.apache.hadoop.hdds.scm.XceiverClientManager;
import org.apache.hadoop.hdds.scm.container.ContainerMapping;
-import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerInfo;
+import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline;
import org.apache.hadoop.hdds.scm.container.placement.algorithms
.ContainerPlacementPolicy;
import org.apache.hadoop.hdds.scm.container.placement.algorithms
@@ -151,11 +151,11 @@ public class TestContainerPlacement {
assertTrue(nodeManager.isOutOfChillMode());
- ContainerInfo containerInfo = containerManager.allocateContainer(
+ ContainerWithPipeline containerWithPipeline = containerManager.allocateContainer(
xceiverClientManager.getType(),
xceiverClientManager.getFactor(), "OZONE");
assertEquals(xceiverClientManager.getFactor().getNumber(),
- containerInfo.getPipeline().getMachines().size());
+ containerWithPipeline.getPipeline().getMachines().size());
} finally {
IOUtils.closeQuietly(containerManager);
IOUtils.closeQuietly(nodeManager);
http://git-wip-us.apache.org/repos/asf/hadoop/blob/7ca4f0ce/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/CloseContainerHandler.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/CloseContainerHandler.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/CloseContainerHandler.java
index 4f3b143..e2267da 100644
--- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/CloseContainerHandler.java
+++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/CloseContainerHandler.java
@@ -24,9 +24,9 @@ import org.apache.commons.cli.Options;
import org.apache.hadoop.hdds.scm.cli.OzoneCommandHandler;
import org.apache.hadoop.hdds.scm.cli.SCMCLI;
import org.apache.hadoop.hdds.scm.client.ScmClient;
-import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerInfo;
import java.io.IOException;
+import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline;
/**
* The handler of close container command.
@@ -51,15 +51,15 @@ public class CloseContainerHandler extends OzoneCommandHandler {
}
String containerID = cmd.getOptionValue(OPT_CONTAINER_ID);
- ContainerInfo container = getScmClient().
- getContainer(Long.parseLong(containerID));
+ ContainerWithPipeline container = getScmClient().
+ getContainerWithPipeline(Long.parseLong(containerID));
if (container == null) {
throw new IOException("Cannot close an non-exist container "
+ containerID);
}
logOut("Closing container : %s.", containerID);
- getScmClient().closeContainer(container.getContainerID(),
- container.getPipeline());
+ getScmClient()
+ .closeContainer(container.getContainerInfo().getContainerID());
logOut("Container closed.");
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/7ca4f0ce/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/DeleteContainerHandler.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/DeleteContainerHandler.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/DeleteContainerHandler.java
index 20a6d9e..1b26665 100644
--- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/DeleteContainerHandler.java
+++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/DeleteContainerHandler.java
@@ -25,9 +25,9 @@ import org.apache.commons.cli.Option;
import org.apache.commons.cli.Options;
import org.apache.hadoop.hdds.scm.cli.OzoneCommandHandler;
import org.apache.hadoop.hdds.scm.client.ScmClient;
-import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerInfo;
import java.io.IOException;
+import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline;
import static org.apache.hadoop.hdds.scm.cli.SCMCLI.CMD_WIDTH;
import static org.apache.hadoop.hdds.scm.cli.SCMCLI.HELP_OP;
@@ -60,7 +60,7 @@ public class DeleteContainerHandler extends OzoneCommandHandler {
String containerID = cmd.getOptionValue(OPT_CONTAINER_ID);
- ContainerInfo container = getScmClient().getContainer(
+ ContainerWithPipeline container = getScmClient().getContainerWithPipeline(
Long.parseLong(containerID));
if (container == null) {
throw new IOException("Cannot delete an non-exist container "
@@ -68,8 +68,9 @@ public class DeleteContainerHandler extends OzoneCommandHandler {
}
logOut("Deleting container : %s.", containerID);
- getScmClient().deleteContainer(container.getContainerID(),
- container.getPipeline(), cmd.hasOption(OPT_FORCE));
+ getScmClient()
+ .deleteContainer(container.getContainerInfo().getContainerID(),
+ container.getPipeline(), cmd.hasOption(OPT_FORCE));
logOut("Container %s deleted.", containerID);
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/7ca4f0ce/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/InfoContainerHandler.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/InfoContainerHandler.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/InfoContainerHandler.java
index 6027bec..3716ace 100644
--- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/InfoContainerHandler.java
+++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/InfoContainerHandler.java
@@ -24,7 +24,6 @@ import org.apache.commons.cli.Option;
import org.apache.commons.cli.Options;
import org.apache.hadoop.hdds.scm.cli.OzoneCommandHandler;
import org.apache.hadoop.hdds.scm.client.ScmClient;
-import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerInfo;
import org.apache.hadoop.hdds.protocol.DatanodeDetails;
import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
.ContainerData;
@@ -33,6 +32,7 @@ import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
import java.io.IOException;
import java.util.stream.Collectors;
+import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline;
import static org.apache.hadoop.hdds.scm.cli.SCMCLI.CMD_WIDTH;
import static org.apache.hadoop.hdds.scm.cli.SCMCLI.HELP_OP;
@@ -68,13 +68,12 @@ public class InfoContainerHandler extends OzoneCommandHandler {
}
}
String containerID = cmd.getOptionValue(OPT_CONTAINER_ID);
- ContainerInfo container = getScmClient().
- getContainer(Long.parseLong(containerID));
+ ContainerWithPipeline container = getScmClient().
+ getContainerWithPipeline(Long.parseLong(containerID));
Preconditions.checkNotNull(container, "Container cannot be null");
- ContainerData containerData =
- getScmClient().readContainer(container.getContainerID(),
- container.getPipeline());
+ ContainerData containerData = getScmClient().readContainer(container
+ .getContainerInfo().getContainerID(), container.getPipeline());
// Print container report info.
logOut("Container id: %s", containerID);
http://git-wip-us.apache.org/repos/asf/hadoop/blob/7ca4f0ce/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/ChunkGroupInputStream.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/ChunkGroupInputStream.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/ChunkGroupInputStream.java
index e1a2918..edd85aa 100644
--- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/ChunkGroupInputStream.java
+++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/ChunkGroupInputStream.java
@@ -21,8 +21,8 @@ import com.google.common.annotations.VisibleForTesting;
import org.apache.hadoop.fs.FSExceptionMessages;
import org.apache.hadoop.fs.Seekable;
import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
-import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerInfo;
import org.apache.hadoop.hdds.client.BlockID;
+import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline;
import org.apache.hadoop.ozone.ksm.helpers.KsmKeyInfo;
import org.apache.hadoop.ozone.ksm.helpers.KsmKeyLocationInfo;
import org.apache.hadoop.hdds.scm.XceiverClientManager;
@@ -271,17 +271,17 @@ public class ChunkGroupInputStream extends InputStream implements Seekable {
KsmKeyLocationInfo ksmKeyLocationInfo = keyLocationInfos.get(i);
BlockID blockID = ksmKeyLocationInfo.getBlockID();
long containerID = blockID.getContainerID();
- ContainerInfo container =
- storageContainerLocationClient.getContainer(containerID);
- XceiverClientSpi xceiverClient =
- xceiverClientManager.acquireClient(container.getPipeline(), containerID);
+ ContainerWithPipeline containerWithPipeline =
+ storageContainerLocationClient.getContainerWithPipeline(containerID);
+ XceiverClientSpi xceiverClient = xceiverClientManager
+ .acquireClient(containerWithPipeline.getPipeline(), containerID);
boolean success = false;
containerKey = ksmKeyLocationInfo.getLocalID();
try {
LOG.debug("get key accessing {} {}",
containerID, containerKey);
groupInputStream.streamOffset[i] = length;
- ContainerProtos.KeyData containerKeyData = OzoneContainerTranslation
+ ContainerProtos.KeyData containerKeyData = OzoneContainerTranslation
.containerKeyDataForRead(blockID);
ContainerProtos.GetKeyResponseProto response = ContainerProtocolCalls
.getKey(xceiverClient, containerKeyData, requestId);
@@ -292,7 +292,8 @@ public class ChunkGroupInputStream extends InputStream implements Seekable {
}
success = true;
ChunkInputStream inputStream = new ChunkInputStream(
- ksmKeyLocationInfo.getBlockID(), xceiverClientManager, xceiverClient,
+ ksmKeyLocationInfo.getBlockID(), xceiverClientManager,
+ xceiverClient,
chunks, requestId);
groupInputStream.addStream(inputStream,
ksmKeyLocationInfo.getLength());
http://git-wip-us.apache.org/repos/asf/hadoop/blob/7ca4f0ce/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/ChunkGroupOutputStream.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/ChunkGroupOutputStream.java b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/ChunkGroupOutputStream.java
index c6e56b3..d1a3b46 100644
--- a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/ChunkGroupOutputStream.java
+++ b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/ChunkGroupOutputStream.java
@@ -23,6 +23,7 @@ import org.apache.hadoop.fs.FSExceptionMessages;
import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Result;
import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerInfo;
import org.apache.hadoop.hdds.client.BlockID;
+import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline;
import org.apache.hadoop.ozone.ksm.helpers.KsmKeyLocationInfoGroup;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor;
@@ -163,10 +164,12 @@ public class ChunkGroupOutputStream extends OutputStream {
private void checkKeyLocationInfo(KsmKeyLocationInfo subKeyInfo)
throws IOException {
- ContainerInfo container = scmClient.getContainer(
- subKeyInfo.getContainerID());
+ ContainerWithPipeline containerWithPipeline = scmClient
+ .getContainerWithPipeline(subKeyInfo.getContainerID());
+ ContainerInfo container = containerWithPipeline.getContainerInfo();
+
XceiverClientSpi xceiverClient =
- xceiverClientManager.acquireClient(container.getPipeline(),
+ xceiverClientManager.acquireClient(containerWithPipeline.getPipeline(),
container.getContainerID());
// create container if needed
if (subKeyInfo.getShouldCreateContainer()) {
http://git-wip-us.apache.org/repos/asf/hadoop/blob/7ca4f0ce/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/protocolPB/OzonePBHelper.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/protocolPB/OzonePBHelper.java b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/protocolPB/OzonePBHelper.java
new file mode 100644
index 0000000..8361bac
--- /dev/null
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/protocolPB/OzonePBHelper.java
@@ -0,0 +1,30 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.ozone.protocolPB;
+
+/**
+ * Helper class for converting protobuf objects.
+ */
+public final class OzonePBHelper {
+
+ private OzonePBHelper() {
+ /** Hidden constructor */
+ }
+
+
+}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/7ca4f0ce/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerStateManager.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerStateManager.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerStateManager.java
index bedd5c4..bb85650 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerStateManager.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerStateManager.java
@@ -18,6 +18,7 @@ package org.apache.hadoop.hdds.scm.container;
import com.google.common.primitives.Longs;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline;
import org.apache.hadoop.ozone.MiniOzoneCluster;
import org.apache.hadoop.ozone.OzoneConsts;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
@@ -30,7 +31,6 @@ import org.junit.Before;
import org.junit.Test;
import java.io.IOException;
-import java.nio.charset.Charset;
import java.util.ArrayList;
import java.util.List;
import java.util.NavigableSet;
@@ -71,31 +71,35 @@ public class TestContainerStateManager {
@Test
public void testAllocateContainer() throws IOException {
// Allocate a container and verify the container info
- ContainerInfo container1 = scm.getClientProtocolServer().allocateContainer(
- xceiverClientManager.getType(),
- xceiverClientManager.getFactor(), containerOwner);
+ ContainerWithPipeline container1 = scm.getClientProtocolServer()
+ .allocateContainer(
+ xceiverClientManager.getType(),
+ xceiverClientManager.getFactor(), containerOwner);
ContainerInfo info = containerStateManager
.getMatchingContainer(OzoneConsts.GB * 3, containerOwner,
xceiverClientManager.getType(), xceiverClientManager.getFactor(),
HddsProtos.LifeCycleState.ALLOCATED);
- Assert.assertEquals(container1.getContainerID(), info.getContainerID());
+ Assert.assertEquals(container1.getContainerInfo().getContainerID(),
+ info.getContainerID());
Assert.assertEquals(OzoneConsts.GB * 3, info.getAllocatedBytes());
Assert.assertEquals(containerOwner, info.getOwner());
Assert.assertEquals(xceiverClientManager.getType(),
- info.getPipeline().getType());
+ info.getReplicationType());
Assert.assertEquals(xceiverClientManager.getFactor(),
- info.getPipeline().getFactor());
+ info.getReplicationFactor());
Assert.assertEquals(HddsProtos.LifeCycleState.ALLOCATED, info.getState());
// Check there are two containers in ALLOCATED state after allocation
- ContainerInfo container2 = scm.getClientProtocolServer().allocateContainer(
- xceiverClientManager.getType(),
- xceiverClientManager.getFactor(), containerOwner);
+ ContainerWithPipeline container2 = scm.getClientProtocolServer()
+ .allocateContainer(
+ xceiverClientManager.getType(),
+ xceiverClientManager.getFactor(), containerOwner);
int numContainers = containerStateManager
.getMatchingContainerIDs(containerOwner,
xceiverClientManager.getType(), xceiverClientManager.getFactor(),
HddsProtos.LifeCycleState.ALLOCATED).size();
- Assert.assertNotEquals(container1.getContainerID(), container2.getContainerID());
+ Assert.assertNotEquals(container1.getContainerInfo().getContainerID(),
+ container2.getContainerInfo().getContainerID());
Assert.assertEquals(2, numContainers);
}
@@ -105,14 +109,15 @@ public class TestContainerStateManager {
List<ContainerInfo> containers = new ArrayList<>();
for (int i = 0; i < 10; i++) {
- ContainerInfo container = scm.getClientProtocolServer().allocateContainer(
- xceiverClientManager.getType(),
- xceiverClientManager.getFactor(), containerOwner);
- containers.add(container);
+ ContainerWithPipeline container = scm.getClientProtocolServer()
+ .allocateContainer(
+ xceiverClientManager.getType(),
+ xceiverClientManager.getFactor(), containerOwner);
+ containers.add(container.getContainerInfo());
if (i >= 5) {
- scm.getScmContainerManager()
- .updateContainerState(container.getContainerID(),
- HddsProtos.LifeCycleEvent.CREATE);
+ scm.getScmContainerManager().updateContainerState(container
+ .getContainerInfo().getContainerID(),
+ HddsProtos.LifeCycleEvent.CREATE);
}
}
@@ -134,34 +139,40 @@ public class TestContainerStateManager {
@Test
public void testGetMatchingContainer() throws IOException {
- ContainerInfo container1 = scm.getClientProtocolServer().
+ ContainerWithPipeline container1 = scm.getClientProtocolServer().
allocateContainer(xceiverClientManager.getType(),
- xceiverClientManager.getFactor(), containerOwner);
- scmContainerMapping.updateContainerState(container1.getContainerID(),
- HddsProtos.LifeCycleEvent.CREATE);
- scmContainerMapping.updateContainerState(container1.getContainerID(),
- HddsProtos.LifeCycleEvent.CREATED);
+ xceiverClientManager.getFactor(), containerOwner);
+ scmContainerMapping
+ .updateContainerState(container1.getContainerInfo().getContainerID(),
+ HddsProtos.LifeCycleEvent.CREATE);
+ scmContainerMapping
+ .updateContainerState(container1.getContainerInfo().getContainerID(),
+ HddsProtos.LifeCycleEvent.CREATED);
- ContainerInfo container2 = scm.getClientProtocolServer().
+ ContainerWithPipeline container2 = scm.getClientProtocolServer().
allocateContainer(xceiverClientManager.getType(),
- xceiverClientManager.getFactor(), containerOwner);
+ xceiverClientManager.getFactor(), containerOwner);
ContainerInfo info = containerStateManager
.getMatchingContainer(OzoneConsts.GB * 3, containerOwner,
xceiverClientManager.getType(), xceiverClientManager.getFactor(),
HddsProtos.LifeCycleState.OPEN);
- Assert.assertEquals(container1.getContainerID(), info.getContainerID());
+ Assert.assertEquals(container1.getContainerInfo().getContainerID(),
+ info.getContainerID());
info = containerStateManager
.getMatchingContainer(OzoneConsts.GB * 3, containerOwner,
xceiverClientManager.getType(), xceiverClientManager.getFactor(),
HddsProtos.LifeCycleState.ALLOCATED);
- Assert.assertEquals(container2.getContainerID(), info.getContainerID());
+ Assert.assertEquals(container2.getContainerInfo().getContainerID(),
+ info.getContainerID());
- scmContainerMapping.updateContainerState(container2.getContainerID(),
- HddsProtos.LifeCycleEvent.CREATE);
- scmContainerMapping.updateContainerState(container2.getContainerID(),
- HddsProtos.LifeCycleEvent.CREATED);
+ scmContainerMapping
+ .updateContainerState(container2.getContainerInfo().getContainerID(),
+ HddsProtos.LifeCycleEvent.CREATE);
+ scmContainerMapping
+ .updateContainerState(container2.getContainerInfo().getContainerID(),
+ HddsProtos.LifeCycleEvent.CREATED);
// space has already been allocated in container1, now container 2 should
// be chosen.
@@ -169,7 +180,8 @@ public class TestContainerStateManager {
.getMatchingContainer(OzoneConsts.GB * 3, containerOwner,
xceiverClientManager.getType(), xceiverClientManager.getFactor(),
HddsProtos.LifeCycleState.OPEN);
- Assert.assertEquals(container2.getContainerID(), info.getContainerID());
+ Assert.assertEquals(container2.getContainerInfo().getContainerID(),
+ info.getContainerID());
}
@Test
@@ -183,30 +195,33 @@ public class TestContainerStateManager {
// Allocate container1 and update its state from ALLOCATED -> CREATING ->
// OPEN -> CLOSING -> CLOSED -> DELETING -> DELETED
- ContainerInfo container1 = scm.getClientProtocolServer().allocateContainer(
- xceiverClientManager.getType(),
- xceiverClientManager.getFactor(), containerOwner);
+ ContainerWithPipeline container1 = scm.getClientProtocolServer()
+ .allocateContainer(
+ xceiverClientManager.getType(),
+ xceiverClientManager.getFactor(), containerOwner);
containers = containerStateManager.getMatchingContainerIDs(containerOwner,
xceiverClientManager.getType(), xceiverClientManager.getFactor(),
HddsProtos.LifeCycleState.ALLOCATED).size();
Assert.assertEquals(1, containers);
- scmContainerMapping.updateContainerState(container1.getContainerID(),
- HddsProtos.LifeCycleEvent.CREATE);
+ scmContainerMapping
+ .updateContainerState(container1.getContainerInfo().getContainerID(),
+ HddsProtos.LifeCycleEvent.CREATE);
containers = containerStateManager.getMatchingContainerIDs(containerOwner,
xceiverClientManager.getType(), xceiverClientManager.getFactor(),
HddsProtos.LifeCycleState.CREATING).size();
Assert.assertEquals(1, containers);
- scmContainerMapping.updateContainerState(container1.getContainerID(),
- HddsProtos.LifeCycleEvent.CREATED);
+ scmContainerMapping
+ .updateContainerState(container1.getContainerInfo().getContainerID(),
+ HddsProtos.LifeCycleEvent.CREATED);
containers = containerStateManager.getMatchingContainerIDs(containerOwner,
xceiverClientManager.getType(), xceiverClientManager.getFactor(),
HddsProtos.LifeCycleState.OPEN).size();
Assert.assertEquals(1, containers);
scmContainerMapping
- .updateContainerState(container1.getContainerID(),
+ .updateContainerState(container1.getContainerInfo().getContainerID(),
HddsProtos.LifeCycleEvent.FINALIZE);
containers = containerStateManager.getMatchingContainerIDs(containerOwner,
xceiverClientManager.getType(), xceiverClientManager.getFactor(),
@@ -214,7 +229,7 @@ public class TestContainerStateManager {
Assert.assertEquals(1, containers);
scmContainerMapping
- .updateContainerState(container1.getContainerID(),
+ .updateContainerState(container1.getContainerInfo().getContainerID(),
HddsProtos.LifeCycleEvent.CLOSE);
containers = containerStateManager.getMatchingContainerIDs(containerOwner,
xceiverClientManager.getType(), xceiverClientManager.getFactor(),
@@ -222,7 +237,7 @@ public class TestContainerStateManager {
Assert.assertEquals(1, containers);
scmContainerMapping
- .updateContainerState(container1.getContainerID(),
+ .updateContainerState(container1.getContainerInfo().getContainerID(),
HddsProtos.LifeCycleEvent.DELETE);
containers = containerStateManager.getMatchingContainerIDs(containerOwner,
xceiverClientManager.getType(), xceiverClientManager.getFactor(),
@@ -230,7 +245,7 @@ public class TestContainerStateManager {
Assert.assertEquals(1, containers);
scmContainerMapping
- .updateContainerState(container1.getContainerID(),
+ .updateContainerState(container1.getContainerInfo().getContainerID(),
HddsProtos.LifeCycleEvent.CLEANUP);
containers = containerStateManager.getMatchingContainerIDs(containerOwner,
xceiverClientManager.getType(), xceiverClientManager.getFactor(),
@@ -239,13 +254,15 @@ public class TestContainerStateManager {
// Allocate container2 and update its state from ALLOCATED -> CREATING ->
// DELETING
- ContainerInfo container2 = scm.getClientProtocolServer().allocateContainer(
- xceiverClientManager.getType(),
- xceiverClientManager.getFactor(), containerOwner);
- scmContainerMapping.updateContainerState(container2.getContainerID(),
- HddsProtos.LifeCycleEvent.CREATE);
+ ContainerWithPipeline container2 = scm.getClientProtocolServer()
+ .allocateContainer(
+ xceiverClientManager.getType(),
+ xceiverClientManager.getFactor(), containerOwner);
+ scmContainerMapping
+ .updateContainerState(container2.getContainerInfo().getContainerID(),
+ HddsProtos.LifeCycleEvent.CREATE);
scmContainerMapping
- .updateContainerState(container2.getContainerID(),
+ .updateContainerState(container2.getContainerInfo().getContainerID(),
HddsProtos.LifeCycleEvent.TIMEOUT);
containers = containerStateManager.getMatchingContainerIDs(containerOwner,
xceiverClientManager.getType(), xceiverClientManager.getFactor(),
@@ -254,17 +271,21 @@ public class TestContainerStateManager {
// Allocate container3 and update its state from ALLOCATED -> CREATING ->
// OPEN -> CLOSING -> CLOSED
- ContainerInfo container3 = scm.getClientProtocolServer().allocateContainer(
- xceiverClientManager.getType(),
- xceiverClientManager.getFactor(), containerOwner);
- scmContainerMapping.updateContainerState(container3.getContainerID(),
- HddsProtos.LifeCycleEvent.CREATE);
- scmContainerMapping.updateContainerState(container3.getContainerID(),
- HddsProtos.LifeCycleEvent.CREATED);
- scmContainerMapping.updateContainerState(container3.getContainerID(),
- HddsProtos.LifeCycleEvent.FINALIZE);
+ ContainerWithPipeline container3 = scm.getClientProtocolServer()
+ .allocateContainer(
+ xceiverClientManager.getType(),
+ xceiverClientManager.getFactor(), containerOwner);
+ scmContainerMapping
+ .updateContainerState(container3.getContainerInfo().getContainerID(),
+ HddsProtos.LifeCycleEvent.CREATE);
+ scmContainerMapping
+ .updateContainerState(container3.getContainerInfo().getContainerID(),
+ HddsProtos.LifeCycleEvent.CREATED);
+ scmContainerMapping
+ .updateContainerState(container3.getContainerInfo().getContainerID(),
+ HddsProtos.LifeCycleEvent.FINALIZE);
scmContainerMapping
- .updateContainerState(container3.getContainerID(),
+ .updateContainerState(container3.getContainerInfo().getContainerID(),
HddsProtos.LifeCycleEvent.CLOSE);
containers = containerStateManager.getMatchingContainerIDs(containerOwner,
xceiverClientManager.getType(), xceiverClientManager.getFactor(),
@@ -274,12 +295,14 @@ public class TestContainerStateManager {
@Test
public void testUpdatingAllocatedBytes() throws Exception {
- ContainerInfo container1 = scm.getClientProtocolServer().allocateContainer(
- xceiverClientManager.getType(),
+ ContainerWithPipeline container1 = scm.getClientProtocolServer()
+ .allocateContainer(xceiverClientManager.getType(),
xceiverClientManager.getFactor(), containerOwner);
- scmContainerMapping.updateContainerState(container1.getContainerID(),
+ scmContainerMapping.updateContainerState(container1
+ .getContainerInfo().getContainerID(),
HddsProtos.LifeCycleEvent.CREATE);
- scmContainerMapping.updateContainerState(container1.getContainerID(),
+ scmContainerMapping.updateContainerState(container1
+ .getContainerInfo().getContainerID(),
HddsProtos.LifeCycleEvent.CREATED);
Random ran = new Random();
@@ -292,18 +315,18 @@ public class TestContainerStateManager {
.getMatchingContainer(size, containerOwner,
xceiverClientManager.getType(), xceiverClientManager.getFactor(),
HddsProtos.LifeCycleState.OPEN);
- Assert.assertEquals(container1.getContainerID(), info.getContainerID());
+ Assert.assertEquals(container1.getContainerInfo().getContainerID(),
+ info.getContainerID());
ContainerMapping containerMapping =
- (ContainerMapping)scmContainerMapping;
+ (ContainerMapping) scmContainerMapping;
// manually trigger a flush, this will persist the allocated bytes value
// to disk
containerMapping.flushContainerInfo();
// the persisted value should always be equal to allocated size.
- byte[] containerBytes =
- containerMapping.getContainerStore().get(
- Longs.toByteArray(container1.getContainerID()));
+ byte[] containerBytes = containerMapping.getContainerStore().get(
+ Longs.toByteArray(container1.getContainerInfo().getContainerID()));
HddsProtos.SCMContainerInfo infoProto =
HddsProtos.SCMContainerInfo.PARSER.parseFrom(containerBytes);
ContainerInfo currentInfo = ContainerInfo.fromProtobuf(infoProto);
http://git-wip-us.apache.org/repos/asf/hadoop/blob/7ca4f0ce/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestContainerOperations.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestContainerOperations.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestContainerOperations.java
index d4c9d4f..129cf04 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestContainerOperations.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestContainerOperations.java
@@ -18,7 +18,7 @@
package org.apache.hadoop.ozone;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerInfo;
+import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline;
import org.apache.hadoop.ipc.ProtobufRpcEngine;
import org.apache.hadoop.ipc.RPC;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
@@ -28,7 +28,6 @@ import org.apache.hadoop.hdds.scm.ScmConfigKeys;
import org.apache.hadoop.hdds.scm.XceiverClientManager;
import org.apache.hadoop.hdds.scm.client.ContainerOperationClient;
import org.apache.hadoop.hdds.scm.client.ScmClient;
-import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline;
import org.apache.hadoop.hdds.scm.protocolPB.StorageContainerLocationProtocolClientSideTranslatorPB;
import org.apache.hadoop.hdds.scm.protocolPB.StorageContainerLocationProtocolPB;
import org.junit.AfterClass;
@@ -78,12 +77,12 @@ public class TestContainerOperations {
*/
@Test
public void testCreate() throws Exception {
- ContainerInfo container = storageClient.createContainer(HddsProtos
+ ContainerWithPipeline container = storageClient.createContainer(HddsProtos
.ReplicationType.STAND_ALONE, HddsProtos.ReplicationFactor
.ONE, "OZONE");
- assertEquals(container.getContainerID(),
- storageClient.getContainer(container.getContainerID()).
- getContainerID());
+ assertEquals(container.getContainerInfo().getContainerID(), storageClient
+ .getContainer(container.getContainerInfo().getContainerID())
+ .getContainerID());
}
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/7ca4f0ce/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManager.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManager.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManager.java
index 0c1d8f2..d07097c 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManager.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManager.java
@@ -22,7 +22,7 @@ import java.io.IOException;
import org.apache.commons.lang3.RandomUtils;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerInfo;
+import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline;
import org.apache.hadoop.hdds.scm.server.SCMClientProtocolServer;
import org.apache.hadoop.hdds.scm.server.SCMStorage;
import org.apache.hadoop.hdds.scm.node.NodeManager;
@@ -131,7 +131,7 @@ public class TestStorageContainerManager {
}
try {
- ContainerInfo container2 = mockClientServer
+ ContainerWithPipeline container2 = mockClientServer
.allocateContainer(xceiverClientManager.getType(),
HddsProtos.ReplicationFactor.ONE, "OZONE");
if (expectPermissionDenied) {
@@ -144,7 +144,7 @@ public class TestStorageContainerManager {
}
try {
- ContainerInfo container3 = mockClientServer
+ ContainerWithPipeline container3 = mockClientServer
.allocateContainer(xceiverClientManager.getType(),
HddsProtos.ReplicationFactor.ONE, "OZONE");
if (expectPermissionDenied) {
http://git-wip-us.apache.org/repos/asf/hadoop/blob/7ca4f0ce/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManagerHelper.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManagerHelper.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManagerHelper.java
index c937980..4c2a904 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManagerHelper.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManagerHelper.java
@@ -23,7 +23,7 @@ import com.google.common.primitives.Longs;
import org.apache.commons.lang3.RandomStringUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.StorageType;
-import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerInfo;
+import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline;
import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.server.datanode.ObjectStoreHandler;
import org.apache.hadoop.hdds.protocol.DatanodeDetails;
@@ -158,9 +158,11 @@ public class TestStorageContainerManagerHelper {
private MetadataStore getContainerMetadata(Long containerID)
throws IOException {
- ContainerInfo container = cluster.getStorageContainerManager()
- .getClientProtocolServer().getContainer(containerID);
- DatanodeDetails leadDN = container.getPipeline().getLeader();
+ ContainerWithPipeline containerWithPipeline = cluster
+ .getStorageContainerManager().getClientProtocolServer()
+ .getContainerWithPipeline(containerID);
+
+ DatanodeDetails leadDN = containerWithPipeline.getPipeline().getLeader();
OzoneContainer containerServer =
getContainerServerByDatanodeUuid(leadDN.getUuidString());
ContainerData containerData = containerServer.getContainerManager()
http://git-wip-us.apache.org/repos/asf/hadoop/blob/7ca4f0ce/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClient.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClient.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClient.java
index cafe5db..214382e 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClient.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClient.java
@@ -390,8 +390,8 @@ public class TestOzoneRpcClient {
keyInfo.getLatestVersionLocations().getLocationList()) {
ContainerInfo container =
storageContainerLocationClient.getContainer(info.getContainerID());
- if ((container.getPipeline().getFactor() != replicationFactor) ||
- (container.getPipeline().getType() != replicationType)) {
+ if (!container.getReplicationFactor().equals(replicationFactor) || (
+ container.getReplicationType() != replicationType)) {
return false;
}
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/7ca4f0ce/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerByPipeline.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerByPipeline.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerByPipeline.java
index 265c82b..3e514e7 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerByPipeline.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerByPipeline.java
@@ -23,8 +23,6 @@ import org.apache.hadoop.hdds.client.ReplicationType;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.hdds.protocol.DatanodeDetails;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
-import org.apache.hadoop.hdds.scm.container.ContainerID;
-import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerInfo;
import org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException;
import org.apache.hadoop.ozone.HddsDatanodeService;
import org.apache.hadoop.ozone.MiniOzoneCluster;
@@ -35,7 +33,6 @@ import org.apache.hadoop.ozone.client.OzoneClient;
import org.apache.hadoop.ozone.client.OzoneClientFactory;
import org.apache.hadoop.ozone.client.io.OzoneOutputStream;
import org.apache.hadoop.ozone.container.common.helpers.ContainerData;
-import org.apache.hadoop.ozone.container.common.statemachine.SCMConnectionManager;
import org.apache.hadoop.ozone.container.ozoneimpl.OzoneContainer;
import org.apache.hadoop.ozone.ksm.helpers.KsmKeyArgs;
import org.apache.hadoop.ozone.ksm.helpers.KsmKeyLocationInfo;
@@ -112,9 +109,9 @@ public class TestCloseContainerByPipeline {
.get(0).getBlocksLatestVersionOnly().get(0);
long containerID = ksmKeyLocationInfo.getContainerID();
- List<DatanodeDetails> datanodes =
- cluster.getStorageContainerManager().getContainerInfo(containerID)
- .getPipeline().getMachines();
+ List<DatanodeDetails> datanodes = cluster.getStorageContainerManager()
+ .getScmContainerManager().getContainerWithPipeline(containerID)
+ .getPipeline().getMachines();
Assert.assertTrue(datanodes.size() == 1);
DatanodeDetails datanodeDetails = datanodes.get(0);
@@ -167,9 +164,9 @@ public class TestCloseContainerByPipeline {
.get(0).getBlocksLatestVersionOnly().get(0);
long containerID = ksmKeyLocationInfo.getContainerID();
- List<DatanodeDetails> datanodes =
- cluster.getStorageContainerManager().getContainerInfo(containerID)
- .getPipeline().getMachines();
+ List<DatanodeDetails> datanodes = cluster.getStorageContainerManager()
+ .getScmContainerManager().getContainerWithPipeline(containerID)
+ .getPipeline().getMachines();
Assert.assertTrue(datanodes.size() == 1);
DatanodeDetails datanodeDetails = datanodes.get(0);
@@ -220,9 +217,9 @@ public class TestCloseContainerByPipeline {
.get(0).getBlocksLatestVersionOnly().get(0);
long containerID = ksmKeyLocationInfo.getContainerID();
- List<DatanodeDetails> datanodes =
- cluster.getStorageContainerManager().getContainerInfo(containerID)
- .getPipeline().getMachines();
+ List<DatanodeDetails> datanodes = cluster.getStorageContainerManager()
+ .getScmContainerManager().getContainerWithPipeline(containerID)
+ .getPipeline().getMachines();
Assert.assertTrue(datanodes.size() == 3);
GenericTestUtils.LogCapturer logCapturer =
http://git-wip-us.apache.org/repos/asf/hadoop/blob/7ca4f0ce/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/ksm/TestContainerReportWithKeys.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/ksm/TestContainerReportWithKeys.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/ksm/TestContainerReportWithKeys.java
index bafba32..1cc7ff8 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/ksm/TestContainerReportWithKeys.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/ksm/TestContainerReportWithKeys.java
@@ -22,6 +22,7 @@ import org.apache.hadoop.hdds.client.ReplicationFactor;
import org.apache.hadoop.hdds.client.ReplicationType;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
+import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerInfo;
import org.apache.hadoop.ozone.MiniOzoneCluster;
import org.apache.hadoop.ozone.OzoneConfigKeys;
import org.apache.hadoop.ozone.OzoneConsts;
@@ -32,7 +33,6 @@ import org.apache.hadoop.ozone.container.common.interfaces.ContainerManager;
import org.apache.hadoop.ozone.ksm.helpers.KsmKeyArgs;
import org.apache.hadoop.ozone.ksm.helpers.KsmKeyLocationInfo;
import org.apache.hadoop.hdds.scm.server.StorageContainerManager;
-import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerInfo;
import org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException;
import org.junit.AfterClass;
import org.junit.BeforeClass;
http://git-wip-us.apache.org/repos/asf/hadoop/blob/7ca4f0ce/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestAllocateContainer.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestAllocateContainer.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestAllocateContainer.java
index b1e9d26..144c562 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestAllocateContainer.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestAllocateContainer.java
@@ -17,14 +17,12 @@
*/
package org.apache.hadoop.ozone.scm;
-import org.apache.commons.lang3.RandomStringUtils;
-import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerInfo;
+import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.ozone.MiniOzoneCluster;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.hdds.scm.XceiverClientManager;
import org.apache.hadoop.hdds.scm.protocolPB.StorageContainerLocationProtocolClientSideTranslatorPB;
-import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline;
import org.junit.AfterClass;
import org.junit.Assert;
import org.junit.BeforeClass;
@@ -68,7 +66,7 @@ public class TestAllocateContainer {
@Test
public void testAllocate() throws Exception {
- ContainerInfo container = storageContainerLocationClient.allocateContainer(
+ ContainerWithPipeline container = storageContainerLocationClient.allocateContainer(
xceiverClientManager.getType(),
xceiverClientManager.getFactor(),
containerOwner);
http://git-wip-us.apache.org/repos/asf/hadoop/blob/7ca4f0ce/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestContainerSmallFile.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestContainerSmallFile.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestContainerSmallFile.java
index ce1fe46..42bb936 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestContainerSmallFile.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestContainerSmallFile.java
@@ -19,7 +19,7 @@ package org.apache.hadoop.ozone.scm;
import org.apache.hadoop.hdds.client.BlockID;
import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
-import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerInfo;
+import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.ozone.MiniOzoneCluster;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
@@ -81,17 +81,18 @@ public class TestContainerSmallFile {
@Test
public void testAllocateWrite() throws Exception {
String traceID = UUID.randomUUID().toString();
- ContainerInfo container =
+ ContainerWithPipeline container =
storageContainerLocationClient.allocateContainer(
xceiverClientManager.getType(),
HddsProtos.ReplicationFactor.ONE, containerOwner);
- XceiverClientSpi client = xceiverClientManager.acquireClient(
- container.getPipeline(), container.getContainerID());
+ XceiverClientSpi client = xceiverClientManager
+ .acquireClient(container.getPipeline(),
+ container.getContainerInfo().getContainerID());
ContainerProtocolCalls.createContainer(client,
- container.getContainerID(), traceID);
+ container.getContainerInfo().getContainerID(), traceID);
BlockID blockID = ContainerTestHelper.getTestBlockID(
- container.getContainerID());
+ container.getContainerInfo().getContainerID());
ContainerProtocolCalls.writeSmallFile(client, blockID,
"data123".getBytes(), traceID);
ContainerProtos.GetSmallFileResponseProto response =
@@ -104,20 +105,21 @@ public class TestContainerSmallFile {
@Test
public void testInvalidKeyRead() throws Exception {
String traceID = UUID.randomUUID().toString();
- ContainerInfo container =
+ ContainerWithPipeline container =
storageContainerLocationClient.allocateContainer(
xceiverClientManager.getType(),
HddsProtos.ReplicationFactor.ONE, containerOwner);
- XceiverClientSpi client = xceiverClientManager.acquireClient(
- container.getPipeline(), container.getContainerID());
+ XceiverClientSpi client = xceiverClientManager
+ .acquireClient(container.getPipeline(),
+ container.getContainerInfo().getContainerID());
ContainerProtocolCalls.createContainer(client,
- container.getContainerID(), traceID);
+ container.getContainerInfo().getContainerID(), traceID);
thrown.expect(StorageContainerException.class);
thrown.expectMessage("Unable to find the key");
BlockID blockID = ContainerTestHelper.getTestBlockID(
- container.getContainerID());
+ container.getContainerInfo().getContainerID());
// Try to read a Key Container Name
ContainerProtos.GetSmallFileResponseProto response =
ContainerProtocolCalls.readSmallFile(client, blockID, traceID);
@@ -128,20 +130,20 @@ public class TestContainerSmallFile {
public void testInvalidContainerRead() throws Exception {
String traceID = UUID.randomUUID().toString();
long nonExistContainerID = 8888L;
- ContainerInfo container =
+ ContainerWithPipeline container =
storageContainerLocationClient.allocateContainer(
xceiverClientManager.getType(),
HddsProtos.ReplicationFactor.ONE, containerOwner);
- XceiverClientSpi client = xceiverClientManager.
- acquireClient(container.getPipeline(), container.getContainerID());
+ XceiverClientSpi client = xceiverClientManager
+ .acquireClient(container.getPipeline(),
+ container.getContainerInfo().getContainerID());
ContainerProtocolCalls.createContainer(client,
- container.getContainerID(), traceID);
+ container.getContainerInfo().getContainerID(), traceID);
BlockID blockID = ContainerTestHelper.getTestBlockID(
- container.getContainerID());
+ container.getContainerInfo().getContainerID());
ContainerProtocolCalls.writeSmallFile(client, blockID,
"data123".getBytes(), traceID);
-
thrown.expect(StorageContainerException.class);
thrown.expectMessage("Unable to find the container");
http://git-wip-us.apache.org/repos/asf/hadoop/blob/7ca4f0ce/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestSCMCli.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestSCMCli.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestSCMCli.java
index 732221a..a6bb586 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestSCMCli.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestSCMCli.java
@@ -19,6 +19,7 @@ package org.apache.hadoop.ozone.scm;
import com.google.common.primitives.Longs;
import org.apache.hadoop.hdds.protocol.DatanodeDetails;
+import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline;
import org.apache.hadoop.hdds.scm.server.StorageContainerManager;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.ozone.MiniOzoneCluster;
@@ -136,7 +137,7 @@ public class TestSCMCli {
private boolean containerExist(long containerID) {
try {
ContainerInfo container = scm.getClientProtocolServer()
- .getContainer(containerID);
+ .getContainerWithPipeline(containerID).getContainerInfo();
return container != null
&& containerID == container.getContainerID();
} catch (IOException e) {
@@ -157,31 +158,34 @@ public class TestSCMCli {
// 1. Test to delete a non-empty container.
// ****************************************
// Create an non-empty container
- ContainerInfo container = containerOperationClient
+ ContainerWithPipeline container = containerOperationClient
.createContainer(xceiverClientManager.getType(),
HddsProtos.ReplicationFactor.ONE, containerOwner);
ContainerData cdata = ContainerData
.getFromProtBuf(containerOperationClient.readContainer(
- container.getContainerID(), container.getPipeline()), conf);
- KeyUtils.getDB(cdata, conf).put(Longs.toByteArray(container.getContainerID()),
- "someKey".getBytes());
- Assert.assertTrue(containerExist(container.getContainerID()));
+ container.getContainerInfo().getContainerID()), conf);
+ KeyUtils.getDB(cdata, conf)
+ .put(Longs.toByteArray(container.getContainerInfo().getContainerID()),
+ "someKey".getBytes());
+ Assert.assertTrue(
+ containerExist(container.getContainerInfo().getContainerID()));
// Gracefully delete a container should fail because it is open.
- delCmd = new String[] {"-container", "-delete", "-c",
- Long.toString(container.getContainerID())};
+ delCmd = new String[]{"-container", "-delete", "-c",
+ Long.toString(container.getContainerInfo().getContainerID())};
testErr = new ByteArrayOutputStream();
ByteArrayOutputStream out = new ByteArrayOutputStream();
exitCode = runCommandAndGetOutput(delCmd, out, testErr);
assertEquals(EXECUTION_ERROR, exitCode);
assertTrue(testErr.toString()
.contains("Deleting an open container is not allowed."));
- Assert.assertTrue(containerExist(container.getContainerID()));
+ Assert.assertTrue(
+ containerExist(container.getContainerInfo().getContainerID()));
// Close the container
containerOperationClient.closeContainer(
- container.getContainerID(), container.getPipeline());
+ container.getContainerInfo().getContainerID());
// Gracefully delete a container should fail because it is not empty.
testErr = new ByteArrayOutputStream();
@@ -189,45 +193,49 @@ public class TestSCMCli {
assertEquals(EXECUTION_ERROR, exitCode2);
assertTrue(testErr.toString()
.contains("Container cannot be deleted because it is not empty."));
- Assert.assertTrue(containerExist(container.getContainerID()));
+ Assert.assertTrue(
+ containerExist(container.getContainerInfo().getContainerID()));
// Try force delete again.
- delCmd = new String[] {"-container", "-delete", "-c",
- Long.toString(container.getContainerID()), "-f"};
+ delCmd = new String[]{"-container", "-delete", "-c",
+ Long.toString(container.getContainerInfo().getContainerID()), "-f"};
exitCode = runCommandAndGetOutput(delCmd, out, null);
assertEquals("Expected success, found:", ResultCode.SUCCESS, exitCode);
- assertFalse(containerExist(container.getContainerID()));
+ assertFalse(containerExist(container.getContainerInfo().getContainerID()));
// ****************************************
// 2. Test to delete an empty container.
// ****************************************
// Create an empty container
- ContainerInfo emptyContainer = containerOperationClient
+ ContainerWithPipeline emptyContainer = containerOperationClient
.createContainer(xceiverClientManager.getType(),
HddsProtos.ReplicationFactor.ONE, containerOwner);
- containerOperationClient.closeContainer(emptyContainer.getContainerID(),
- container.getPipeline());
- Assert.assertTrue(containerExist(emptyContainer.getContainerID()));
+ containerOperationClient
+ .closeContainer(emptyContainer.getContainerInfo().getContainerID());
+ Assert.assertTrue(
+ containerExist(emptyContainer.getContainerInfo().getContainerID()));
// Successfully delete an empty container.
- delCmd = new String[] {"-container", "-delete", "-c",
- Long.toString(emptyContainer.getContainerID())};
+ delCmd = new String[]{"-container", "-delete", "-c",
+ Long.toString(emptyContainer.getContainerInfo().getContainerID())};
exitCode = runCommandAndGetOutput(delCmd, out, null);
assertEquals(ResultCode.SUCCESS, exitCode);
- assertFalse(containerExist(emptyContainer.getContainerID()));
+ assertFalse(
+ containerExist(emptyContainer.getContainerInfo().getContainerID()));
// After the container is deleted,
// another container can now be recreated.
- ContainerInfo newContainer = containerOperationClient.
+ ContainerWithPipeline newContainer = containerOperationClient.
createContainer(xceiverClientManager.getType(),
HddsProtos.ReplicationFactor.ONE, containerOwner);
- Assert.assertTrue(containerExist(newContainer.getContainerID()));
+ Assert.assertTrue(
+ containerExist(newContainer.getContainerInfo().getContainerID()));
// ****************************************
// 3. Test to delete a non-exist container.
// ****************************************
- long nonExistContainerID = ContainerTestHelper.getTestContainerID();
- delCmd = new String[] {"-container", "-delete", "-c",
+ long nonExistContainerID = ContainerTestHelper.getTestContainerID();
+ delCmd = new String[]{"-container", "-delete", "-c",
Long.toString(nonExistContainerID)};
testErr = new ByteArrayOutputStream();
exitCode = runCommandAndGetOutput(delCmd, out, testErr);
@@ -250,45 +258,33 @@ public class TestSCMCli {
"LeaderID: %s\n" +
"Datanodes: [%s]\n";
- String formatStrWithHash =
- "Container id: %s\n" +
- "Container State: %s\n" +
- "Container Hash: %s\n" +
- "Container DB Path: %s\n" +
- "Container Path: %s\n" +
- "Container Metadata: {%s}\n" +
- "LeaderID: %s\n" +
- "Datanodes: [%s]\n";
-
// Test a non-exist container
String containerID =
Long.toString(ContainerTestHelper.getTestContainerID());
- String[] info = { "-container", "-info", containerID };
+ String[] info = {"-container", "-info", containerID};
int exitCode = runCommandAndGetOutput(info, null, null);
assertEquals("Expected Execution Error, Did not find that.",
EXECUTION_ERROR, exitCode);
// Create an empty container.
- ContainerInfo container = containerOperationClient
+ ContainerWithPipeline container = containerOperationClient
.createContainer(xceiverClientManager.getType(),
HddsProtos.ReplicationFactor.ONE, containerOwner);
- ContainerData data = ContainerData
- .getFromProtBuf(containerOperationClient.
- readContainer(container.getContainerID(),
- container.getPipeline()), conf);
+ ContainerData data = ContainerData.getFromProtBuf(containerOperationClient
+ .readContainer(container.getContainerInfo().getContainerID()), conf);
- info = new String[] { "-container", "-info", "-c",
- Long.toString(container.getContainerID()) };
+ info = new String[]{"-container", "-info", "-c",
+ Long.toString(container.getContainerInfo().getContainerID())};
ByteArrayOutputStream out = new ByteArrayOutputStream();
exitCode = runCommandAndGetOutput(info, out, null);
assertEquals("Expected Success, did not find it.", ResultCode.SUCCESS,
- exitCode);
+ exitCode);
String openStatus = data.isOpen() ? "OPEN" : "CLOSED";
- String expected =
- String.format(formatStr, container.getContainerID(), openStatus,
- data.getDBPath(), data.getContainerPath(), "",
- datanodeDetails.getHostName(), datanodeDetails.getHostName());
+ String expected = String.format(formatStr, container.getContainerInfo()
+ .getContainerID(), openStatus, data.getDBPath(),
+ data.getContainerPath(), "", datanodeDetails.getHostName(),
+ datanodeDetails.getHostName());
assertEquals(expected, out.toString());
out.reset();
@@ -299,40 +295,39 @@ public class TestSCMCli {
HddsProtos.ReplicationFactor.ONE, containerOwner);
data = ContainerData
.getFromProtBuf(containerOperationClient.readContainer(
- container.getContainerID(), container.getPipeline()), conf);
+ container.getContainerInfo().getContainerID()), conf);
KeyUtils.getDB(data, conf)
.put(containerID.getBytes(), "someKey".getBytes());
- info = new String[] { "-container", "-info", "-c",
- Long.toString(container.getContainerID()) };
+ info = new String[]{"-container", "-info", "-c",
+ Long.toString(container.getContainerInfo().getContainerID())};
exitCode = runCommandAndGetOutput(info, out, null);
assertEquals(ResultCode.SUCCESS, exitCode);
openStatus = data.isOpen() ? "OPEN" : "CLOSED";
- expected = String.format(formatStr, container.getContainerID(), openStatus,
- data.getDBPath(), data.getContainerPath(), "",
- datanodeDetails.getHostName(), datanodeDetails.getHostName());
+ expected = String.format(formatStr, container.getContainerInfo().
+ getContainerID(), openStatus, data.getDBPath(),
+ data.getContainerPath(), "", datanodeDetails.getHostName(),
+ datanodeDetails.getHostName());
assertEquals(expected, out.toString());
out.reset();
-
// Close last container and test info again.
- containerOperationClient.closeContainer(
- container.getContainerID(), container.getPipeline());
+ containerOperationClient
+ .closeContainer(container.getContainerInfo().getContainerID());
- info = new String[] { "-container", "-info", "-c",
- Long.toString(container.getContainerID()) };
+ info = new String[]{"-container", "-info", "-c",
+ Long.toString(container.getContainerInfo().getContainerID())};
exitCode = runCommandAndGetOutput(info, out, null);
assertEquals(ResultCode.SUCCESS, exitCode);
- data = ContainerData
- .getFromProtBuf(containerOperationClient.readContainer(
- container.getContainerID(), container.getPipeline()), conf);
+ data = ContainerData.getFromProtBuf(containerOperationClient
+ .readContainer(container.getContainerInfo().getContainerID()), conf);
openStatus = data.isOpen() ? "OPEN" : "CLOSED";
expected = String
- .format(formatStr, container.getContainerID(), openStatus,
- data.getDBPath(), data.getContainerPath(), "",
+ .format(formatStr, container.getContainerInfo().getContainerID(),
+ openStatus, data.getDBPath(), data.getContainerPath(), "",
datanodeDetails.getHostName(), datanodeDetails.getHostName());
assertEquals(expected, out.toString());
}
@@ -360,10 +355,10 @@ public class TestSCMCli {
// Create 20 containers for testing.
List<ContainerInfo> containers = new ArrayList<>();
for (int index = 0; index < 20; index++) {
- ContainerInfo container = containerOperationClient.createContainer(
+ ContainerWithPipeline container = containerOperationClient.createContainer(
xceiverClientManager.getType(), HddsProtos.ReplicationFactor.ONE,
containerOwner);
- containers.add(container);
+ containers.add(container.getContainerInfo());
}
ByteArrayOutputStream out = new ByteArrayOutputStream();
@@ -417,11 +412,11 @@ public class TestSCMCli {
@Test
public void testCloseContainer() throws Exception {
- long containerID = containerOperationClient
- .createContainer(xceiverClientManager.getType(),
- HddsProtos.ReplicationFactor.ONE, containerOwner).getContainerID();
+ long containerID = containerOperationClient.createContainer(
+ xceiverClientManager.getType(), HddsProtos.ReplicationFactor.ONE,
+ containerOwner).getContainerInfo().getContainerID();
ContainerInfo container = scm.getClientProtocolServer()
- .getContainer(containerID);
+ .getContainerWithPipeline(containerID).getContainerInfo();
assertNotNull(container);
assertEquals(containerID, container.getContainerID());
http://git-wip-us.apache.org/repos/asf/hadoop/blob/7ca4f0ce/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestXceiverClientManager.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestXceiverClientManager.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestXceiverClientManager.java
index 56f3c7a..a75264e 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestXceiverClientManager.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestXceiverClientManager.java
@@ -20,7 +20,7 @@ package org.apache.hadoop.ozone.scm;
import com.google.common.cache.Cache;
import org.apache.commons.lang3.RandomStringUtils;
import org.apache.hadoop.hdds.scm.ScmConfigKeys;
-import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerInfo;
+import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.ozone.MiniOzoneCluster;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
@@ -98,22 +98,25 @@ public class TestXceiverClientManager {
shouldUseGrpc);
XceiverClientManager clientManager = new XceiverClientManager(conf);
- ContainerInfo container1 = storageContainerLocationClient
+ ContainerWithPipeline container1 = storageContainerLocationClient
.allocateContainer(clientManager.getType(), clientManager.getFactor(),
containerOwner);
- XceiverClientSpi client1 = clientManager.acquireClient(container1.getPipeline(),
- container1.getContainerID());
+ XceiverClientSpi client1 = clientManager
+ .acquireClient(container1.getPipeline(),
+ container1.getContainerInfo().getContainerID());
Assert.assertEquals(1, client1.getRefcount());
- ContainerInfo container2 = storageContainerLocationClient
+ ContainerWithPipeline container2 = storageContainerLocationClient
.allocateContainer(clientManager.getType(), clientManager.getFactor(),
containerOwner);
- XceiverClientSpi client2 = clientManager.acquireClient(container2.getPipeline(),
- container2.getContainerID());
+ XceiverClientSpi client2 = clientManager
+ .acquireClient(container2.getPipeline(),
+ container2.getContainerInfo().getContainerID());
Assert.assertEquals(1, client2.getRefcount());
- XceiverClientSpi client3 = clientManager.acquireClient(container1.getPipeline(),
- container1.getContainerID());
+ XceiverClientSpi client3 = clientManager
+ .acquireClient(container1.getPipeline(),
+ container1.getContainerInfo().getContainerID());
Assert.assertEquals(2, client3.getRefcount());
Assert.assertEquals(2, client1.getRefcount());
Assert.assertEquals(client1, client3);
@@ -132,32 +135,35 @@ public class TestXceiverClientManager {
Cache<Long, XceiverClientSpi> cache =
clientManager.getClientCache();
- ContainerInfo container1 =
+ ContainerWithPipeline container1 =
storageContainerLocationClient.allocateContainer(
clientManager.getType(), HddsProtos.ReplicationFactor.ONE,
containerOwner);
- XceiverClientSpi client1 = clientManager.acquireClient(container1.getPipeline(),
- container1.getContainerID());
+ XceiverClientSpi client1 = clientManager
+ .acquireClient(container1.getPipeline(),
+ container1.getContainerInfo().getContainerID());
Assert.assertEquals(1, client1.getRefcount());
Assert.assertEquals(container1.getPipeline(),
client1.getPipeline());
- ContainerInfo container2 =
+ ContainerWithPipeline container2 =
storageContainerLocationClient.allocateContainer(
clientManager.getType(),
HddsProtos.ReplicationFactor.ONE, containerOwner);
- XceiverClientSpi client2 = clientManager.acquireClient(container2.getPipeline(),
- container2.getContainerID());
+ XceiverClientSpi client2 = clientManager
+ .acquireClient(container2.getPipeline(),
+ container2.getContainerInfo().getContainerID());
Assert.assertEquals(1, client2.getRefcount());
Assert.assertNotEquals(client1, client2);
// least recent container (i.e containerName1) is evicted
- XceiverClientSpi nonExistent1 = cache.getIfPresent(container1.getContainerID());
+ XceiverClientSpi nonExistent1 = cache
+ .getIfPresent(container1.getContainerInfo().getContainerID());
Assert.assertEquals(null, nonExistent1);
// However container call should succeed because of refcount on the client.
String traceID1 = "trace" + RandomStringUtils.randomNumeric(4);
ContainerProtocolCalls.createContainer(client1,
- container1.getContainerID(), traceID1);
+ container1.getContainerInfo().getContainerID(), traceID1);
// After releasing the client, this connection should be closed
// and any container operations should fail
@@ -166,7 +172,7 @@ public class TestXceiverClientManager {
String expectedMessage = "This channel is not connected.";
try {
ContainerProtocolCalls.createContainer(client1,
- container1.getContainerID(), traceID1);
+ container1.getContainerInfo().getContainerID(), traceID1);
Assert.fail("Create container should throw exception on closed"
+ "client");
} catch (Exception e) {
@@ -186,28 +192,30 @@ public class TestXceiverClientManager {
Cache<Long, XceiverClientSpi> cache =
clientManager.getClientCache();
- ContainerInfo container1 =
+ ContainerWithPipeline container1 =
storageContainerLocationClient.allocateContainer(
clientManager.getType(),
clientManager.getFactor(), containerOwner);
- XceiverClientSpi client1 = clientManager.acquireClient(container1.getPipeline(),
- container1.getContainerID());
+ XceiverClientSpi client1 = clientManager
+ .acquireClient(container1.getPipeline(),
+ container1.getContainerInfo().getContainerID());
Assert.assertEquals(1, client1.getRefcount());
clientManager.releaseClient(client1);
Assert.assertEquals(0, client1.getRefcount());
- ContainerInfo container2 = storageContainerLocationClient
+ ContainerWithPipeline container2 = storageContainerLocationClient
.allocateContainer(clientManager.getType(), clientManager.getFactor(),
containerOwner);
- XceiverClientSpi client2 = clientManager.acquireClient(container2.getPipeline(),
- container2.getContainerID());
+ XceiverClientSpi client2 = clientManager
+ .acquireClient(container2.getPipeline(),
+ container2.getContainerInfo().getContainerID());
Assert.assertEquals(1, client2.getRefcount());
Assert.assertNotEquals(client1, client2);
-
// now client 1 should be evicted
- XceiverClientSpi nonExistent = cache.getIfPresent(container1.getContainerID());
+ XceiverClientSpi nonExistent = cache
+ .getIfPresent(container1.getContainerInfo().getContainerID());
Assert.assertEquals(null, nonExistent);
// Any container operation should now fail
@@ -215,7 +223,7 @@ public class TestXceiverClientManager {
String expectedMessage = "This channel is not connected.";
try {
ContainerProtocolCalls.createContainer(client1,
- container1.getContainerID(), traceID2);
+ container1.getContainerInfo().getContainerID(), traceID2);
Assert.fail("Create container should throw exception on closed"
+ "client");
} catch (Exception e) {
---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org