You are viewing a plain text version of this content. The canonical link for it is here.
Posted to common-commits@hadoop.apache.org by xy...@apache.org on 2018/07/24 05:04:31 UTC
hadoop git commit: HDDS-258. Helper methods to generate NodeReport
and ContainerReport for testing. Contributed by Nanda Kumar.
Repository: hadoop
Updated Branches:
refs/heads/trunk 17a87977f -> 2ced3efe9
HDDS-258. Helper methods to generate NodeReport and ContainerReport for testing. Contributed by Nanda Kumar.
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2ced3efe
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2ced3efe
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2ced3efe
Branch: refs/heads/trunk
Commit: 2ced3efe94eecc3e6076be1f0341bf6a2f2affab
Parents: 17a8797
Author: Xiaoyu Yao <xy...@apache.org>
Authored: Mon Jul 23 21:29:44 2018 -0700
Committer: Xiaoyu Yao <xy...@apache.org>
Committed: Mon Jul 23 22:04:20 2018 -0700
----------------------------------------------------------------------
.../hadoop/hdds/scm/node/SCMNodeManager.java | 27 +-
.../org/apache/hadoop/hdds/scm/TestUtils.java | 372 ++++++++++++++-----
.../command/TestCommandStatusReportHandler.java | 2 +-
.../hdds/scm/container/MockNodeManager.java | 4 +-
.../scm/container/TestContainerMapping.java | 4 +-
.../container/closer/TestContainerCloser.java | 2 +-
.../TestSCMContainerPlacementCapacity.java | 2 +-
.../TestSCMContainerPlacementRandom.java | 2 +-
.../replication/TestReplicationManager.java | 7 +-
.../hdds/scm/node/TestContainerPlacement.java | 4 -
.../hadoop/hdds/scm/node/TestNodeManager.java | 80 ++--
.../hdds/scm/node/TestNodeReportHandler.java | 19 +-
.../scm/node/TestSCMNodeStorageStatMap.java | 13 +-
.../TestSCMDatanodeHeartbeatDispatcher.java | 4 +-
.../ozone/container/common/TestEndPoint.java | 74 ++--
.../hadoop/ozone/TestMiniOzoneCluster.java | 20 +-
.../container/metrics/TestContainerMetrics.java | 2 +-
.../container/ozoneimpl/TestOzoneContainer.java | 2 +-
.../container/server/TestContainerServer.java | 4 +-
19 files changed, 406 insertions(+), 238 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2ced3efe/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeManager.java
index 7370b07..fca08bd 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeManager.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeManager.java
@@ -342,7 +342,8 @@ public class SCMNodeManager
public VersionResponse getVersion(SCMVersionRequestProto versionRequest) {
return VersionResponse.newBuilder()
.setVersion(this.version.getVersion())
- .addValue(OzoneConsts.SCM_ID, this.scmManager.getScmStorage().getScmId())
+ .addValue(OzoneConsts.SCM_ID,
+ this.scmManager.getScmStorage().getScmId())
.addValue(OzoneConsts.CLUSTER_ID, this.scmManager.getScmStorage()
.getClusterID())
.build();
@@ -364,15 +365,11 @@ public class SCMNodeManager
public RegisteredCommand register(
DatanodeDetails datanodeDetails, NodeReportProto nodeReport) {
- String hostname = null;
- String ip = null;
InetAddress dnAddress = Server.getRemoteIp();
if (dnAddress != null) {
// Mostly called inside an RPC, update ip and peer hostname
- hostname = dnAddress.getHostName();
- ip = dnAddress.getHostAddress();
- datanodeDetails.setHostName(hostname);
- datanodeDetails.setIpAddress(ip);
+ datanodeDetails.setHostName(dnAddress.getHostName());
+ datanodeDetails.setIpAddress(dnAddress.getHostAddress());
}
UUID dnId = datanodeDetails.getUuid();
try {
@@ -390,14 +387,12 @@ public class SCMNodeManager
LOG.trace("Datanode is already registered. Datanode: {}",
datanodeDetails.toString());
}
- RegisteredCommand.Builder builder =
- RegisteredCommand.newBuilder().setErrorCode(ErrorCode.success)
- .setDatanodeUUID(datanodeDetails.getUuidString())
- .setClusterID(this.clusterID);
- if (hostname != null && ip != null) {
- builder.setHostname(hostname).setIpAddress(ip);
- }
- return builder.build();
+ return RegisteredCommand.newBuilder().setErrorCode(ErrorCode.success)
+ .setDatanodeUUID(datanodeDetails.getUuidString())
+ .setClusterID(this.clusterID)
+ .setHostname(datanodeDetails.getHostName())
+ .setIpAddress(datanodeDetails.getIpAddress())
+ .build();
}
/**
@@ -430,7 +425,7 @@ public class SCMNodeManager
*/
@Override
public void processNodeReport(UUID dnUuid, NodeReportProto nodeReport) {
- this.updateNodeStat(dnUuid, nodeReport);
+ this.updateNodeStat(dnUuid, nodeReport);
}
/**
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2ced3efe/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/TestUtils.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/TestUtils.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/TestUtils.java
index 8d7a2c2..c466570 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/TestUtils.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/TestUtils.java
@@ -18,7 +18,9 @@ package org.apache.hadoop.hdds.scm;
import com.google.common.base.Preconditions;
import org.apache.hadoop.hdds.protocol.proto
- .StorageContainerDatanodeProtocolProtos;
+ .StorageContainerDatanodeProtocolProtos.ContainerInfo;
+import org.apache.hadoop.hdds.protocol.proto
+ .StorageContainerDatanodeProtocolProtos.ContainerReportsProto;
import org.apache.hadoop.hdds.protocol
.proto.StorageContainerDatanodeProtocolProtos.NodeReportProto;
import org.apache.hadoop.hdds.protocol
@@ -31,40 +33,181 @@ import org.apache.hadoop.hdds.protocol.proto
.StorageContainerDatanodeProtocolProtos.StorageTypeProto;
import org.apache.hadoop.hdds.scm.node.SCMNodeManager;
import org.apache.hadoop.hdds.protocol.DatanodeDetails;
+import org.apache.hadoop.ozone.OzoneConsts;
+import org.apache.hadoop.ozone.protocol.commands.RegisteredCommand;
+import java.io.File;
import java.util.ArrayList;
+import java.util.Arrays;
import java.util.List;
-import java.util.Random;
import java.util.UUID;
+import java.util.concurrent.ThreadLocalRandom;
/**
* Stateless helper functions to handler scm/datanode connection.
*/
public final class TestUtils {
+ private static ThreadLocalRandom random = ThreadLocalRandom.current();
+
private TestUtils() {
}
- public static DatanodeDetails getDatanodeDetails(SCMNodeManager nodeManager) {
+ /**
+ * Creates DatanodeDetails with random UUID.
+ *
+ * @return DatanodeDetails
+ */
+ public static DatanodeDetails randomDatanodeDetails() {
+ return createDatanodeDetails(UUID.randomUUID());
+ }
+
+ /**
+ * Creates DatanodeDetails using the given UUID.
+ *
+ * @param uuid Datanode's UUID
+ *
+ * @return DatanodeDetails
+ */
+ private static DatanodeDetails createDatanodeDetails(UUID uuid) {
+ String ipAddress = random.nextInt(256)
+ + "." + random.nextInt(256)
+ + "." + random.nextInt(256)
+ + "." + random.nextInt(256);
+ return createDatanodeDetails(uuid.toString(), "localhost", ipAddress);
+ }
- return getDatanodeDetails(nodeManager, UUID.randomUUID().toString());
+ /**
+ * Generates DatanodeDetails from RegisteredCommand.
+ *
+ * @param registeredCommand registration response from SCM
+ *
+ * @return DatanodeDetails
+ */
+ public static DatanodeDetails getDatanodeDetails(
+ RegisteredCommand registeredCommand) {
+ return createDatanodeDetails(registeredCommand.getDatanodeUUID(),
+ registeredCommand.getHostName(), registeredCommand.getIpAddress());
+ }
+
+ /**
+ * Creates DatanodeDetails with the given information.
+ *
+ * @param uuid Datanode's UUID
+ * @param hostname hostname of Datanode
+ * @param ipAddress ip address of Datanode
+ *
+ * @return DatanodeDetails
+ */
+ private static DatanodeDetails createDatanodeDetails(String uuid,
+ String hostname, String ipAddress) {
+ DatanodeDetails.Port containerPort = DatanodeDetails.newPort(
+ DatanodeDetails.Port.Name.STANDALONE, 0);
+ DatanodeDetails.Port ratisPort = DatanodeDetails.newPort(
+ DatanodeDetails.Port.Name.RATIS, 0);
+ DatanodeDetails.Port restPort = DatanodeDetails.newPort(
+ DatanodeDetails.Port.Name.REST, 0);
+ DatanodeDetails.Builder builder = DatanodeDetails.newBuilder();
+ builder.setUuid(uuid)
+ .setHostName(hostname)
+ .setIpAddress(ipAddress)
+ .addPort(containerPort)
+ .addPort(ratisPort)
+ .addPort(restPort);
+ return builder.build();
+ }
+
+ /**
+ * Creates a random DatanodeDetails and register it with the given
+ * NodeManager.
+ *
+ * @param nodeManager NodeManager
+ *
+ * @return DatanodeDetails
+ */
+ public static DatanodeDetails createRandomDatanodeAndRegister(
+ SCMNodeManager nodeManager) {
+ return getDatanodeDetails(
+ nodeManager.register(randomDatanodeDetails(), null));
+ }
+
+ /**
+ * Get specified number of DatanodeDetails and register them with node
+ * manager.
+ *
+ * @param nodeManager node manager to register the datanode ids.
+ * @param count number of DatanodeDetails needed.
+ *
+ * @return list of DatanodeDetails
+ */
+ public static List<DatanodeDetails> getListOfRegisteredDatanodeDetails(
+ SCMNodeManager nodeManager, int count) {
+ ArrayList<DatanodeDetails> datanodes = new ArrayList<>();
+ for (int i = 0; i < count; i++) {
+ datanodes.add(createRandomDatanodeAndRegister(nodeManager));
+ }
+ return datanodes;
+ }
+
+ /**
+ * Generates a random NodeReport.
+ *
+ * @return NodeReportProto
+ */
+ public static NodeReportProto getRandomNodeReport() {
+ return getRandomNodeReport(1);
+ }
+
+ /**
+ * Generates random NodeReports with the given number of storage reports in it.
+ *
+ * @param numberOfStorageReport number of storage reports this node report
+ * should have
+ * @return NodeReportProto
+ */
+ public static NodeReportProto getRandomNodeReport(int numberOfStorageReport) {
+ UUID nodeId = UUID.randomUUID();
+ return getRandomNodeReport(nodeId, File.separator + nodeId,
+ numberOfStorageReport);
+ }
+
+ /**
+ * Generates random NodeReport for the given nodeId with the given
+ * base path and number of storage reports in it.
+ *
+ * @param nodeId datanode id
+ * @param basePath base path of storage directory
+ * @param numberOfStorageReport number of storage report
+ *
+ * @return NodeReportProto
+ */
+ public static NodeReportProto getRandomNodeReport(UUID nodeId,
+ String basePath, int numberOfStorageReport) {
+ List<StorageReportProto> storageReports = new ArrayList<>();
+ for (int i = 0; i < numberOfStorageReport; i++) {
+ storageReports.add(getRandomStorageReport(nodeId,
+ basePath + File.separator + i));
+ }
+ return createNodeReport(storageReports);
}
/**
- * Create a new DatanodeDetails with NodeID set to the string.
+ * Creates NodeReport with the given storage reports.
+ *
+ * @param reports one or more storage report
*
- * @param uuid - node ID, it is generally UUID.
- * @return DatanodeID.
+ * @return NodeReportProto
*/
- public static DatanodeDetails getDatanodeDetails(SCMNodeManager nodeManager,
- String uuid) {
- DatanodeDetails datanodeDetails = getDatanodeDetails(uuid);
- nodeManager.register(datanodeDetails, null);
- return datanodeDetails;
+ public static NodeReportProto createNodeReport(
+ StorageReportProto... reports) {
+ return createNodeReport(Arrays.asList(reports));
}
/**
- * Create Node Report object.
+ * Creates NodeReport with the given storage reports.
+ *
+ * @param reports storage reports to be included in the node report.
+ *
* @return NodeReportProto
*/
public static NodeReportProto createNodeReport(
@@ -75,100 +218,163 @@ public final class TestUtils {
}
/**
- * Create SCM Storage Report object.
- * @return list of SCMStorageReport
+ * Generates random storage report.
+ *
+ * @param nodeId datanode id for which the storage report belongs to
+ * @param path path of the storage
+ *
+ * @return StorageReportProto
*/
- public static List<StorageReportProto> createStorageReport(long capacity,
- long used, long remaining, String path, StorageTypeProto type, String id,
- int count) {
- List<StorageReportProto> reportList = new ArrayList<>();
- for (int i = 0; i < count; i++) {
- Preconditions.checkNotNull(path);
- Preconditions.checkNotNull(id);
- StorageReportProto.Builder srb = StorageReportProto.newBuilder();
- srb.setStorageUuid(id).setStorageLocation(path).setCapacity(capacity)
- .setScmUsed(used).setRemaining(remaining);
- StorageTypeProto storageTypeProto =
- type == null ? StorageTypeProto.DISK : type;
- srb.setStorageType(storageTypeProto);
- reportList.add(srb.build());
- }
- return reportList;
+ public static StorageReportProto getRandomStorageReport(UUID nodeId,
+ String path) {
+ return createStorageReport(nodeId, path,
+ random.nextInt(1000),
+ random.nextInt(500),
+ random.nextInt(500),
+ StorageTypeProto.DISK);
}
/**
- * Create Command Status report object.
- * @return CommandStatusReportsProto
+ * Creates storage report with the given information.
+ *
+ * @param nodeId datanode id
+ * @param path storage dir
+ * @param capacity storage size
+ * @param used space used
+ * @param remaining space remaining
+ * @param type type of storage
+ *
+ * @return StorageReportProto
*/
- public static CommandStatusReportsProto createCommandStatusReport(
- List<CommandStatus> reports) {
- CommandStatusReportsProto.Builder report = CommandStatusReportsProto
- .newBuilder();
- report.addAllCmdStatus(reports);
- return report.build();
+ public static StorageReportProto createStorageReport(UUID nodeId, String path,
+ long capacity, long used, long remaining, StorageTypeProto type) {
+ Preconditions.checkNotNull(nodeId);
+ Preconditions.checkNotNull(path);
+ StorageReportProto.Builder srb = StorageReportProto.newBuilder();
+ srb.setStorageUuid(nodeId.toString())
+ .setStorageLocation(path)
+ .setCapacity(capacity)
+ .setScmUsed(used)
+ .setRemaining(remaining);
+ StorageTypeProto storageTypeProto =
+ type == null ? StorageTypeProto.DISK : type;
+ srb.setStorageType(storageTypeProto);
+ return srb.build();
}
/**
- * Get specified number of DatanodeDetails and registered them with node
- * manager.
+ * Generates random container reports.
*
- * @param nodeManager - node manager to register the datanode ids.
- * @param count - number of DatanodeDetails needed.
- * @return
+ * @return ContainerReportsProto
*/
- public static List<DatanodeDetails> getListOfRegisteredDatanodeDetails(
- SCMNodeManager nodeManager, int count) {
- ArrayList<DatanodeDetails> datanodes = new ArrayList<>();
- for (int i = 0; i < count; i++) {
- datanodes.add(getDatanodeDetails(nodeManager));
+ public static ContainerReportsProto getRandomContainerReports() {
+ return getRandomContainerReports(1);
+ }
+
+ /**
+ * Generates random container report with the given number of containers.
+ *
+ * @param numberOfContainers number of containers to be in container report
+ *
+ * @return ContainerReportsProto
+ */
+ public static ContainerReportsProto getRandomContainerReports(
+ int numberOfContainers) {
+ List<ContainerInfo> containerInfos = new ArrayList<>();
+ for (int i = 0; i < numberOfContainers; i++) {
+ containerInfos.add(getRandomContainerInfo(i));
}
- return datanodes;
+ return getContainerReports(containerInfos);
}
/**
- * Get a datanode details.
+ * Creates container report with the given ContainerInfo(s).
*
- * @return DatanodeDetails
+ * @param containerInfos one or more ContainerInfo
+ *
+ * @return ContainerReportsProto
*/
- public static DatanodeDetails getDatanodeDetails() {
- return getDatanodeDetails(UUID.randomUUID().toString());
+ public static ContainerReportsProto getContainerReports(
+ ContainerInfo... containerInfos) {
+ return getContainerReports(Arrays.asList(containerInfos));
}
- private static DatanodeDetails getDatanodeDetails(String uuid) {
- Random random = new Random();
- String ipAddress =
- random.nextInt(256) + "." + random.nextInt(256) + "." + random
- .nextInt(256) + "." + random.nextInt(256);
+ /**
+ * Creates container report with the given ContainerInfo(s).
+ *
+ * @param containerInfos list of ContainerInfo
+ *
+ * @return ContainerReportsProto
+ */
+ public static ContainerReportsProto getContainerReports(
+ List<ContainerInfo> containerInfos) {
+ ContainerReportsProto.Builder
+ reportsBuilder = ContainerReportsProto.newBuilder();
+ for (ContainerInfo containerInfo : containerInfos) {
+ reportsBuilder.addReports(containerInfo);
+ }
+ return reportsBuilder.build();
+ }
- String hostName = uuid;
- DatanodeDetails.Port containerPort = DatanodeDetails.newPort(
- DatanodeDetails.Port.Name.STANDALONE, 0);
- DatanodeDetails.Port ratisPort = DatanodeDetails.newPort(
- DatanodeDetails.Port.Name.RATIS, 0);
- DatanodeDetails.Port restPort = DatanodeDetails.newPort(
- DatanodeDetails.Port.Name.REST, 0);
- DatanodeDetails.Builder builder = DatanodeDetails.newBuilder();
- builder.setUuid(uuid)
- .setHostName("localhost")
- .setIpAddress(ipAddress)
- .addPort(containerPort)
- .addPort(ratisPort)
- .addPort(restPort);
- return builder.build();
+ /**
+ * Generates random ContainerInfo.
+ *
+ * @param containerId container id of the ContainerInfo
+ *
+ * @return ContainerInfo
+ */
+ public static ContainerInfo getRandomContainerInfo(long containerId) {
+ return createContainerInfo(containerId,
+ OzoneConsts.GB * 5,
+ random.nextLong(1000),
+ OzoneConsts.GB * random.nextInt(5),
+ random.nextLong(1000),
+ OzoneConsts.GB * random.nextInt(2),
+ random.nextLong(1000),
+ OzoneConsts.GB * random.nextInt(5));
}
/**
- * Get specified number of list of DatanodeDetails.
+ * Creates ContainerInfo with the given details.
*
- * @param count - number of datanode IDs needed.
- * @return
+ * @param containerId id of the container
+ * @param size size of container
+ * @param keyCount number of keys
+ * @param bytesUsed bytes used by the container
+ * @param readCount number of reads
+ * @param readBytes bytes read
+ * @param writeCount number of writes
+ * @param writeBytes bytes written
+ *
+ * @return ContainerInfo
*/
- public static List<DatanodeDetails> getListOfDatanodeDetails(int count) {
- ArrayList<DatanodeDetails> datanodes = new ArrayList<>();
- for (int i = 0; i < count; i++) {
- datanodes.add(getDatanodeDetails());
- }
- return datanodes;
+ public static ContainerInfo createContainerInfo(
+ long containerId, long size, long keyCount, long bytesUsed,
+ long readCount, long readBytes, long writeCount, long writeBytes) {
+ return ContainerInfo.newBuilder()
+ .setContainerID(containerId)
+ .setSize(size)
+ .setKeyCount(keyCount)
+ .setUsed(bytesUsed)
+ .setReadCount(readCount)
+ .setReadBytes(readBytes)
+ .setWriteCount(writeCount)
+ .setWriteBytes(writeBytes)
+ .build();
}
+
+ /**
+ * Create Command Status report object.
+ * @return CommandStatusReportsProto
+ */
+ public static CommandStatusReportsProto createCommandStatusReport(
+ List<CommandStatus> reports) {
+ CommandStatusReportsProto.Builder report = CommandStatusReportsProto
+ .newBuilder();
+ report.addAllCmdStatus(reports);
+ return report.build();
+ }
+
+
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2ced3efe/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/command/TestCommandStatusReportHandler.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/command/TestCommandStatusReportHandler.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/command/TestCommandStatusReportHandler.java
index 5e64e57..eca5b87 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/command/TestCommandStatusReportHandler.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/command/TestCommandStatusReportHandler.java
@@ -97,7 +97,7 @@ public class TestCommandStatusReportHandler implements EventPublisher {
reports) {
CommandStatusReportsProto report = TestUtils.createCommandStatusReport
(reports);
- DatanodeDetails dn = TestUtils.getDatanodeDetails();
+ DatanodeDetails dn = TestUtils.randomDatanodeDetails();
return new SCMDatanodeHeartbeatDispatcher.CommandStatusReportFromDatanode
(dn, report);
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2ced3efe/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/MockNodeManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/MockNodeManager.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/MockNodeManager.java
index 593b780..088b700 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/MockNodeManager.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/MockNodeManager.java
@@ -16,6 +16,7 @@
*/
package org.apache.hadoop.hdds.scm.container;
+import org.apache.hadoop.hdds.scm.TestUtils;
import org.apache.hadoop.hdds.scm.container.placement.metrics.SCMNodeMetric;
import org.apache.hadoop.hdds.scm.container.placement.metrics.SCMNodeStat;
import org.apache.hadoop.hdds.scm.node.NodeManager;
@@ -41,7 +42,6 @@ import java.util.List;
import java.util.Map;
import java.util.UUID;
-import static org.apache.hadoop.hdds.scm.TestUtils.getDatanodeDetails;
import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState.DEAD;
import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState
.HEALTHY;
@@ -80,7 +80,7 @@ public class MockNodeManager implements NodeManager {
aggregateStat = new SCMNodeStat();
if (initializeFakeNodes) {
for (int x = 0; x < nodeCount; x++) {
- DatanodeDetails dd = getDatanodeDetails();
+ DatanodeDetails dd = TestUtils.randomDatanodeDetails();
populateNodeMetric(dd, x);
}
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2ced3efe/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerMapping.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerMapping.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerMapping.java
index 42ab126..79ac9cf 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerMapping.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerMapping.java
@@ -192,7 +192,7 @@ public class TestContainerMapping {
@Test
public void testFullContainerReport() throws IOException {
ContainerInfo info = createContainer();
- DatanodeDetails datanodeDetails = TestUtils.getDatanodeDetails();
+ DatanodeDetails datanodeDetails = TestUtils.randomDatanodeDetails();
List<StorageContainerDatanodeProtocolProtos.ContainerInfo> reports =
new ArrayList<>();
StorageContainerDatanodeProtocolProtos.ContainerInfo.Builder ciBuilder =
@@ -226,7 +226,7 @@ public class TestContainerMapping {
@Test
public void testContainerCloseWithContainerReport() throws IOException {
ContainerInfo info = createContainer();
- DatanodeDetails datanodeDetails = TestUtils.getDatanodeDetails();
+ DatanodeDetails datanodeDetails = TestUtils.randomDatanodeDetails();
List<StorageContainerDatanodeProtocolProtos.ContainerInfo> reports =
new ArrayList<>();
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2ced3efe/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/closer/TestContainerCloser.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/closer/TestContainerCloser.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/closer/TestContainerCloser.java
index f7863bc..cc25544 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/closer/TestContainerCloser.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/closer/TestContainerCloser.java
@@ -219,7 +219,7 @@ public class TestContainerCloser {
.setWriteBytes(2000000000L)
.setDeleteTransactionId(0);
reports.addReports(ciBuilder);
- mapping.processContainerReports(TestUtils.getDatanodeDetails(),
+ mapping.processContainerReports(TestUtils.randomDatanodeDetails(),
reports.build());
}
}
\ No newline at end of file
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2ced3efe/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestSCMContainerPlacementCapacity.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestSCMContainerPlacementCapacity.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestSCMContainerPlacementCapacity.java
index 5966f2a..fea1e4b 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestSCMContainerPlacementCapacity.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestSCMContainerPlacementCapacity.java
@@ -44,7 +44,7 @@ public class TestSCMContainerPlacementCapacity {
List<DatanodeDetails> datanodes = new ArrayList<>();
for (int i = 0; i < 7; i++) {
- datanodes.add(TestUtils.getDatanodeDetails());
+ datanodes.add(TestUtils.randomDatanodeDetails());
}
NodeManager mockNodeManager = Mockito.mock(NodeManager.class);
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2ced3efe/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestSCMContainerPlacementRandom.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestSCMContainerPlacementRandom.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestSCMContainerPlacementRandom.java
index 430c181..3b4426c 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestSCMContainerPlacementRandom.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/placement/algorithms/TestSCMContainerPlacementRandom.java
@@ -43,7 +43,7 @@ public class TestSCMContainerPlacementRandom {
List<DatanodeDetails> datanodes = new ArrayList<>();
for (int i = 0; i < 5; i++) {
- datanodes.add(TestUtils.getDatanodeDetails());
+ datanodes.add(TestUtils.randomDatanodeDetails());
}
NodeManager mockNodeManager = Mockito.mock(NodeManager.class);
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2ced3efe/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestReplicationManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestReplicationManager.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestReplicationManager.java
index e3e876b..99ec59f 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestReplicationManager.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/replication/TestReplicationManager.java
@@ -73,7 +73,12 @@ public class TestReplicationManager {
@Before
public void initReplicationManager() throws IOException {
- listOfDatanodeDetails = TestUtils.getListOfDatanodeDetails(5);
+ listOfDatanodeDetails = new ArrayList<>();
+ listOfDatanodeDetails.add(TestUtils.randomDatanodeDetails());
+ listOfDatanodeDetails.add(TestUtils.randomDatanodeDetails());
+ listOfDatanodeDetails.add(TestUtils.randomDatanodeDetails());
+ listOfDatanodeDetails.add(TestUtils.randomDatanodeDetails());
+ listOfDatanodeDetails.add(TestUtils.randomDatanodeDetails());
containerPlacementPolicy =
(excludedNodes, nodesRequired, sizeRequired) -> listOfDatanodeDetails
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2ced3efe/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestContainerPlacement.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestContainerPlacement.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestContainerPlacement.java
index 48567ee..2fef620 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestContainerPlacement.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestContainerPlacement.java
@@ -142,10 +142,6 @@ public class TestContainerPlacement {
TestUtils.getListOfRegisteredDatanodeDetails(nodeManager, nodeCount);
try {
for (DatanodeDetails datanodeDetails : datanodes) {
- String id = UUID.randomUUID().toString();
- String path = testDir.getAbsolutePath() + "/" + id;
- List<StorageReportProto> reports = TestUtils
- .createStorageReport(capacity, used, remaining, path, null, id, 1);
nodeManager.processHeartbeat(datanodeDetails);
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2ced3efe/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestNodeManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestNodeManager.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestNodeManager.java
index 5275992..cfa20be 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestNodeManager.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestNodeManager.java
@@ -154,8 +154,8 @@ public class TestNodeManager {
try (SCMNodeManager nodeManager = createNodeManager(getConf())) {
// Send some heartbeats from different nodes.
for (int x = 0; x < nodeManager.getMinimumChillModeNodes(); x++) {
- DatanodeDetails datanodeDetails = TestUtils.getDatanodeDetails(
- nodeManager);
+ DatanodeDetails datanodeDetails = TestUtils
+ .createRandomDatanodeAndRegister(nodeManager);
nodeManager.processHeartbeat(datanodeDetails);
}
@@ -200,7 +200,8 @@ public class TestNodeManager {
// Need 100 nodes to come out of chill mode, only one node is sending HB.
nodeManager.setMinimumChillModeNodes(100);
- nodeManager.processHeartbeat(TestUtils.getDatanodeDetails(nodeManager));
+ nodeManager.processHeartbeat(TestUtils
+ .createRandomDatanodeAndRegister(nodeManager));
//TODO: wait for heartbeat to be processed
Thread.sleep(4 * 1000);
assertFalse("Not enough heartbeat, Node manager should have" +
@@ -223,7 +224,7 @@ public class TestNodeManager {
try (SCMNodeManager nodeManager = createNodeManager(getConf())) {
nodeManager.setMinimumChillModeNodes(3);
DatanodeDetails datanodeDetails = TestUtils
- .getDatanodeDetails(nodeManager);
+ .createRandomDatanodeAndRegister(nodeManager);
// Send 10 heartbeat from same node, and assert we never leave chill mode.
for (int x = 0; x < 10; x++) {
@@ -253,7 +254,8 @@ public class TestNodeManager {
conf.getTimeDuration(ScmConfigKeys.OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL,
100, TimeUnit.MILLISECONDS);
SCMNodeManager nodeManager = createNodeManager(conf);
- DatanodeDetails datanodeDetails = TestUtils.getDatanodeDetails(nodeManager);
+ DatanodeDetails datanodeDetails = TestUtils
+ .createRandomDatanodeAndRegister(nodeManager);
nodeManager.close();
// These should never be processed.
@@ -276,14 +278,14 @@ public class TestNodeManager {
OzoneConfiguration conf = getConf();
conf.getTimeDuration(ScmConfigKeys.OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL,
100, TimeUnit.MILLISECONDS);
- DatanodeDetails datanodeDetails = TestUtils.getDatanodeDetails();
- String dnId = datanodeDetails.getUuidString();
+ DatanodeDetails datanodeDetails = TestUtils.randomDatanodeDetails();
+ UUID dnId = datanodeDetails.getUuid();
String storagePath = testDir.getAbsolutePath() + "/" + dnId;
- List<StorageReportProto> reports =
- TestUtils.createStorageReport(100, 10, 90, storagePath, null, dnId, 1);
+ StorageReportProto report =
+ TestUtils.createStorageReport(dnId, storagePath, 100, 10, 90, null);
try (SCMNodeManager nodemanager = createNodeManager(conf)) {
nodemanager.register(datanodeDetails,
- TestUtils.createNodeReport(reports));
+ TestUtils.createNodeReport(report));
List<SCMCommand> command = nodemanager.processHeartbeat(datanodeDetails);
Assert.assertTrue(nodemanager.getAllNodes().contains(datanodeDetails));
Assert.assertTrue("On regular HB calls, SCM responses a "
@@ -331,8 +333,8 @@ public class TestNodeManager {
try (SCMNodeManager nodeManager = createNodeManager(conf)) {
for (int x = 0; x < count; x++) {
- DatanodeDetails datanodeDetails = TestUtils.getDatanodeDetails(
- nodeManager);
+ DatanodeDetails datanodeDetails = TestUtils
+ .createRandomDatanodeAndRegister(nodeManager);
nodeManager.processHeartbeat(datanodeDetails);
}
//TODO: wait for heartbeat to be processed
@@ -421,7 +423,7 @@ public class TestNodeManager {
List<DatanodeDetails> nodeList = createNodeSet(nodeManager, nodeCount);
- DatanodeDetails staleNode = TestUtils.getDatanodeDetails(nodeManager);
+ DatanodeDetails staleNode = TestUtils.createRandomDatanodeAndRegister(nodeManager);
// Heartbeat once
nodeManager.processHeartbeat(staleNode);
@@ -560,11 +562,11 @@ public class TestNodeManager {
*/
try (SCMNodeManager nodeManager = createNodeManager(conf)) {
DatanodeDetails healthyNode =
- TestUtils.getDatanodeDetails(nodeManager);
+ TestUtils.createRandomDatanodeAndRegister(nodeManager);
DatanodeDetails staleNode =
- TestUtils.getDatanodeDetails(nodeManager);
+ TestUtils.createRandomDatanodeAndRegister(nodeManager);
DatanodeDetails deadNode =
- TestUtils.getDatanodeDetails(nodeManager);
+ TestUtils.createRandomDatanodeAndRegister(nodeManager);
nodeManager.processHeartbeat(healthyNode);
nodeManager.processHeartbeat(staleNode);
nodeManager.processHeartbeat(deadNode);
@@ -693,8 +695,9 @@ public class TestNodeManager {
count) {
List<DatanodeDetails> list = new LinkedList<>();
for (int x = 0; x < count; x++) {
- list.add(TestUtils.getDatanodeDetails(nodeManager, UUID.randomUUID()
- .toString()));
+ DatanodeDetails datanodeDetails = TestUtils
+ .createRandomDatanodeAndRegister(nodeManager);
+ list.add(datanodeDetails);
}
return list;
}
@@ -876,8 +879,8 @@ public class TestNodeManager {
try (SCMNodeManager nodeManager = createNodeManager(conf)) {
nodeManager.setMinimumChillModeNodes(10);
- DatanodeDetails datanodeDetails = TestUtils.getDatanodeDetails(
- nodeManager);
+ DatanodeDetails datanodeDetails = TestUtils
+ .createRandomDatanodeAndRegister(nodeManager);
nodeManager.processHeartbeat(datanodeDetails);
String status = nodeManager.getChillModeStatus();
Assert.assertThat(status, containsString("Still in chill " +
@@ -904,7 +907,8 @@ public class TestNodeManager {
// Assert that node manager force enter cannot be overridden by nodes HBs.
for (int x = 0; x < 20; x++) {
- DatanodeDetails datanode = TestUtils.getDatanodeDetails(nodeManager);
+ DatanodeDetails datanode = TestUtils
+ .createRandomDatanodeAndRegister(nodeManager);
nodeManager.processHeartbeat(datanode);
}
@@ -943,14 +947,13 @@ public class TestNodeManager {
try (SCMNodeManager nodeManager = createNodeManager(conf)) {
for (int x = 0; x < nodeCount; x++) {
- DatanodeDetails datanodeDetails = TestUtils.getDatanodeDetails(
- nodeManager);
- String dnId = datanodeDetails.getUuidString();
+ DatanodeDetails datanodeDetails = TestUtils
+ .createRandomDatanodeAndRegister(nodeManager);
+ UUID dnId = datanodeDetails.getUuid();
long free = capacity - used;
String storagePath = testDir.getAbsolutePath() + "/" + dnId;
- List<StorageReportProto> reports = TestUtils
- .createStorageReport(capacity, used, free, storagePath,
- null, dnId, 1);
+ StorageReportProto report = TestUtils
+ .createStorageReport(dnId, storagePath, capacity, used, free, null);
nodeManager.processHeartbeat(datanodeDetails);
}
//TODO: wait for heartbeat to be processed
@@ -990,17 +993,17 @@ public class TestNodeManager {
try (SCMNodeManager nodeManager = createNodeManager(conf)) {
DatanodeDetails datanodeDetails =
- TestUtils.getDatanodeDetails(nodeManager);
+ TestUtils.createRandomDatanodeAndRegister(nodeManager);
final long capacity = 2000;
final long usedPerHeartbeat = 100;
- String dnId = datanodeDetails.getUuidString();
+ UUID dnId = datanodeDetails.getUuid();
for (int x = 0; x < heartbeatCount; x++) {
long scmUsed = x * usedPerHeartbeat;
long remaining = capacity - scmUsed;
String storagePath = testDir.getAbsolutePath() + "/" + dnId;
- List<StorageReportProto> reports = TestUtils
- .createStorageReport(capacity, scmUsed, remaining, storagePath,
- null, dnId, 1);
+ StorageReportProto report = TestUtils
+ .createStorageReport(dnId, storagePath, capacity, scmUsed,
+ remaining, null);
nodeManager.processHeartbeat(datanodeDetails);
Thread.sleep(100);
@@ -1106,21 +1109,20 @@ public class TestNodeManager {
conf.getTimeDuration(ScmConfigKeys.OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL,
100, TimeUnit.MILLISECONDS);
- DatanodeDetails datanodeDetails = TestUtils.getDatanodeDetails();
- String dnId = datanodeDetails.getUuidString();
+ DatanodeDetails datanodeDetails = TestUtils.randomDatanodeDetails();
+ UUID dnId = datanodeDetails.getUuid();
String storagePath = testDir.getAbsolutePath() + "/" + dnId;
- List<StorageReportProto> reports =
- TestUtils.createStorageReport(100, 10, 90,
- storagePath, null, dnId, 1);
+ StorageReportProto report =
+ TestUtils.createStorageReport(dnId, storagePath, 100, 10, 90, null);
EventQueue eq = new EventQueue();
try (SCMNodeManager nodemanager = createNodeManager(conf)) {
eq.addHandler(DATANODE_COMMAND, nodemanager);
nodemanager
- .register(datanodeDetails, TestUtils.createNodeReport(reports));
+ .register(datanodeDetails, TestUtils.createNodeReport(report));
eq.fireEvent(DATANODE_COMMAND,
- new CommandForDatanode(datanodeDetails.getUuid(),
+ new CommandForDatanode<>(datanodeDetails.getUuid(),
new CloseContainerCommand(1L, ReplicationType.STAND_ALONE)));
eq.processAll(1000L);
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2ced3efe/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestNodeReportHandler.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestNodeReportHandler.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestNodeReportHandler.java
index 3cbde4b..e2e89ab 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestNodeReportHandler.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestNodeReportHandler.java
@@ -17,7 +17,6 @@
package org.apache.hadoop.hdds.scm.node;
import java.io.IOException;
-import java.util.List;
import java.util.UUID;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.hdds.protocol.DatanodeDetails;
@@ -55,24 +54,22 @@ public class TestNodeReportHandler implements EventPublisher {
@Test
public void testNodeReport() throws IOException {
- DatanodeDetails dn = TestUtils.getDatanodeDetails();
- List<StorageReportProto> reports =
- TestUtils.createStorageReport(100, 10, 90, storagePath, null,
- dn.getUuid().toString(), 1);
+ DatanodeDetails dn = TestUtils.randomDatanodeDetails();
+ StorageReportProto storageOne = TestUtils
+ .createStorageReport(dn.getUuid(), storagePath, 100, 10, 90, null);
nodeReportHandler.onMessage(
- getNodeReport(dn, reports), this);
+ getNodeReport(dn, storageOne), this);
SCMNodeMetric nodeMetric = nodeManager.getNodeStat(dn);
Assert.assertTrue(nodeMetric.get().getCapacity().get() == 100);
Assert.assertTrue(nodeMetric.get().getRemaining().get() == 90);
Assert.assertTrue(nodeMetric.get().getScmUsed().get() == 10);
- reports =
- TestUtils.createStorageReport(100, 10, 90, storagePath, null,
- dn.getUuid().toString(), 2);
+ StorageReportProto storageTwo = TestUtils
+ .createStorageReport(dn.getUuid(), storagePath, 100, 10, 90, null);
nodeReportHandler.onMessage(
- getNodeReport(dn, reports), this);
+ getNodeReport(dn, storageOne, storageTwo), this);
nodeMetric = nodeManager.getNodeStat(dn);
Assert.assertTrue(nodeMetric.get().getCapacity().get() == 200);
@@ -82,7 +79,7 @@ public class TestNodeReportHandler implements EventPublisher {
}
private NodeReportFromDatanode getNodeReport(DatanodeDetails dn,
- List<StorageReportProto> reports) {
+ StorageReportProto... reports) {
NodeReportProto nodeReportProto = TestUtils.createNodeReport(reports);
return new NodeReportFromDatanode(dn, nodeReportProto);
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2ced3efe/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestSCMNodeStorageStatMap.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestSCMNodeStorageStatMap.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestSCMNodeStorageStatMap.java
index 072dee7..623fc16 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestSCMNodeStorageStatMap.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestSCMNodeStorageStatMap.java
@@ -139,18 +139,17 @@ public class TestSCMNodeStorageStatMap {
SCMNodeStorageStatMap map = new SCMNodeStorageStatMap(conf);
map.insertNewDatanode(key, reportSet);
Assert.assertTrue(map.isKnownDatanode(key));
- String storageId = UUID.randomUUID().toString();
+ UUID storageId = UUID.randomUUID();
String path =
GenericTestUtils.getRandomizedTempPath().concat("/" + storageId);
StorageLocationReport report = reportSet.iterator().next();
long reportCapacity = report.getCapacity();
long reportScmUsed = report.getScmUsed();
long reportRemaining = report.getRemaining();
- List<StorageReportProto> reports = TestUtils
- .createStorageReport(reportCapacity, reportScmUsed, reportRemaining,
- path, null, storageId, 1);
+ StorageReportProto storageReport = TestUtils.createStorageReport(storageId,
+ path, reportCapacity, reportScmUsed, reportRemaining, null);
StorageReportResult result =
- map.processNodeReport(key, TestUtils.createNodeReport(reports));
+ map.processNodeReport(key, TestUtils.createNodeReport(storageReport));
Assert.assertEquals(result.getStatus(),
SCMNodeStorageStatMap.ReportStatus.ALL_IS_WELL);
StorageContainerDatanodeProtocolProtos.NodeReportProto.Builder nrb =
@@ -162,8 +161,8 @@ public class TestSCMNodeStorageStatMap {
SCMNodeStorageStatMap.ReportStatus.ALL_IS_WELL);
reportList.add(TestUtils
- .createStorageReport(reportCapacity, reportCapacity, 0, path, null,
- UUID.randomUUID().toString(), 1).get(0));
+ .createStorageReport(UUID.randomUUID(), path, reportCapacity,
+ reportCapacity, 0, null));
result = map.processNodeReport(key, TestUtils.createNodeReport(reportList));
Assert.assertEquals(result.getStatus(),
SCMNodeStorageStatMap.ReportStatus.STORAGE_OUT_OF_SPACE);
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2ced3efe/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/TestSCMDatanodeHeartbeatDispatcher.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/TestSCMDatanodeHeartbeatDispatcher.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/TestSCMDatanodeHeartbeatDispatcher.java
index 1b79ebf..6a0b909 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/TestSCMDatanodeHeartbeatDispatcher.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/server/TestSCMDatanodeHeartbeatDispatcher.java
@@ -75,7 +75,7 @@ public class TestSCMDatanodeHeartbeatDispatcher {
}
});
- DatanodeDetails datanodeDetails = TestUtils.getDatanodeDetails();
+ DatanodeDetails datanodeDetails = TestUtils.randomDatanodeDetails();
SCMHeartbeatRequestProto heartbeat =
SCMHeartbeatRequestProto.newBuilder()
@@ -121,7 +121,7 @@ public class TestSCMDatanodeHeartbeatDispatcher {
}
});
- DatanodeDetails datanodeDetails = TestUtils.getDatanodeDetails();
+ DatanodeDetails datanodeDetails = TestUtils.randomDatanodeDetails();
SCMHeartbeatRequestProto heartbeat =
SCMHeartbeatRequestProto.newBuilder()
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2ced3efe/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/common/TestEndPoint.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/common/TestEndPoint.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/common/TestEndPoint.java
index 6619d26..e24e73e 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/common/TestEndPoint.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/common/TestEndPoint.java
@@ -16,15 +16,13 @@
*/
package org.apache.hadoop.ozone.container.common;
+import java.util.List;
import java.util.Map;
-import org.apache.commons.codec.digest.DigestUtils;
-import org.apache.commons.lang3.RandomUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.hdds.HddsConfigKeys;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos;
import org.apache.hadoop.hdds.protocol.proto.
StorageContainerDatanodeProtocolProtos.CloseContainerCommandProto;
import org.apache.hadoop.hdds.protocol.proto.
@@ -44,8 +42,6 @@ import org.apache.hadoop.hdds.scm.VersionInfo;
import org.apache.hadoop.hdds.protocol.DatanodeDetails;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
import org.apache.hadoop.hdds.protocol.proto
- .StorageContainerDatanodeProtocolProtos.ContainerReportsProto;
-import org.apache.hadoop.hdds.protocol.proto
.StorageContainerDatanodeProtocolProtos.SCMHeartbeatRequestProto;
import org.apache.hadoop.hdds.protocol.proto
.StorageContainerDatanodeProtocolProtos.SCMHeartbeatResponseProto;
@@ -57,8 +53,6 @@ import org.apache.hadoop.hdds.protocol.proto
.StorageContainerDatanodeProtocolProtos.SCMVersionResponseProto;
import org.apache.hadoop.ipc.RPC;
import org.apache.hadoop.ozone.OzoneConfigKeys;
-import org.apache.hadoop.ozone.OzoneConsts;
-import org.apache.hadoop.ozone.container.common.helpers.ContainerReport;
import org.apache.hadoop.ozone.container.common.statemachine
.DatanodeStateMachine;
import org.apache.hadoop.ozone.container.common.statemachine
@@ -84,10 +78,8 @@ import static org.mockito.Mockito.mock;
import java.io.File;
import java.net.InetSocketAddress;
-import java.util.List;
import java.util.UUID;
-import static org.apache.hadoop.hdds.scm.TestUtils.getDatanodeDetails;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY;
import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_METADATA_DIRS;
import static org.apache.hadoop.ozone.container.common.ContainerTestUtils
@@ -159,7 +151,7 @@ public class TestEndPoint {
OzoneConfiguration conf = SCMTestUtils.getConf();
try (EndpointStateMachine rpcEndPoint = createEndpoint(conf,
serverAddress, 1000)) {
- OzoneContainer ozoneContainer = new OzoneContainer(getDatanodeDetails(),
+ OzoneContainer ozoneContainer = new OzoneContainer(TestUtils.randomDatanodeDetails(),
conf);
rpcEndPoint.setState(EndpointStateMachine.EndPointStates.GETVERSION);
VersionEndpointTask versionTask = new VersionEndpointTask(rpcEndPoint,
@@ -183,8 +175,8 @@ public class TestEndPoint {
serverAddress, 1000)) {
GenericTestUtils.LogCapturer logCapturer = GenericTestUtils.LogCapturer
.captureLogs(VersionEndpointTask.LOG);
- OzoneContainer ozoneContainer = new OzoneContainer(getDatanodeDetails(),
- conf);
+ OzoneContainer ozoneContainer = new OzoneContainer(TestUtils
+ .randomDatanodeDetails(), conf);
rpcEndPoint.setState(EndpointStateMachine.EndPointStates.GETVERSION);
VersionEndpointTask versionTask = new VersionEndpointTask(rpcEndPoint,
conf, ozoneContainer);
@@ -235,7 +227,7 @@ public class TestEndPoint {
try (EndpointStateMachine rpcEndPoint = createEndpoint(conf,
nonExistentServerAddress, 1000)) {
rpcEndPoint.setState(EndpointStateMachine.EndPointStates.GETVERSION);
- OzoneContainer ozoneContainer = new OzoneContainer(getDatanodeDetails(),
+ OzoneContainer ozoneContainer = new OzoneContainer(TestUtils.randomDatanodeDetails(),
conf);
VersionEndpointTask versionTask = new VersionEndpointTask(rpcEndPoint,
conf, ozoneContainer);
@@ -262,7 +254,7 @@ public class TestEndPoint {
try (EndpointStateMachine rpcEndPoint = createEndpoint(conf,
serverAddress, (int) rpcTimeout)) {
rpcEndPoint.setState(EndpointStateMachine.EndPointStates.GETVERSION);
- OzoneContainer ozoneContainer = new OzoneContainer(getDatanodeDetails(),
+ OzoneContainer ozoneContainer = new OzoneContainer(TestUtils.randomDatanodeDetails(),
conf);
VersionEndpointTask versionTask = new VersionEndpointTask(rpcEndPoint,
conf, ozoneContainer);
@@ -280,14 +272,14 @@ public class TestEndPoint {
@Test
public void testRegister() throws Exception {
- DatanodeDetails nodeToRegister = getDatanodeDetails();
+ DatanodeDetails nodeToRegister = TestUtils.randomDatanodeDetails();
try (EndpointStateMachine rpcEndPoint = createEndpoint(
SCMTestUtils.getConf(), serverAddress, 1000)) {
SCMRegisteredResponseProto responseProto = rpcEndPoint.getEndPoint()
.register(nodeToRegister.getProtoBufMessage(), TestUtils
.createNodeReport(
- getStorageReports(nodeToRegister.getUuidString())),
- createContainerReport(10, nodeToRegister));
+ getStorageReports(nodeToRegister.getUuid())),
+ TestUtils.getRandomContainerReports(10));
Assert.assertNotNull(responseProto);
Assert.assertEquals(nodeToRegister.getUuidString(),
responseProto.getDatanodeUUID());
@@ -298,9 +290,9 @@ public class TestEndPoint {
}
}
- private List<StorageReportProto> getStorageReports(String id) {
+ private StorageReportProto getStorageReports(UUID id) {
String storagePath = testDir.getAbsolutePath() + "/" + id;
- return TestUtils.createStorageReport(100, 10, 90, storagePath, null, id, 1);
+ return TestUtils.createStorageReport(id, storagePath, 100, 10, 90, null);
}
private EndpointStateMachine registerTaskHelper(InetSocketAddress scmAddress,
@@ -312,13 +304,13 @@ public class TestEndPoint {
rpcEndPoint.setState(EndpointStateMachine.EndPointStates.REGISTER);
OzoneContainer ozoneContainer = mock(OzoneContainer.class);
when(ozoneContainer.getNodeReport()).thenReturn(TestUtils
- .createNodeReport(getStorageReports(UUID.randomUUID().toString())));
+ .createNodeReport(getStorageReports(UUID.randomUUID())));
when(ozoneContainer.getContainerReport()).thenReturn(
- createContainerReport(10, null));
+ TestUtils.getRandomContainerReports(10));
RegisterEndpointTask endpointTask =
new RegisterEndpointTask(rpcEndPoint, conf, ozoneContainer);
if (!clearDatanodeDetails) {
- DatanodeDetails datanodeDetails = TestUtils.getDatanodeDetails();
+ DatanodeDetails datanodeDetails = TestUtils.randomDatanodeDetails();
endpointTask.setDatanodeDetails(datanodeDetails);
}
endpointTask.call();
@@ -371,15 +363,14 @@ public class TestEndPoint {
@Test
public void testHeartbeat() throws Exception {
- DatanodeDetails dataNode = getDatanodeDetails();
+ DatanodeDetails dataNode = TestUtils.randomDatanodeDetails();
try (EndpointStateMachine rpcEndPoint =
createEndpoint(SCMTestUtils.getConf(),
serverAddress, 1000)) {
- String storageId = UUID.randomUUID().toString();
SCMHeartbeatRequestProto request = SCMHeartbeatRequestProto.newBuilder()
.setDatanodeDetails(dataNode.getProtoBufMessage())
.setNodeReport(TestUtils.createNodeReport(
- getStorageReports(storageId)))
+ getStorageReports(UUID.randomUUID())))
.build();
SCMHeartbeatResponseProto responseProto = rpcEndPoint.getEndPoint()
@@ -391,11 +382,10 @@ public class TestEndPoint {
@Test
public void testHeartbeatWithCommandStatusReport() throws Exception {
- DatanodeDetails dataNode = getDatanodeDetails();
+ DatanodeDetails dataNode = TestUtils.randomDatanodeDetails();
try (EndpointStateMachine rpcEndPoint =
createEndpoint(SCMTestUtils.getConf(),
serverAddress, 1000)) {
- String storageId = UUID.randomUUID().toString();
// Add some scmCommands for heartbeat response
addScmCommands();
@@ -403,7 +393,7 @@ public class TestEndPoint {
SCMHeartbeatRequestProto request = SCMHeartbeatRequestProto.newBuilder()
.setDatanodeDetails(dataNode.getProtoBufMessage())
.setNodeReport(TestUtils.createNodeReport(
- getStorageReports(storageId)))
+ getStorageReports(UUID.randomUUID())))
.build();
SCMHeartbeatResponseProto responseProto = rpcEndPoint.getEndPoint()
@@ -482,11 +472,11 @@ public class TestEndPoint {
// Create a datanode state machine for stateConext used by endpoint task
try (DatanodeStateMachine stateMachine = new DatanodeStateMachine(
- TestUtils.getDatanodeDetails(), conf);
- EndpointStateMachine rpcEndPoint =
+ TestUtils.randomDatanodeDetails(), conf);
+ EndpointStateMachine rpcEndPoint =
createEndpoint(conf, scmAddress, rpcTimeout)) {
HddsProtos.DatanodeDetailsProto datanodeDetailsProto =
- getDatanodeDetails().getProtoBufMessage();
+ TestUtils.randomDatanodeDetails().getProtoBufMessage();
rpcEndPoint.setState(EndpointStateMachine.EndPointStates.HEARTBEAT);
final StateContext stateContext =
@@ -530,26 +520,4 @@ public class TestEndPoint {
lessThanOrEqualTo(rpcTimeout + tolerance));
}
- private ContainerReportsProto createContainerReport(
- int count, DatanodeDetails datanodeDetails) {
- StorageContainerDatanodeProtocolProtos.ContainerReportsProto.Builder
- reportsBuilder = StorageContainerDatanodeProtocolProtos
- .ContainerReportsProto.newBuilder();
- for (int x = 0; x < count; x++) {
- long containerID = RandomUtils.nextLong();
- ContainerReport report = new ContainerReport(containerID,
- DigestUtils.sha256Hex("Simulated"));
- report.setKeyCount(1000);
- report.setSize(OzoneConsts.GB * 5);
- report.setBytesUsed(OzoneConsts.GB * 2);
- report.setReadCount(100);
- report.setReadBytes(OzoneConsts.GB * 1);
- report.setWriteCount(50);
- report.setWriteBytes(OzoneConsts.GB * 2);
-
- reportsBuilder.addReports(report.getProtoBufMessage());
- }
- return reportsBuilder.build();
- }
-
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2ced3efe/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestMiniOzoneCluster.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestMiniOzoneCluster.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestMiniOzoneCluster.java
index 50cdd54..3c9e0c3 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestMiniOzoneCluster.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestMiniOzoneCluster.java
@@ -109,9 +109,9 @@ public class TestMiniOzoneCluster {
@Test
public void testDatanodeIDPersistent() throws Exception {
// Generate IDs for testing
- DatanodeDetails id1 = TestUtils.getDatanodeDetails();
- DatanodeDetails id2 = TestUtils.getDatanodeDetails();
- DatanodeDetails id3 = TestUtils.getDatanodeDetails();
+ DatanodeDetails id1 = TestUtils.randomDatanodeDetails();
+ DatanodeDetails id2 = TestUtils.randomDatanodeDetails();
+ DatanodeDetails id3 = TestUtils.randomDatanodeDetails();
id1.setPort(DatanodeDetails.newPort(Port.Name.STANDALONE, 1));
id2.setPort(DatanodeDetails.newPort(Port.Name.STANDALONE, 2));
id3.setPort(DatanodeDetails.newPort(Port.Name.STANDALONE, 3));
@@ -162,11 +162,11 @@ public class TestMiniOzoneCluster {
true);
try (
DatanodeStateMachine sm1 = new DatanodeStateMachine(
- TestUtils.getDatanodeDetails(), ozoneConf);
+ TestUtils.randomDatanodeDetails(), ozoneConf);
DatanodeStateMachine sm2 = new DatanodeStateMachine(
- TestUtils.getDatanodeDetails(), ozoneConf);
+ TestUtils.randomDatanodeDetails(), ozoneConf);
DatanodeStateMachine sm3 = new DatanodeStateMachine(
- TestUtils.getDatanodeDetails(), ozoneConf)
+ TestUtils.randomDatanodeDetails(), ozoneConf)
) {
HashSet<Integer> ports = new HashSet<Integer>();
assertTrue(ports.add(sm1.getContainer().getContainerServerPort()));
@@ -185,11 +185,11 @@ public class TestMiniOzoneCluster {
ozoneConf.setBoolean(OzoneConfigKeys.DFS_CONTAINER_IPC_RANDOM_PORT, false);
try (
DatanodeStateMachine sm1 = new DatanodeStateMachine(
- TestUtils.getDatanodeDetails(), ozoneConf);
+ TestUtils.randomDatanodeDetails(), ozoneConf);
DatanodeStateMachine sm2 = new DatanodeStateMachine(
- TestUtils.getDatanodeDetails(), ozoneConf);
+ TestUtils.randomDatanodeDetails(), ozoneConf);
DatanodeStateMachine sm3 = new DatanodeStateMachine(
- TestUtils.getDatanodeDetails(), ozoneConf)
+ TestUtils.randomDatanodeDetails(), ozoneConf)
) {
HashSet<Integer> ports = new HashSet<Integer>();
assertTrue(ports.add(sm1.getContainer().getContainerServerPort()));
@@ -204,7 +204,7 @@ public class TestMiniOzoneCluster {
private void createMalformedIDFile(File malformedFile)
throws IOException{
malformedFile.delete();
- DatanodeDetails id = TestUtils.getDatanodeDetails();
+ DatanodeDetails id = TestUtils.randomDatanodeDetails();
ContainerUtils.writeDatanodeDetailsTo(id, malformedFile);
FileOutputStream out = new FileOutputStream(malformedFile);
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2ced3efe/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/metrics/TestContainerMetrics.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/metrics/TestContainerMetrics.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/metrics/TestContainerMetrics.java
index b1c2065..13ed192 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/metrics/TestContainerMetrics.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/metrics/TestContainerMetrics.java
@@ -72,7 +72,7 @@ public class TestContainerMetrics {
conf.setInt(DFSConfigKeys.DFS_METRICS_PERCENTILES_INTERVALS_KEY,
interval);
- DatanodeDetails datanodeDetails = TestUtils.getDatanodeDetails();
+ DatanodeDetails datanodeDetails = TestUtils.randomDatanodeDetails();
conf.set(ScmConfigKeys.HDDS_DATANODE_DIR_KEY, path);
VolumeSet volumeSet = new VolumeSet(
datanodeDetails.getUuidString(), conf);
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2ced3efe/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java
index 18b325b..1522283 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java
@@ -64,7 +64,7 @@ public class TestOzoneContainer {
.getPort(DatanodeDetails.Port.Name.STANDALONE).getValue());
conf.setBoolean(OzoneConfigKeys.DFS_CONTAINER_IPC_RANDOM_PORT, false);
- container = new OzoneContainer(TestUtils.getDatanodeDetails(),
+ container = new OzoneContainer(TestUtils.randomDatanodeDetails(),
conf);
//Setting scmId, as we start manually ozone container.
container.getDispatcher().setScmId(UUID.randomUUID().toString());
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2ced3efe/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/server/TestContainerServer.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/server/TestContainerServer.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/server/TestContainerServer.java
index 3605677..bdb26fb 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/server/TestContainerServer.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/server/TestContainerServer.java
@@ -102,7 +102,7 @@ public class TestContainerServer {
@Test
public void testClientServer() throws Exception {
- DatanodeDetails datanodeDetails = TestUtils.getDatanodeDetails();
+ DatanodeDetails datanodeDetails = TestUtils.randomDatanodeDetails();
runTestClientServer(1,
(pipeline, conf) -> conf.setInt(OzoneConfigKeys.DFS_CONTAINER_IPC_PORT,
pipeline.getLeader()
@@ -216,7 +216,7 @@ public class TestContainerServer {
HddsDispatcher dispatcher = new HddsDispatcher(
conf, mock(ContainerSet.class), mock(VolumeSet.class));
dispatcher.init();
- DatanodeDetails datanodeDetails = TestUtils.getDatanodeDetails();
+ DatanodeDetails datanodeDetails = TestUtils.randomDatanodeDetails();
server = new XceiverServer(datanodeDetails, conf, dispatcher);
client = new XceiverClient(pipeline, conf);
---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org