Posted to commits@ozone.apache.org by sa...@apache.org on 2020/02/10 02:23:34 UTC
[hadoop-ozone] 16/18: HDDS-2913 Update config names and CLI for
multi-raft feature. (#462)
This is an automated email from the ASF dual-hosted git repository.
sammichen pushed a commit to branch HDDS-1564
in repository https://gitbox.apache.org/repos/asf/hadoop-ozone.git
commit 8b42cdcfc1e6a880f41f79842b2db0e5bd1b456d
Author: Li Cheng <bl...@gmail.com>
AuthorDate: Tue Jan 28 02:19:33 2020 +0800
HDDS-2913 Update config names and CLI for multi-raft feature. (#462)
---
.../org/apache/hadoop/hdds/scm/ScmConfigKeys.java | 18 +++++-----
.../apache/hadoop/hdds/scm/pipeline/Pipeline.java | 19 +++++++----
.../common/src/main/resources/ozone-default.xml | 9 ++---
.../hdds/scm/node/states/Node2PipelineMap.java | 5 ++-
.../hdds/scm/pipeline/PipelinePlacementPolicy.java | 38 ++++++++++------------
.../hdds/scm/pipeline/RatisPipelineProvider.java | 8 ++---
.../hdds/scm/pipeline/RatisPipelineUtils.java | 16 +++------
.../hdds/scm/pipeline/SCMPipelineManager.java | 30 ++++++++++-------
.../container/TestCloseContainerEventHandler.java | 2 +-
.../hadoop/hdds/scm/node/TestDeadNodeHandler.java | 4 +--
.../TestPipelineDatanodesIntersection.java | 32 ++++++++++--------
.../scm/pipeline/TestPipelinePlacementPolicy.java | 8 ++---
.../scm/pipeline/TestRatisPipelineProvider.java | 2 +-
.../hdds/scm/pipeline/TestSCMPipelineManager.java | 4 +--
.../hdds/scm/cli/datanode/ListInfoSubcommand.java | 11 +++----
.../scm/cli/pipeline/ListPipelinesSubcommand.java | 11 +++----
.../apache/hadoop/fs/ozone/TestOzoneFsHAURLs.java | 2 +-
.../TestRatisPipelineCreateAndDestroy.java | 4 +--
.../safemode/TestSCMSafeModeWithPipelineRules.java | 4 +--
.../apache/hadoop/ozone/MiniOzoneClusterImpl.java | 6 ++--
.../rpc/TestBlockOutputStreamWithFailures.java | 2 +-
.../rpc/TestContainerReplicationEndToEnd.java | 4 +--
.../client/rpc/TestFailureHandlingByClient.java | 2 +-
.../rpc/TestMultiBlockWritesWithDnFailures.java | 2 +-
.../ozone/client/rpc/TestWatchForCommit.java | 4 +--
.../TestCloseContainerByPipeline.java | 4 +--
.../hadoop/ozone/om/TestOzoneManagerRestart.java | 4 +--
.../hadoop/ozone/scm/node/TestQueryNode.java | 2 +-
28 files changed, 129 insertions(+), 128 deletions(-)
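For context, the rename means deployments and tests set the new keys instead of the old ones. Below is a minimal, illustrative sketch (not part of this patch) of configuring the renamed limits through OzoneConfiguration, mirroring the test changes later in this diff; the key constants are the new names introduced by this commit:

    import org.apache.hadoop.hdds.conf.OzoneConfiguration;
    import org.apache.hadoop.hdds.scm.ScmConfigKeys;

    public class PipelineLimitConfigSketch {
      public static void main(String[] args) {
        OzoneConfiguration conf = new OzoneConfiguration();
        // Replaces ozone.scm.datanode.max.pipeline.engagement.
        conf.setInt(ScmConfigKeys.OZONE_DATANODE_PIPELINE_LIMIT, 2);
        // Replaces ozone.scm.pipeline.number.limit; 0 means the limit
        // does not take effect.
        conf.setInt(ScmConfigKeys.OZONE_SCM_RATIS_PIPELINE_LIMIT, 0);
      }
    }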
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java
index 980a710..eb2a9e5 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/ScmConfigKeys.java
@@ -293,18 +293,18 @@ public final class ScmConfigKeys {
"ozone.scm.pipeline.owner.container.count";
public static final int OZONE_SCM_PIPELINE_OWNER_CONTAINER_COUNT_DEFAULT = 3;
// Pipeline placement policy:
- // the max number of pipelines can a single datanode be engaged in.
- public static final String OZONE_DATANODE_MAX_PIPELINE_ENGAGEMENT =
- "ozone.scm.datanode.max.pipeline.engagement";
- // Setting to zero by default means this limit doesn't take effect.
- public static final int OZONE_DATANODE_MAX_PIPELINE_ENGAGEMENT_DEFAULT = 2;
+ // Upper limit for how many pipelines a datanode can engage in.
+ public static final String OZONE_DATANODE_PIPELINE_LIMIT =
+ "ozone.datanode.pipeline.limit";
+ public static final int OZONE_DATANODE_PIPELINE_LIMIT_DEFAULT = 2;
- // Upper limit for how many pipelines can be created.
+ // Upper limit for how many pipelines can be created
+ // across the cluster nodes managed by SCM.
// Only for test purposes now.
- public static final String OZONE_SCM_PIPELINE_NUMBER_LIMIT =
- "ozone.scm.pipeline.number.limit";
+ public static final String OZONE_SCM_RATIS_PIPELINE_LIMIT =
+ "ozone.scm.ratis.pipeline.limit";
// Setting to zero by default means this limit doesn't take effect.
- public static final int OZONE_SCM_PIPELINE_NUMBER_LIMIT_DEFAULT = 0;
+ public static final int OZONE_SCM_RATIS_PIPELINE_LIMIT_DEFAULT = 0;
public static final String
OZONE_SCM_KEY_VALUE_CONTAINER_DELETION_CHOOSING_POLICY =
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/pipeline/Pipeline.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/pipeline/Pipeline.java
index 1dc2373..6849494 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/pipeline/Pipeline.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/pipeline/Pipeline.java
@@ -19,6 +19,7 @@
package org.apache.hadoop.hdds.scm.pipeline;
import java.io.IOException;
+import java.time.Instant;
import java.util.ArrayList;
import java.util.Iterator;
import java.util.LinkedHashMap;
@@ -57,7 +58,7 @@ public final class Pipeline {
// Current reported Leader for the pipeline
private UUID leaderId;
// Timestamp for pipeline upon creation
- private Long creationTimestamp;
+ private Instant creationTimestamp;
// Only valid for Ratis THREE pipelines. No need to persist.
private int nodeIdsHash;
@@ -74,7 +75,7 @@ public final class Pipeline {
this.factor = factor;
this.state = state;
this.nodeStatus = nodeStatus;
- this.creationTimestamp = System.currentTimeMillis();
+ this.creationTimestamp = Instant.now();
this.nodeIdsHash = 0;
}
@@ -119,7 +120,7 @@ public final class Pipeline {
*
* @return Creation Timestamp
*/
- public Long getCreationTimestamp() {
+ public Instant getCreationTimestamp() {
return creationTimestamp;
}
@@ -128,7 +129,7 @@ public final class Pipeline {
*
* @param creationTimestamp
*/
- void setCreationTimestamp(Long creationTimestamp) {
+ void setCreationTimestamp(Instant creationTimestamp) {
this.creationTimestamp = creationTimestamp;
}
@@ -253,7 +254,7 @@ public final class Pipeline {
.setFactor(factor)
.setState(PipelineState.getProtobuf(state))
.setLeaderID(leaderId != null ? leaderId.toString() : "")
- .setCreationTimeStamp(creationTimestamp)
+ .setCreationTimeStamp(creationTimestamp.toEpochMilli())
.addAllMembers(nodeStatus.keySet().stream()
.map(DatanodeDetails::getProtoBufMessage)
.collect(Collectors.toList()));
@@ -289,6 +290,7 @@ public final class Pipeline {
.setNodes(pipeline.getMembersList().stream()
.map(DatanodeDetails::getFromProtoBuf).collect(Collectors.toList()))
.setNodesInOrder(pipeline.getMemberOrdersList())
+ .setCreateTimestamp(pipeline.getCreationTimeStamp())
.build();
}
@@ -357,7 +359,7 @@ public final class Pipeline {
private List<Integer> nodeOrder = null;
private List<DatanodeDetails> nodesInOrder = null;
private UUID leaderId = null;
- private Long creationTimestamp = null;
+ private Instant creationTimestamp = null;
private int nodeIdsHash = 0;
public Builder() {}
@@ -410,6 +412,11 @@ public final class Pipeline {
return this;
}
+ public Builder setCreateTimestamp(long createTimestamp) {
+ this.creationTimestamp = Instant.ofEpochMilli(createTimestamp);
+ return this;
+ }
+
public Builder setNodeIdsHash(int nodeIdsHash1) {
this.nodeIdsHash = nodeIdsHash1;
return this;
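The timestamp change above keeps an Instant in memory and converts to epoch milliseconds only at the protobuf boundary (toEpochMilli() on write, Instant.ofEpochMilli() in the builder on read). A self-contained sketch of that round trip, plain JDK:

    import java.time.Instant;

    public class TimestampRoundTripSketch {
      public static void main(String[] args) {
        Instant created = Instant.now();                 // in-memory form
        long onWire = created.toEpochMilli();            // written to protobuf
        Instant restored = Instant.ofEpochMilli(onWire); // read back
        // Note: toEpochMilli() truncates sub-millisecond precision.
        System.out.println(created + " -> " + onWire + " -> " + restored);
      }
    }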
diff --git a/hadoop-hdds/common/src/main/resources/ozone-default.xml b/hadoop-hdds/common/src/main/resources/ozone-default.xml
index 31fe046..875f79f 100644
--- a/hadoop-hdds/common/src/main/resources/ozone-default.xml
+++ b/hadoop-hdds/common/src/main/resources/ozone-default.xml
@@ -791,14 +791,14 @@
</description>
</property>
<property>
- <name>ozone.scm.datanode.max.pipeline.engagement</name>
+ <name>ozone.datanode.pipeline.limit</name>
<value>2</value>
<tag>OZONE, SCM, PIPELINE</tag>
<description>Max number of pipelines a datanode can be engaged in.
</description>
</property>
<property>
- <name>ozone.scm.pipeline.number.limit</name>
+ <name>ozone.scm.ratis.pipeline.limit</name>
<value>0</value>
<tag>OZONE, SCM, PIPELINE</tag>
<description>Upper limit for how many pipelines can be OPEN in SCM.
@@ -813,8 +813,9 @@
<description>
Timeout for every pipeline to stay in ALLOCATED stage. When pipeline is created,
it should be at OPEN stage once pipeline report is successfully received by SCM.
- If a pipeline stays at ALLOCATED for too long, it should be scrubbed so that new
- pipeline can be created. This timeout is for how long pipeline can stay at ALLOCATED
+ If a pipeline stays at ALLOCATED longer than the specified period of time,
+ it should be scrubbed so that a new pipeline can be created.
+ This timeout is for how long a pipeline can stay at ALLOCATED
stage until it gets scrubbed.
</description>
</property>
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/states/Node2PipelineMap.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/states/Node2PipelineMap.java
index 18809ed..6533cb8 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/states/Node2PipelineMap.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/states/Node2PipelineMap.java
@@ -57,8 +57,7 @@ public class Node2PipelineMap extends Node2ObjectsMap<PipelineID> {
* @return Number of pipelines or 0.
*/
public int getPipelinesCount(UUID datanode) {
- Set<PipelineID> pipelines = getObjects(datanode);
- return pipelines == null ? 0 : pipelines.size();
+ return getObjects(datanode).size();
}
/**
@@ -80,7 +79,7 @@ public class Node2PipelineMap extends Node2ObjectsMap<PipelineID> {
dn2ObjectMap.computeIfPresent(dnId,
(k, v) -> {
v.remove(pipeline.getId());
- return v.isEmpty() ? null : v;
+ return v;
});
}
}
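The computeIfPresent change above means a datanode's entry now survives the removal of its last pipeline as an empty set, which is what lets getPipelinesCount drop its null check. A plain-JDK sketch of the retained-key behavior, using a hypothetical String pipeline id:

    import java.util.HashMap;
    import java.util.HashSet;
    import java.util.Map;
    import java.util.Set;
    import java.util.UUID;

    public class Node2PipelineSketch {
      public static void main(String[] args) {
        Map<UUID, Set<String>> dn2Pipelines = new HashMap<>();
        UUID dn = UUID.randomUUID();
        dn2Pipelines.put(dn, new HashSet<>(Set.of("pipeline-1")));
        dn2Pipelines.computeIfPresent(dn, (k, v) -> {
          v.remove("pipeline-1");
          return v; // returning null here would unmap the key
        });
        // Key is still mapped, so size() is safe without a null check.
        System.out.println(dn2Pipelines.get(dn).size()); // 0
      }
    }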
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelinePlacementPolicy.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelinePlacementPolicy.java
index 8c6e5c7..4261a87 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelinePlacementPolicy.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/PipelinePlacementPolicy.java
@@ -72,8 +72,8 @@ public final class PipelinePlacementPolicy extends SCMCommonPlacementPolicy {
this.conf = conf;
this.stateManager = stateManager;
this.heavyNodeCriteria = conf.getInt(
- ScmConfigKeys.OZONE_DATANODE_MAX_PIPELINE_ENGAGEMENT,
- ScmConfigKeys.OZONE_DATANODE_MAX_PIPELINE_ENGAGEMENT_DEFAULT);
+ ScmConfigKeys.OZONE_DATANODE_PIPELINE_LIMIT,
+ ScmConfigKeys.OZONE_DATANODE_PIPELINE_LIMIT_DEFAULT);
}
/**
@@ -113,7 +113,7 @@ public final class PipelinePlacementPolicy extends SCMCommonPlacementPolicy {
}
boolean meet = (nodeManager.getPipelinesCount(datanodeDetails)
- pipelineNumDeductable) < heavyNodeCriteria;
- if (!meet) {
+ if (!meet && LOG.isDebugEnabled()) {
LOG.debug("Pipeline Placement: can't place more pipeline on heavy " +
"datanodeļ¼ " + datanodeDetails.getUuid().toString() +
" Heaviness: " + nodeManager.getPipelinesCount(datanodeDetails) +
@@ -143,17 +143,11 @@ public final class PipelinePlacementPolicy extends SCMCommonPlacementPolicy {
}
int initialHealthyNodesCount = healthyNodes.size();
String msg;
- if (initialHealthyNodesCount == 0) {
- msg = "No healthy nodes found to allocate pipeline.";
- LOG.error(msg);
- throw new SCMException(msg, SCMException.ResultCodes
- .FAILED_TO_FIND_SUITABLE_NODE);
- }
if (initialHealthyNodesCount < nodesRequired) {
- LOG.warn("Not enough healthy nodes to allocate pipeline. %d "
- + " datanodes required. Found %d",
- nodesRequired, initialHealthyNodesCount);
+ LOG.warn("Not enough healthy nodes to allocate pipeline. " +
+ nodesRequired + " datanodes required. Found: " +
+ initialHealthyNodesCount);
msg = String.format("Pipeline creation failed due to insufficient" +
" healthy datanodes. Required %d. Found %d.",
nodesRequired, initialHealthyNodesCount);
@@ -168,15 +162,17 @@ public final class PipelinePlacementPolicy extends SCMCommonPlacementPolicy {
.collect(Collectors.toList());
if (healthyList.size() < nodesRequired) {
- LOG.debug("Unable to find enough nodes that meet " +
- "the criteria that cannot engage in more than %d pipelines." +
- " Nodes required: %d Found: %d, healthy nodes count in " +
- "NodeManager: %d.",
- heavyNodeCriteria, nodesRequired, healthyList.size(),
- initialHealthyNodesCount);
- msg = String.format("Pipeline creation failed due to not enough" +
- " healthy datanodes after filter. Required %d. Found %d",
- nodesRequired, initialHealthyNodesCount);
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("Unable to find enough nodes that meet the criteria that" +
+ " cannot engage in more than" + heavyNodeCriteria +
+ " pipelines. Nodes required: " + nodesRequired + " Found:" +
+ healthyList.size() + " healthy nodes count in NodeManager: " +
+ initialHealthyNodesCount);
+ }
+ msg = String.format("Pipeline creation failed because nodes are engaged" +
+ " in other pipelines and every node can only be engaged in" +
+ " max %d pipelines. Required %d. Found %d",
+ heavyNodeCriteria, nodesRequired, healthyList.size());
throw new SCMException(msg,
SCMException.ResultCodes.FAILED_TO_FIND_SUITABLE_NODE);
}
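The heaviness check above deems a node eligible when its current pipeline count, minus any pipelines about to be deducted, stays below the configured per-datanode limit. A sketch with hypothetical stand-in values (the real counts come from NodeManager and ozone.datanode.pipeline.limit):

    public class HeavyNodeCheckSketch {
      public static void main(String[] args) {
        int pipelinesOnNode = 3;       // nodeManager.getPipelinesCount(dn)
        int pipelineNumDeductable = 1; // pipelines being torn down
        int heavyNodeCriteria = 2;     // ozone.datanode.pipeline.limit
        boolean meet =
            (pipelinesOnNode - pipelineNumDeductable) < heavyNodeCriteria;
        System.out.println("node can take another pipeline: " + meet); // false
      }
    }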
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/RatisPipelineProvider.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/RatisPipelineProvider.java
index 9585907..4865074 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/RatisPipelineProvider.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/RatisPipelineProvider.java
@@ -84,11 +84,11 @@ public class RatisPipelineProvider implements PipelineProvider {
this.placementPolicy =
new PipelinePlacementPolicy(nodeManager, stateManager, conf);
this.pipelineNumberLimit = conf.getInt(
- ScmConfigKeys.OZONE_SCM_PIPELINE_NUMBER_LIMIT,
- ScmConfigKeys.OZONE_SCM_PIPELINE_NUMBER_LIMIT_DEFAULT);
+ ScmConfigKeys.OZONE_SCM_RATIS_PIPELINE_LIMIT,
+ ScmConfigKeys.OZONE_SCM_RATIS_PIPELINE_LIMIT_DEFAULT);
this.maxPipelinePerDatanode = conf.getInt(
- ScmConfigKeys.OZONE_DATANODE_MAX_PIPELINE_ENGAGEMENT,
- ScmConfigKeys.OZONE_DATANODE_MAX_PIPELINE_ENGAGEMENT_DEFAULT);
+ ScmConfigKeys.OZONE_DATANODE_PIPELINE_LIMIT,
+ ScmConfigKeys.OZONE_DATANODE_PIPELINE_LIMIT_DEFAULT);
}
private List<DatanodeDetails> pickNodesNeverUsed(ReplicationFactor factor)
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/RatisPipelineUtils.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/RatisPipelineUtils.java
index f9f2011..7fe1cc1 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/RatisPipelineUtils.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/RatisPipelineUtils.java
@@ -106,27 +106,21 @@ public final class RatisPipelineUtils {
}
/**
- * Return first existed pipeline which share the same set of datanodes
+ * Return the list of pipelines that share the same set of datanodes
* with the input pipeline.
* @param stateManager PipelineStateManager
* @param pipeline input pipeline
* @return list of matched pipelines
*/
- static Pipeline checkPipelineContainSameDatanodes(
+ static List<Pipeline> checkPipelineContainSameDatanodes(
PipelineStateManager stateManager, Pipeline pipeline) {
- List<Pipeline> matchedPipelines = stateManager.getPipelines(
+ return stateManager.getPipelines(
HddsProtos.ReplicationType.RATIS,
HddsProtos.ReplicationFactor.THREE)
.stream().filter(p -> !p.getId().equals(pipeline.getId()) &&
(// For all pipelines that are not CLOSED
- p.getPipelineState() == Pipeline.PipelineState.OPEN ||
- p.getPipelineState() == Pipeline.PipelineState.ALLOCATED) &&
- p.getNodeIdsHash() == pipeline.getNodeIdsHash())
+ p.getPipelineState() != Pipeline.PipelineState.CLOSED &&
+ p.getNodeIdsHash() == pipeline.getNodeIdsHash()))
.collect(Collectors.toList());
- if (matchedPipelines.size() == 0) {
- return null;
- } else {
- return matchedPipelines.stream().findFirst().get();
- }
}
}
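checkPipelineContainSameDatanodes now returns every non-CLOSED RATIS/THREE pipeline whose node-set hash matches, instead of only the first. A toy model of that filter using hypothetical stand-in types (the real code works on Pipeline and PipelineStateManager):

    import java.util.List;
    import java.util.stream.Collectors;

    public class OverlapFilterSketch {
      enum State { ALLOCATED, OPEN, CLOSED }
      record P(String id, State state, int nodeIdsHash) {}

      static List<P> sameDatanodes(List<P> all, P pipeline) {
        return all.stream()
            .filter(p -> !p.id().equals(pipeline.id())
                && p.state() != State.CLOSED
                && p.nodeIdsHash() == pipeline.nodeIdsHash())
            .collect(Collectors.toList());
      }

      public static void main(String[] args) {
        P target = new P("a", State.OPEN, 42);
        List<P> all = List.of(target,
            new P("b", State.ALLOCATED, 42),  // kept: not CLOSED, same hash
            new P("c", State.CLOSED, 42),     // dropped: CLOSED
            new P("d", State.OPEN, 7));       // dropped: different hash
        System.out.println(sameDatanodes(all, target)); // only "b"
      }
    }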
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/SCMPipelineManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/SCMPipelineManager.java
index f924b41..88c4329 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/SCMPipelineManager.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipeline/SCMPipelineManager.java
@@ -45,6 +45,8 @@ import org.slf4j.LoggerFactory;
import javax.management.ObjectName;
import java.io.File;
import java.io.IOException;
+import java.time.Duration;
+import java.time.Instant;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
@@ -172,17 +174,20 @@ public class SCMPipelineManager implements PipelineManager {
metrics.incNumPipelineCreated();
metrics.createPerPipelineMetrics(pipeline);
}
- Pipeline overlapPipeline = RatisPipelineUtils
+ List<Pipeline> overlapPipelines = RatisPipelineUtils
.checkPipelineContainSameDatanodes(stateManager, pipeline);
- if (overlapPipeline != null) {
+ if (!overlapPipelines.isEmpty()) {
+ // Count a single overlap event per newly created pipeline.
metrics.incNumPipelineContainSameDatanodes();
//TODO remove once pipeline allocation is proven to be equally distributed.
- LOG.info("Pipeline: " + pipeline.getId().toString() +
- " contains same datanodes as previous pipeline: " +
- overlapPipeline.getId().toString() + " nodeIds: " +
- pipeline.getNodes().get(0).getUuid().toString() +
- ", " + pipeline.getNodes().get(1).getUuid().toString() +
- ", " + pipeline.getNodes().get(2).getUuid().toString());
+ for (Pipeline overlapPipeline : overlapPipelines) {
+ LOG.info("Pipeline: " + pipeline.getId().toString() +
+ " contains same datanodes as previous pipelines: " +
+ overlapPipeline.getId().toString() + " nodeIds: " +
+ pipeline.getNodes().get(0).getUuid().toString() +
+ ", " + pipeline.getNodes().get(1).getUuid().toString() +
+ ", " + pipeline.getNodes().get(2).getUuid().toString());
+ }
}
return pipeline;
} catch (IOException ex) {
@@ -381,20 +386,21 @@ public class SCMPipelineManager implements PipelineManager {
// Only scrub RATIS THREE pipelines.
return;
}
- Long currentTime = System.currentTimeMillis();
+ Instant currentTime = Instant.now();
Long pipelineScrubTimeoutInMillis = conf.getTimeDuration(
ScmConfigKeys.OZONE_SCM_PIPELINE_ALLOCATED_TIMEOUT,
ScmConfigKeys.OZONE_SCM_PIPELINE_ALLOCATED_TIMEOUT_DEFAULT,
TimeUnit.MILLISECONDS);
List<Pipeline> needToScrubPipelines = stateManager.getPipelines(type, factor,
Pipeline.PipelineState.ALLOCATED).stream()
- .filter(p -> (currentTime - p.getCreationTimestamp()
- >= pipelineScrubTimeoutInMills))
+ .filter(p -> currentTime.toEpochMilli() - p.getCreationTimestamp()
+ .toEpochMilli() >= pipelineScrubTimeoutInMillis)
.collect(Collectors.toList());
for (Pipeline p : needToScrubPipelines) {
LOG.info("scrubbing pipeline: id: " + p.getId().toString() +
" since it stays at ALLOCATED stage for " +
- (currentTime - p.getCreationTimestamp())/60000 + " mins.");
+ Duration.between(p.getCreationTimestamp(), currentTime).toMinutes() +
+ " mins.");
finalizeAndDestroyPipeline(p, false);
}
}
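One subtlety in the scrubber above: Duration.between(start, end) is signed, so a pipeline's age must be computed as between(creationTimestamp, now); the reverse order yields a negative duration. A plain-JDK sketch:

    import java.time.Duration;
    import java.time.Instant;

    public class PipelineAgeSketch {
      public static void main(String[] args) {
        Instant creation = Instant.now().minus(Duration.ofMinutes(7));
        Instant now = Instant.now();
        System.out.println(Duration.between(creation, now).toMinutes()); // 7
        System.out.println(Duration.between(now, creation).toMinutes()); // -7
      }
    }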
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestCloseContainerEventHandler.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestCloseContainerEventHandler.java
index f35bfe2..10c38a8 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestCloseContainerEventHandler.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestCloseContainerEventHandler.java
@@ -68,7 +68,7 @@ public class TestCloseContainerEventHandler {
.getTestDir(TestCloseContainerEventHandler.class.getSimpleName());
configuration
.set(HddsConfigKeys.OZONE_METADATA_DIRS, testDir.getAbsolutePath());
- configuration.setInt(ScmConfigKeys.OZONE_SCM_PIPELINE_NUMBER_LIMIT, 16);
+ configuration.setInt(ScmConfigKeys.OZONE_SCM_RATIS_PIPELINE_LIMIT, 16);
nodeManager = new MockNodeManager(true, 10);
eventQueue = new EventQueue();
pipelineManager =
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestDeadNodeHandler.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestDeadNodeHandler.java
index 977038e..4cdc46f 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestDeadNodeHandler.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestDeadNodeHandler.java
@@ -66,7 +66,7 @@ import org.junit.Ignore;
import org.junit.Test;
import org.mockito.Mockito;
-import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_DATANODE_MAX_PIPELINE_ENGAGEMENT;
+import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_DATANODE_PIPELINE_LIMIT;
/**
* Test DeadNodeHandler.
@@ -89,7 +89,7 @@ public class TestDeadNodeHandler {
storageDir = GenericTestUtils.getTempPath(
TestDeadNodeHandler.class.getSimpleName() + UUID.randomUUID());
conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, storageDir);
- conf.setInt(OZONE_DATANODE_MAX_PIPELINE_ENGAGEMENT, 0);
+ conf.setInt(OZONE_DATANODE_PIPELINE_LIMIT, 0);
eventQueue = new EventQueue();
scm = HddsTestUtils.getScm(conf);
nodeManager = (SCMNodeManager) scm.getScmNodeManager();
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineDatanodesIntersection.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineDatanodesIntersection.java
index 87e8cf4..41eea3d 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineDatanodesIntersection.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineDatanodesIntersection.java
@@ -34,8 +34,9 @@ import org.slf4j.LoggerFactory;
import java.io.IOException;
import java.util.Arrays;
import java.util.Collection;
+import java.util.List;
-import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_DATANODE_MAX_PIPELINE_ENGAGEMENT;
+import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_DATANODE_PIPELINE_LIMIT;
import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_PIPELINE_AUTO_CREATE_FACTOR_ONE;
/**
@@ -77,7 +78,7 @@ public class TestPipelineDatanodesIntersection {
@Test
public void testPipelineDatanodesIntersection() {
NodeManager nodeManager= new MockNodeManager(true, nodeCount);
- conf.setInt(OZONE_DATANODE_MAX_PIPELINE_ENGAGEMENT, nodeHeaviness);
+ conf.setInt(OZONE_DATANODE_PIPELINE_LIMIT, nodeHeaviness);
conf.setBoolean(OZONE_SCM_PIPELINE_AUTO_CREATE_FACTOR_ONE, false);
PipelineStateManager stateManager = new PipelineStateManager();
PipelineProvider provider = new MockRatisPipelineProvider(nodeManager,
@@ -92,20 +93,23 @@ public class TestPipelineDatanodesIntersection {
Pipeline pipeline = provider.create(HddsProtos.ReplicationFactor.THREE);
stateManager.addPipeline(pipeline);
nodeManager.addPipeline(pipeline);
- Pipeline overlapPipeline = RatisPipelineUtils
+ List<Pipeline> overlapPipelines = RatisPipelineUtils
.checkPipelineContainSameDatanodes(stateManager, pipeline);
- if (overlapPipeline != null){
+
+ if (!overlapPipelines.isEmpty()) {
intersectionCount++;
- LOG.info("This pipeline: " + pipeline.getId().toString() +
- " overlaps with previous pipeline: " + overlapPipeline.getId() +
- ". They share same set of datanodes as: " +
- pipeline.getNodesInOrder().get(0).getUuid() + "/" +
- pipeline.getNodesInOrder().get(1).getUuid() + "/" +
- pipeline.getNodesInOrder().get(2).getUuid() + " and " +
- overlapPipeline.getNodesInOrder().get(0).getUuid() + "/" +
- overlapPipeline.getNodesInOrder().get(1).getUuid() + "/" +
- overlapPipeline.getNodesInOrder().get(2).getUuid() +
- " is the same.");
+ for (Pipeline overlapPipeline : overlapPipelines) {
+ LOG.info("This pipeline: " + pipeline.getId().toString() +
+ " overlaps with previous pipeline: " + overlapPipeline.getId() +
+ ". They share same set of datanodes as: " +
+ pipeline.getNodesInOrder().get(0).getUuid() + "/" +
+ pipeline.getNodesInOrder().get(1).getUuid() + "/" +
+ pipeline.getNodesInOrder().get(2).getUuid() + " and " +
+ overlapPipeline.getNodesInOrder().get(0).getUuid() + "/" +
+ overlapPipeline.getNodesInOrder().get(1).getUuid() + "/" +
+ overlapPipeline.getNodesInOrder().get(2).getUuid() +
+ " is the same.");
+ }
}
createdPipelineCount++;
} catch(SCMException e) {
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelinePlacementPolicy.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelinePlacementPolicy.java
index 2fe67f9..2fff7d9 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelinePlacementPolicy.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelinePlacementPolicy.java
@@ -34,7 +34,7 @@ import org.junit.Test;
import java.util.*;
import java.util.stream.Collectors;
-import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_DATANODE_MAX_PIPELINE_ENGAGEMENT;
+import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_DATANODE_PIPELINE_LIMIT;
/**
* Test for PipelinePlacementPolicy.
@@ -50,7 +50,7 @@ public class TestPipelinePlacementPolicy {
nodeManager = new MockNodeManager(true,
PIPELINE_PLACEMENT_MAX_NODES_COUNT);
conf = new OzoneConfiguration();
- conf.setInt(OZONE_DATANODE_MAX_PIPELINE_ENGAGEMENT, 5);
+ conf.setInt(OZONE_DATANODE_PIPELINE_LIMIT, 5);
placementPolicy = new PipelinePlacementPolicy(
nodeManager, new PipelineStateManager(), conf);
}
@@ -185,8 +185,8 @@ public class TestPipelinePlacementPolicy {
int considerHeavyCount =
conf.getInt(
- ScmConfigKeys.OZONE_DATANODE_MAX_PIPELINE_ENGAGEMENT,
- ScmConfigKeys.OZONE_DATANODE_MAX_PIPELINE_ENGAGEMENT_DEFAULT) + 1;
+ ScmConfigKeys.OZONE_DATANODE_PIPELINE_LIMIT,
+ ScmConfigKeys.OZONE_DATANODE_PIPELINE_LIMIT_DEFAULT) + 1;
Node2PipelineMap mockMap = new Node2PipelineMap();
for (DatanodeDetails node : nodes) {
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestRatisPipelineProvider.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestRatisPipelineProvider.java
index 5623359..a17fc08 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestRatisPipelineProvider.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestRatisPipelineProvider.java
@@ -61,7 +61,7 @@ public class TestRatisPipelineProvider {
public void init() throws Exception {
nodeManager = new MockNodeManager(true, 10);
conf = new OzoneConfiguration();
- conf.setInt(ScmConfigKeys.OZONE_DATANODE_MAX_PIPELINE_ENGAGEMENT,
+ conf.setInt(ScmConfigKeys.OZONE_DATANODE_PIPELINE_LIMIT,
maxPipelinePerNode);
stateManager = new PipelineStateManager();
provider = new MockRatisPipelineProvider(nodeManager,
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestSCMPipelineManager.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestSCMPipelineManager.java
index e6bf7a0..deba91b 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestSCMPipelineManager.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestSCMPipelineManager.java
@@ -19,7 +19,7 @@
package org.apache.hadoop.hdds.scm.pipeline;
import static org.apache.commons.collections.CollectionUtils.intersection;
-import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_DATANODE_MAX_PIPELINE_ENGAGEMENT;
+import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_DATANODE_PIPELINE_LIMIT;
import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_PIPELINE_ALLOCATED_TIMEOUT;
import static org.apache.hadoop.test.MetricsAsserts.getLongCounter;
import static org.apache.hadoop.test.MetricsAsserts.getMetrics;
@@ -65,7 +65,7 @@ public class TestSCMPipelineManager {
@Before
public void setUp() throws Exception {
conf = new OzoneConfiguration();
- conf.setInt(OZONE_DATANODE_MAX_PIPELINE_ENGAGEMENT, 1);
+ conf.setInt(OZONE_DATANODE_PIPELINE_LIMIT, 1);
testDir = GenericTestUtils
.getTestDir(TestSCMPipelineManager.class.getSimpleName());
conf.set(HddsConfigKeys.OZONE_METADATA_DIRS, testDir.getAbsolutePath());
diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/ListInfoSubcommand.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/ListInfoSubcommand.java
index dcd8402..badfadc 100644
--- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/ListInfoSubcommand.java
+++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/datanode/ListInfoSubcommand.java
@@ -17,6 +17,7 @@
*/
package org.apache.hadoop.hdds.scm.cli.datanode;
+import com.google.common.base.Strings;
import org.apache.hadoop.hdds.cli.HddsVersionProvider;
import org.apache.hadoop.hdds.protocol.DatanodeDetails;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
@@ -62,15 +63,15 @@ public class ListInfoSubcommand implements Callable<Void> {
public Void call() throws Exception {
try (ScmClient scmClient = parent.getParent().createScmClient()) {
pipelines = scmClient.listPipelines();
- if (isNullOrEmpty(ipaddress) && isNullOrEmpty(uuid)) {
+ if (Strings.isNullOrEmpty(ipaddress) && Strings.isNullOrEmpty(uuid)) {
getAllNodes(scmClient).stream().forEach(p -> printDatanodeInfo(p));
} else {
Stream<DatanodeDetails> allNodes = getAllNodes(scmClient).stream();
- if (!isNullOrEmpty(ipaddress)) {
+ if (!Strings.isNullOrEmpty(ipaddress)) {
allNodes = allNodes.filter(p -> p.getIpAddress()
.compareToIgnoreCase(ipaddress) == 0);
}
- if (!isNullOrEmpty(uuid)) {
+ if (!Strings.isNullOrEmpty(uuid)) {
allNodes = allNodes.filter(p -> p.getUuid().toString().equals(uuid));
}
allNodes.forEach(p -> printDatanodeInfo(p));
@@ -117,8 +118,4 @@ public class ListInfoSubcommand implements Callable<Void> {
+ datanode.getHostName() + "/" + relatedPipelineNum +
" pipelines) \n" + "Related pipelines: \n" + pipelineListInfo);
}
-
- protected static boolean isNullOrEmpty(String str) {
- return ((str == null) || str.trim().isEmpty());
- }
}
\ No newline at end of file
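One behavioral nuance of switching to Guava here: the removed helper trimmed its input before testing emptiness, while Strings.isNullOrEmpty does not, so a whitespace-only argument now counts as non-empty. A small sketch of the difference:

    import com.google.common.base.Strings;

    public class NullOrEmptySketch {
      public static void main(String[] args) {
        System.out.println(Strings.isNullOrEmpty(" "));  // false (Guava)
        System.out.println(" ".trim().isEmpty());        // true (old helper)
      }
    }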
diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/pipeline/ListPipelinesSubcommand.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/pipeline/ListPipelinesSubcommand.java
index 8b3b1b3..f8ac1d4 100644
--- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/pipeline/ListPipelinesSubcommand.java
+++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/pipeline/ListPipelinesSubcommand.java
@@ -18,6 +18,7 @@
package org.apache.hadoop.hdds.scm.cli.pipeline;
+import com.google.common.base.Strings;
import org.apache.hadoop.hdds.cli.HddsVersionProvider;
import org.apache.hadoop.hdds.scm.client.ScmClient;
import picocli.CommandLine;
@@ -53,13 +54,13 @@ public class ListPipelinesSubcommand implements Callable<Void> {
@Override
public Void call() throws Exception {
try (ScmClient scmClient = parent.getParent().createScmClient()) {
- if (isNullOrEmpty(factor) && isNullOrEmpty(state)) {
+ if (Strings.isNullOrEmpty(factor) && Strings.isNullOrEmpty(state)) {
scmClient.listPipelines().forEach(System.out::println);
} else {
scmClient.listPipelines().stream()
- .filter(p -> ((isNullOrEmpty(factor) ||
+ .filter(p -> ((Strings.isNullOrEmpty(factor) ||
(p.getFactor().toString().compareToIgnoreCase(factor) == 0))
- && (isNullOrEmpty(state) ||
+ && (Strings.isNullOrEmpty(state) ||
(p.getPipelineState().toString().compareToIgnoreCase(state)
== 0))))
.forEach(System.out::println);
@@ -67,8 +68,4 @@ public class ListPipelinesSubcommand implements Callable<Void> {
return null;
}
}
-
- protected static boolean isNullOrEmpty(String str) {
- return ((str == null) || str.trim().isEmpty());
- }
}
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFsHAURLs.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFsHAURLs.java
index acc4031..7a6143c 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFsHAURLs.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFsHAURLs.java
@@ -97,7 +97,7 @@ public class TestOzoneFsHAURLs {
conf.setTimeDuration(
OMConfigKeys.OZONE_OM_LEADER_ELECTION_MINIMUM_TIMEOUT_DURATION_KEY,
LEADER_ELECTION_TIMEOUT, TimeUnit.MILLISECONDS);
- conf.setInt(ScmConfigKeys.OZONE_DATANODE_MAX_PIPELINE_ENGAGEMENT, 3);
+ conf.setInt(ScmConfigKeys.OZONE_DATANODE_PIPELINE_LIMIT, 3);
OMStorage omStore = new OMStorage(conf);
omStore.setClusterId(clusterId);
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestRatisPipelineCreateAndDestroy.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestRatisPipelineCreateAndDestroy.java
index fc90ee9..bd677db 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestRatisPipelineCreateAndDestroy.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestRatisPipelineCreateAndDestroy.java
@@ -37,7 +37,7 @@ import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_PIPELINE_AUTO_CREATE_FACTOR_ONE;
-import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_DATANODE_MAX_PIPELINE_ENGAGEMENT;
+import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_DATANODE_PIPELINE_LIMIT;
import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_STALENODE_INTERVAL;
/**
@@ -53,7 +53,7 @@ public class TestRatisPipelineCreateAndDestroy {
public void init(int numDatanodes) throws Exception {
conf.set(HddsConfigKeys.OZONE_METADATA_DIRS,
GenericTestUtils.getRandomizedTempPath());
- conf.setInt(OZONE_DATANODE_MAX_PIPELINE_ENGAGEMENT, 2);
+ conf.setInt(OZONE_DATANODE_PIPELINE_LIMIT, 2);
cluster = MiniOzoneCluster.newBuilder(conf)
.setNumDatanodes(numDatanodes)
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/safemode/TestSCMSafeModeWithPipelineRules.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/safemode/TestSCMSafeModeWithPipelineRules.java
index 4b35317..39b67ac 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/safemode/TestSCMSafeModeWithPipelineRules.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/safemode/TestSCMSafeModeWithPipelineRules.java
@@ -39,7 +39,7 @@ import org.junit.rules.TemporaryFolder;
import java.util.List;
import java.util.concurrent.TimeoutException;
-import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_DATANODE_MAX_PIPELINE_ENGAGEMENT;
+import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_DATANODE_PIPELINE_LIMIT;
import static org.junit.Assert.fail;
/**
@@ -65,7 +65,7 @@ public class TestSCMSafeModeWithPipelineRules {
true);
conf.set(HddsConfigKeys.HDDS_SCM_WAIT_TIME_AFTER_SAFE_MODE_EXIT, "10s");
conf.set(ScmConfigKeys.OZONE_SCM_PIPELINE_CREATION_INTERVAL, "10s");
- conf.setInt(OZONE_DATANODE_MAX_PIPELINE_ENGAGEMENT, 50);
+ conf.setInt(OZONE_DATANODE_PIPELINE_LIMIT, 50);
clusterBuilder = MiniOzoneCluster.newBuilder(conf)
.setNumDatanodes(numDatanodes)
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneClusterImpl.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneClusterImpl.java
index 9bfa8bd..7758c3c 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneClusterImpl.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneClusterImpl.java
@@ -576,9 +576,9 @@ public class MiniOzoneClusterImpl implements MiniOzoneCluster {
conf.setStorageSize(OzoneConfigKeys.OZONE_SCM_BLOCK_SIZE, blockSize.get(),
streamBufferSizeUnit.get());
// MiniOzoneCluster should have a global pipeline upper limit.
- conf.setInt(ScmConfigKeys.OZONE_SCM_PIPELINE_NUMBER_LIMIT,
- pipelineNumLimit == DEFAULT_PIPELIME_LIMIT ?
- 2 * numOfDatanodes : pipelineNumLimit);
+ conf.setInt(ScmConfigKeys.OZONE_SCM_RATIS_PIPELINE_LIMIT,
+ pipelineNumLimit >= DEFAULT_PIPELIME_LIMIT ?
+ pipelineNumLimit : DEFAULT_PIPELIME_LIMIT);
configureTrace();
}
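The new MiniOzoneCluster expression above is equivalent to taking the larger of the requested limit and the default, rather than always deriving the limit from the datanode count. A minimal sketch of the effective computation:

    public class EffectiveLimitSketch {
      static int effectiveLimit(int requested, int defaultLimit) {
        return requested >= defaultLimit ? requested : defaultLimit; // max
      }
      public static void main(String[] args) {
        System.out.println(effectiveLimit(3, 5));  // 5
        System.out.println(effectiveLimit(12, 5)); // 12
      }
    }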
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBlockOutputStreamWithFailures.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBlockOutputStreamWithFailures.java
index 07e306e..1b6b7dc 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBlockOutputStreamWithFailures.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestBlockOutputStreamWithFailures.java
@@ -92,7 +92,7 @@ public class TestBlockOutputStreamWithFailures {
conf.setQuietMode(false);
conf.setStorageSize(OzoneConfigKeys.OZONE_SCM_BLOCK_SIZE, 4,
StorageUnit.MB);
- conf.setInt(ScmConfigKeys.OZONE_DATANODE_MAX_PIPELINE_ENGAGEMENT, 3);
+ conf.setInt(ScmConfigKeys.OZONE_DATANODE_PIPELINE_LIMIT, 3);
cluster = MiniOzoneCluster.newBuilder(conf).setNumDatanodes(7)
.setTotalPipelineNumLimit(10).setBlockSize(blockSize)
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerReplicationEndToEnd.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerReplicationEndToEnd.java
index 439287e..6917ab2 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerReplicationEndToEnd.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerReplicationEndToEnd.java
@@ -57,7 +57,7 @@ import java.util.function.Predicate;
import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_CONTAINER_REPORT_INTERVAL;
import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_PIPELINE_DESTROY_TIMEOUT;
import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_STALENODE_INTERVAL;
-import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_DATANODE_MAX_PIPELINE_ENGAGEMENT;
+import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_DATANODE_PIPELINE_LIMIT;
/**
* Tests delete key operation with a slow follower in the datanode
@@ -108,7 +108,7 @@ public class TestContainerReplicationEndToEnd {
1000, TimeUnit.SECONDS);
conf.setLong("hdds.scm.replication.thread.interval",
containerReportInterval);
- conf.setInt(OZONE_DATANODE_MAX_PIPELINE_ENGAGEMENT, 2);
+ conf.setInt(OZONE_DATANODE_PIPELINE_LIMIT, 2);
conf.setQuietMode(false);
cluster =
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestFailureHandlingByClient.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestFailureHandlingByClient.java
index d4e5d7d..a84e16e 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestFailureHandlingByClient.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestFailureHandlingByClient.java
@@ -99,7 +99,7 @@ public class TestFailureHandlingByClient {
1, TimeUnit.SECONDS);
conf.setBoolean(
OzoneConfigKeys.OZONE_NETWORK_TOPOLOGY_AWARE_READ_KEY, true);
- conf.setInt(ScmConfigKeys.OZONE_DATANODE_MAX_PIPELINE_ENGAGEMENT, 2);
+ conf.setInt(ScmConfigKeys.OZONE_DATANODE_PIPELINE_LIMIT, 2);
conf.setQuietMode(false);
conf.setClass(NET_TOPOLOGY_NODE_SWITCH_MAPPING_IMPL_KEY,
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestMultiBlockWritesWithDnFailures.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestMultiBlockWritesWithDnFailures.java
index 9b62923..7d31499 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestMultiBlockWritesWithDnFailures.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestMultiBlockWritesWithDnFailures.java
@@ -87,7 +87,7 @@ public class TestMultiBlockWritesWithDnFailures {
conf.setTimeDuration(
OzoneConfigKeys.DFS_RATIS_LEADER_ELECTION_MINIMUM_TIMEOUT_DURATION_KEY,
1, TimeUnit.SECONDS);
- conf.setInt(OZONE_DATANODE_MAX_PIPELINE_ENGAGEMENT, 2);
+ conf.setInt(OZONE_DATANODE_PIPELINE_LIMIT, 2);
conf.setQuietMode(false);
cluster = MiniOzoneCluster.newBuilder(conf)
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestWatchForCommit.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestWatchForCommit.java
index b84e61c..95dcedc 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestWatchForCommit.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestWatchForCommit.java
@@ -58,7 +58,7 @@ import java.util.concurrent.TimeoutException;
import static java.nio.charset.StandardCharsets.UTF_8;
import static org.apache.hadoop.hdds.scm.ScmConfigKeys.HDDS_SCM_WATCHER_TIMEOUT;
import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_STALENODE_INTERVAL;
-import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_DATANODE_MAX_PIPELINE_ENGAGEMENT;
+import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_DATANODE_PIPELINE_LIMIT;
/**
* This class verifies the watchForCommit Handling by xceiverClient.
@@ -96,7 +96,7 @@ public class TestWatchForCommit {
conf.setTimeDuration(
OzoneConfigKeys.DFS_RATIS_CLIENT_REQUEST_RETRY_INTERVAL_KEY,
1, TimeUnit.SECONDS);
- conf.setInt(OZONE_DATANODE_MAX_PIPELINE_ENGAGEMENT, 5);
+ conf.setInt(OZONE_DATANODE_PIPELINE_LIMIT, 5);
conf.setQuietMode(false);
cluster = MiniOzoneCluster.newBuilder(conf)
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerByPipeline.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerByPipeline.java
index 8ee47a9..869f091 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerByPipeline.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerByPipeline.java
@@ -53,7 +53,7 @@ import java.util.HashMap;
import java.util.List;
import java.util.concurrent.TimeoutException;
-import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_DATANODE_MAX_PIPELINE_ENGAGEMENT;
+import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_DATANODE_PIPELINE_LIMIT;
/**
* Test container closing.
@@ -77,7 +77,7 @@ public class TestCloseContainerByPipeline {
public static void init() throws Exception {
conf = new OzoneConfiguration();
conf.set(ScmConfigKeys.OZONE_SCM_PIPELINE_OWNER_CONTAINER_COUNT, "1");
- conf.setInt(OZONE_DATANODE_MAX_PIPELINE_ENGAGEMENT, 2);
+ conf.setInt(OZONE_DATANODE_PIPELINE_LIMIT, 2);
cluster = MiniOzoneCluster.newBuilder(conf)
.setNumDatanodes(10)
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerRestart.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerRestart.java
index ce27eed..6058fad 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerRestart.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerRestart.java
@@ -36,7 +36,7 @@ import org.apache.hadoop.test.GenericTestUtils;
import org.apache.commons.lang3.RandomStringUtils;
-import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_PIPELINE_NUMBER_LIMIT;
+import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_RATIS_PIPELINE_LIMIT;
import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ACL_ENABLED;
import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ADMINISTRATORS;
import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ADMINISTRATORS_WILDCARD;
@@ -81,7 +81,7 @@ public class TestOzoneManagerRestart {
conf.setBoolean(OZONE_ACL_ENABLED, true);
conf.setInt(OZONE_OPEN_KEY_EXPIRE_THRESHOLD_SECONDS, 2);
conf.set(OZONE_ADMINISTRATORS, OZONE_ADMINISTRATORS_WILDCARD);
- conf.setInt(OZONE_SCM_PIPELINE_NUMBER_LIMIT, 10);
+ conf.setInt(OZONE_SCM_RATIS_PIPELINE_LIMIT, 10);
cluster = MiniOzoneCluster.newBuilder(conf)
.setClusterId(clusterId)
.setScmId(scmId)
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/node/TestQueryNode.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/node/TestQueryNode.java
index 1ca3110..14660d6 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/node/TestQueryNode.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/node/TestQueryNode.java
@@ -79,7 +79,7 @@ public class TestQueryNode {
conf.setTimeDuration(HDDS_NODE_REPORT_INTERVAL, 1, SECONDS);
conf.setTimeDuration(OZONE_SCM_STALENODE_INTERVAL, 3, SECONDS);
conf.setTimeDuration(OZONE_SCM_DEADNODE_INTERVAL, 6, SECONDS);
- conf.setInt(ScmConfigKeys.OZONE_DATANODE_MAX_PIPELINE_ENGAGEMENT, 3);
+ conf.setInt(ScmConfigKeys.OZONE_DATANODE_PIPELINE_LIMIT, 3);
cluster = MiniOzoneCluster.newBuilder(conf)
.setNumDatanodes(numOfDatanodes)