You are viewing a plain text version of this content. The canonical link for it is here.
Posted to common-commits@hadoop.apache.org by xy...@apache.org on 2018/05/22 22:51:57 UTC
hadoop git commit: HDDS-79. Remove ReportState from
SCMHeartbeatRequestProto. Contributed by Nanda kumar.
Repository: hadoop
Updated Branches:
refs/heads/trunk 43be9ab44 -> 68c7fd8e6
HDDS-79. Remove ReportState from SCMHeartbeatRequestProto. Contributed by Nanda kumar.
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/68c7fd8e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/68c7fd8e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/68c7fd8e
Branch: refs/heads/trunk
Commit: 68c7fd8e6092e8436ecf96852c608708f311f262
Parents: 43be9ab
Author: Xiaoyu Yao <xy...@apache.org>
Authored: Tue May 22 15:46:59 2018 -0700
Committer: Xiaoyu Yao <xy...@apache.org>
Committed: Tue May 22 15:46:59 2018 -0700
----------------------------------------------------------------------
.../common/impl/ContainerManagerImpl.java | 14 +---
.../common/impl/ContainerReportManagerImpl.java | 43 +++---------
.../common/interfaces/ContainerManager.java | 7 --
.../interfaces/ContainerReportManager.java | 8 +--
.../statemachine/DatanodeStateMachine.java | 1 -
.../common/statemachine/StateContext.java | 38 ----------
.../states/endpoint/HeartbeatEndpointTask.java | 3 +-
.../container/ozoneimpl/OzoneContainer.java | 9 ---
.../StorageContainerDatanodeProtocol.java | 5 +-
.../protocol/StorageContainerNodeProtocol.java | 5 +-
...rDatanodeProtocolClientSideTranslatorPB.java | 5 +-
...rDatanodeProtocolServerSideTranslatorPB.java | 3 +-
.../StorageContainerDatanodeProtocol.proto | 39 -----------
.../ozone/container/common/ScmTestMock.java | 13 +---
.../common/TestDatanodeStateMachine.java | 7 --
.../hdds/scm/node/HeartbeatQueueItem.java | 23 +-----
.../hadoop/hdds/scm/node/SCMNodeManager.java | 30 +-------
.../scm/server/SCMDatanodeProtocolServer.java | 6 +-
.../hdds/scm/container/MockNodeManager.java | 5 +-
.../hdds/scm/node/TestContainerPlacement.java | 9 +--
.../hadoop/hdds/scm/node/TestNodeManager.java | 74 +++++++++-----------
.../ozone/container/common/TestEndPoint.java | 11 +--
.../testutils/ReplicationNodeManagerMock.java | 5 +-
.../ozone/TestStorageContainerManager.java | 5 +-
24 files changed, 63 insertions(+), 305 deletions(-)
----------------------------------------------------------------------
http://git-wip-us.apache.org/repos/asf/hadoop/blob/68c7fd8e/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerManagerImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerManagerImpl.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerManagerImpl.java
index 3a78c70..faee5d0 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerManagerImpl.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerManagerImpl.java
@@ -35,8 +35,6 @@ import org.apache.hadoop.hdds.protocol.proto
import org.apache.hadoop.hdds.protocol.proto
.StorageContainerDatanodeProtocolProtos.ContainerReportsRequestProto;
import org.apache.hadoop.hdds.protocol.proto
- .StorageContainerDatanodeProtocolProtos.ReportState;
-import org.apache.hadoop.hdds.protocol.proto
.StorageContainerDatanodeProtocolProtos.SCMNodeReport;
import org.apache.hadoop.hdds.protocol.proto
.StorageContainerDatanodeProtocolProtos.SCMStorageReport;
@@ -1072,16 +1070,8 @@ public class ContainerManagerImpl implements ContainerManager {
@Override
public long getNumKeys(long containerId) {
ContainerData cData = containerMap.get(containerId);
- return cData.getKeyCount(); }
-
- /**
- * Get the container report state to send via HB to SCM.
- *
- * @return container report state.
- */
- @Override
- public ReportState getContainerReportState() {
- return containerReportManager.getContainerReportState();
+ return cData.getKeyCount();
}
+
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/68c7fd8e/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerReportManagerImpl.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerReportManagerImpl.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerReportManagerImpl.java
index 6c83c66..f1d3f7f 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerReportManagerImpl.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerReportManagerImpl.java
@@ -19,15 +19,12 @@ package org.apache.hadoop.ozone.container.common.impl;
import org.apache.commons.lang3.RandomUtils;
import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdds.protocol.proto
- .StorageContainerDatanodeProtocolProtos.ReportState;
import org.apache.hadoop.ozone.OzoneConfigKeys;
import org.apache.hadoop.ozone.container.common.interfaces
.ContainerReportManager;
import org.apache.hadoop.util.Time;
import java.util.concurrent.TimeUnit;
-import java.util.concurrent.atomic.AtomicLong;
import static org.apache.hadoop.hdds.scm.HddsServerUtil.getScmHeartbeatInterval;
@@ -40,15 +37,9 @@ public class ContainerReportManagerImpl implements ContainerReportManager {
private long lastContainerReportTime;
private final long containerReportInterval;
private final long heartbeatInterval;
- private AtomicLong reportCount;
- private static final ReportState NO_CONTAINER_REPORTSTATE =
- ReportState.newBuilder()
- .setState(ReportState.states.noContainerReports)
- .setCount(0).build();
public ContainerReportManagerImpl(Configuration config) {
this.lastContainerReportTime = -1;
- this.reportCount = new AtomicLong(0L);
this.containerReportInterval = config.getTimeDuration(
OzoneConfigKeys.OZONE_CONTAINER_REPORT_INTERVAL,
OzoneConfigKeys.OZONE_CONTAINER_REPORT_INTERVAL_DEFAULT,
@@ -56,32 +47,18 @@ public class ContainerReportManagerImpl implements ContainerReportManager {
this.heartbeatInterval = getScmHeartbeatInterval(config);
}
- public ReportState getContainerReportState() {
+ public boolean shouldSendContainerReport() {
if (lastContainerReportTime < 0) {
- return getFullContainerReportState();
- } else {
- // Add a random delay (0~30s) on top of the container report
- // interval (60s) so that the SCM is not overwhelmed by the container
- // reports sent in sync.
- if (Time.monotonicNow() - lastContainerReportTime >
- (containerReportInterval + getRandomReportDelay())) {
- return getFullContainerReportState();
- } else {
- return getNoContainerReportState();
- }
+ return true;
}
- }
-
- private ReportState getFullContainerReportState() {
- ReportState.Builder rsBuilder = ReportState.newBuilder();
- rsBuilder.setState(ReportState.states.completeContinerReport);
- rsBuilder.setCount(reportCount.incrementAndGet());
- this.lastContainerReportTime = Time.monotonicNow();
- return rsBuilder.build();
- }
-
- private ReportState getNoContainerReportState() {
- return NO_CONTAINER_REPORTSTATE;
+ // Add a random delay (0~30s) on top of the container report
+ // interval (60s) so that the SCM is not overwhelmed by the container
+ // reports sent in sync.
+ if (Time.monotonicNow() - lastContainerReportTime >
+ (containerReportInterval + getRandomReportDelay())) {
+ return true;
+ }
+ return false;
}
private long getRandomReportDelay() {
http://git-wip-us.apache.org/repos/asf/hadoop/blob/68c7fd8e/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/ContainerManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/ContainerManager.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/ContainerManager.java
index 84d95f8..3a1a73d 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/ContainerManager.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/ContainerManager.java
@@ -29,8 +29,6 @@ import org.apache.hadoop.hdds.protocol.DatanodeDetails;
import org.apache.hadoop.hdds.protocol.proto
.StorageContainerDatanodeProtocolProtos.ContainerReportsRequestProto;
import org.apache.hadoop.hdds.protocol.proto
- .StorageContainerDatanodeProtocolProtos.ReportState;
-import org.apache.hadoop.hdds.protocol.proto
.StorageContainerDatanodeProtocolProtos.SCMNodeReport;
import org.apache.hadoop.ozone.container.common.helpers.ContainerData;
@@ -266,9 +264,4 @@ public interface ContainerManager extends RwLock {
*/
long getNumKeys(long containerId);
- /**
- * Get the container report state to send via HB to SCM.
- * @return container report state.
- */
- ReportState getContainerReportState();
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/68c7fd8e/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/ContainerReportManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/ContainerReportManager.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/ContainerReportManager.java
index 4689dfe..6d7557b 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/ContainerReportManager.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/ContainerReportManager.java
@@ -17,16 +17,14 @@
*/
package org.apache.hadoop.ozone.container.common.interfaces;
-import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ReportState;
-
/**
* Interface for container report manager operations.
*/
public interface ContainerReportManager {
/**
- * Get the container report state.
- * @return the container report state.
+ * Check if we have to send container report.
+ * @return true if container report has to be sent.
*/
- ReportState getContainerReportState();
+ boolean shouldSendContainerReport();
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/68c7fd8e/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/DatanodeStateMachine.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/DatanodeStateMachine.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/DatanodeStateMachine.java
index ef1ba59..a16bfdc 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/DatanodeStateMachine.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/DatanodeStateMachine.java
@@ -135,7 +135,6 @@ public class DatanodeStateMachine implements Closeable {
LOG.debug("Executing cycle Number : {}", context.getExecutionCount());
nextHB.set(Time.monotonicNow() + heartbeatFrequency);
context.setReportState(container.getNodeReport());
- context.setContainerReportState(container.getContainerReportState());
context.execute(executorService, heartbeatFrequency,
TimeUnit.MILLISECONDS);
now = Time.monotonicNow();
http://git-wip-us.apache.org/repos/asf/hadoop/blob/68c7fd8e/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/StateContext.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/StateContext.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/StateContext.java
index 55476fd..27eb57e 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/StateContext.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/StateContext.java
@@ -18,8 +18,6 @@ package org.apache.hadoop.ozone.container.common.statemachine;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdds.protocol.proto
- .StorageContainerDatanodeProtocolProtos.ReportState;
-import org.apache.hadoop.hdds.protocol.proto
.StorageContainerDatanodeProtocolProtos.SCMNodeReport;
import org.apache.hadoop.ozone.container.common.states.DatanodeState;
import org.apache.hadoop.ozone.container.common.states.datanode
@@ -40,9 +38,6 @@ import java.util.concurrent.atomic.AtomicLong;
import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.ReentrantLock;
-import static org.apache.hadoop.hdds.protocol.proto
- .StorageContainerDatanodeProtocolProtos.ReportState.states
- .noContainerReports;
import static org.apache.hadoop.ozone.OzoneConsts.INVALID_PORT;
/**
@@ -58,9 +53,6 @@ public class StateContext {
private final Configuration conf;
private DatanodeStateMachine.DatanodeStates state;
private SCMNodeReport nrState;
- private ReportState reportState;
- private static final ReportState DEFAULT_REPORT_STATE =
- ReportState.newBuilder().setState(noContainerReports).setCount(0).build();
/**
* Constructs a StateContext.
@@ -212,7 +204,6 @@ public class StateContext {
if (isExiting(newState)) {
task.onExit();
}
- this.clearReportState();
this.setState(newState);
}
}
@@ -253,33 +244,4 @@ public class StateContext {
return stateExecutionCount.get();
}
-
- /**
- * Gets the ReportState.
- * @return ReportState.
- */
- public synchronized ReportState getContainerReportState() {
- if (reportState == null) {
- return DEFAULT_REPORT_STATE;
- }
- return reportState;
- }
-
- /**
- * Sets the ReportState.
- * @param rState - ReportState.
- */
- public synchronized void setContainerReportState(ReportState rState) {
- this.reportState = rState;
- }
-
- /**
- * Clears report state after it has been communicated.
- */
- public synchronized void clearReportState() {
- if(reportState != null) {
- setContainerReportState(null);
- }
- }
-
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/68c7fd8e/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/HeartbeatEndpointTask.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/HeartbeatEndpointTask.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/HeartbeatEndpointTask.java
index eba565d..2f1db39 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/HeartbeatEndpointTask.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/HeartbeatEndpointTask.java
@@ -99,8 +99,7 @@ public class HeartbeatEndpointTask
Preconditions.checkState(this.datanodeDetailsProto != null);
SCMHeartbeatResponseProto reponse = rpcEndpoint.getEndPoint()
- .sendHeartbeat(datanodeDetailsProto, this.context.getNodeReport(),
- this.context.getContainerReportState());
+ .sendHeartbeat(datanodeDetailsProto, this.context.getNodeReport());
processResponse(reponse, datanodeDetailsProto);
rpcEndpoint.setLastSuccessfulHeartbeat(ZonedDateTime.now());
rpcEndpoint.zeroMissedCount();
http://git-wip-us.apache.org/repos/asf/hadoop/blob/68c7fd8e/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/OzoneContainer.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/OzoneContainer.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/OzoneContainer.java
index 33a5971..1fc79d7 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/OzoneContainer.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/OzoneContainer.java
@@ -25,8 +25,6 @@ import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
import org.apache.hadoop.hdds.protocol.proto
.StorageContainerDatanodeProtocolProtos.ContainerReportsRequestProto;
import org.apache.hadoop.hdds.protocol.proto
- .StorageContainerDatanodeProtocolProtos.ReportState;
-import org.apache.hadoop.hdds.protocol.proto
.StorageContainerDatanodeProtocolProtos.SCMNodeReport;
import org.apache.hadoop.ozone.OzoneConfigKeys;
import org.apache.hadoop.ozone.container.common.helpers.ContainerData;
@@ -267,11 +265,4 @@ public class OzoneContainer {
return this.manager;
}
- /**
- * Get the container report state to send via HB to SCM.
- * @return the container report state.
- */
- public ReportState getContainerReportState() {
- return this.manager.getContainerReportState();
- }
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/68c7fd8e/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/StorageContainerDatanodeProtocol.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/StorageContainerDatanodeProtocol.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/StorageContainerDatanodeProtocol.java
index 8aa397b..e2a3bf5 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/StorageContainerDatanodeProtocol.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/StorageContainerDatanodeProtocol.java
@@ -28,8 +28,6 @@ import org.apache.hadoop.hdds.protocol.proto
import org.apache.hadoop.hdds.protocol.proto
.StorageContainerDatanodeProtocolProtos.ContainerReportsResponseProto;
import org.apache.hadoop.hdds.protocol.proto
- .StorageContainerDatanodeProtocolProtos.ReportState;
-import org.apache.hadoop.hdds.protocol.proto
.StorageContainerDatanodeProtocolProtos.SCMHeartbeatResponseProto;
import org.apache.hadoop.hdds.protocol.proto
.StorageContainerDatanodeProtocolProtos.SCMNodeReport;
@@ -59,12 +57,11 @@ public interface StorageContainerDatanodeProtocol {
* Used by data node to send a Heartbeat.
* @param datanodeDetails - Datanode Details.
* @param nodeReport - node report state
- * @param reportState - container report state.
* @return - SCMHeartbeatResponseProto
* @throws IOException
*/
SCMHeartbeatResponseProto sendHeartbeat(DatanodeDetailsProto datanodeDetails,
- SCMNodeReport nodeReport, ReportState reportState) throws IOException;
+ SCMNodeReport nodeReport) throws IOException;
/**
* Register Datanode.
http://git-wip-us.apache.org/repos/asf/hadoop/blob/68c7fd8e/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/StorageContainerNodeProtocol.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/StorageContainerNodeProtocol.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/StorageContainerNodeProtocol.java
index 5d1d434..14038fb 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/StorageContainerNodeProtocol.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/StorageContainerNodeProtocol.java
@@ -20,8 +20,6 @@ package org.apache.hadoop.ozone.protocol;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos.DatanodeDetailsProto;
import org.apache.hadoop.hdds.protocol.proto
- .StorageContainerDatanodeProtocolProtos.ReportState;
-import org.apache.hadoop.hdds.protocol.proto
.StorageContainerDatanodeProtocolProtos.SCMNodeReport;
import org.apache.hadoop.hdds.protocol.proto
.StorageContainerDatanodeProtocolProtos.SCMVersionRequestProto;
@@ -61,10 +59,9 @@ public interface StorageContainerNodeProtocol {
* Send heartbeat to indicate the datanode is alive and doing well.
* @param datanodeDetails - Datanode ID.
* @param nodeReport - node report.
- * @param reportState - container report.
* @return SCMheartbeat response list
*/
List<SCMCommand> sendHeartbeat(DatanodeDetailsProto datanodeDetails,
- SCMNodeReport nodeReport, ReportState reportState);
+ SCMNodeReport nodeReport);
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/68c7fd8e/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocolPB/StorageContainerDatanodeProtocolClientSideTranslatorPB.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocolPB/StorageContainerDatanodeProtocolClientSideTranslatorPB.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocolPB/StorageContainerDatanodeProtocolClientSideTranslatorPB.java
index 95d4cb0..a56c57a 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocolPB/StorageContainerDatanodeProtocolClientSideTranslatorPB.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocolPB/StorageContainerDatanodeProtocolClientSideTranslatorPB.java
@@ -29,8 +29,6 @@ import org.apache.hadoop.hdds.protocol.proto
import org.apache.hadoop.hdds.protocol.proto
.StorageContainerDatanodeProtocolProtos.ContainerReportsResponseProto;
import org.apache.hadoop.hdds.protocol.proto
- .StorageContainerDatanodeProtocolProtos.ReportState;
-import org.apache.hadoop.hdds.protocol.proto
.StorageContainerDatanodeProtocolProtos.SCMHeartbeatRequestProto;
import org.apache.hadoop.hdds.protocol.proto
.StorageContainerDatanodeProtocolProtos.SCMHeartbeatResponseProto;
@@ -133,12 +131,11 @@ public class StorageContainerDatanodeProtocolClientSideTranslatorPB
@Override
public SCMHeartbeatResponseProto sendHeartbeat(
DatanodeDetailsProto datanodeDetailsProto,
- SCMNodeReport nodeReport, ReportState reportState) throws IOException {
+ SCMNodeReport nodeReport) throws IOException {
SCMHeartbeatRequestProto.Builder req = SCMHeartbeatRequestProto
.newBuilder();
req.setDatanodeDetails(datanodeDetailsProto);
req.setNodeReport(nodeReport);
- req.setContainerReportState(reportState);
final SCMHeartbeatResponseProto resp;
try {
resp = rpcProxy.sendHeartbeat(NULL_RPC_CONTROLLER, req.build());
http://git-wip-us.apache.org/repos/asf/hadoop/blob/68c7fd8e/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocolPB/StorageContainerDatanodeProtocolServerSideTranslatorPB.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocolPB/StorageContainerDatanodeProtocolServerSideTranslatorPB.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocolPB/StorageContainerDatanodeProtocolServerSideTranslatorPB.java
index 139f04c..07dba57 100644
--- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocolPB/StorageContainerDatanodeProtocolServerSideTranslatorPB.java
+++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocolPB/StorageContainerDatanodeProtocolServerSideTranslatorPB.java
@@ -88,8 +88,7 @@ public class StorageContainerDatanodeProtocolServerSideTranslatorPB
SCMHeartbeatRequestProto request) throws ServiceException {
try {
return impl.sendHeartbeat(request.getDatanodeDetails(),
- request.getNodeReport(),
- request.getContainerReportState());
+ request.getNodeReport());
} catch (IOException e) {
throw new ServiceException(e);
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/68c7fd8e/hadoop-hdds/container-service/src/main/proto/StorageContainerDatanodeProtocol.proto
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/main/proto/StorageContainerDatanodeProtocol.proto b/hadoop-hdds/container-service/src/main/proto/StorageContainerDatanodeProtocol.proto
index 2b34d11..91070b3 100644
--- a/hadoop-hdds/container-service/src/main/proto/StorageContainerDatanodeProtocol.proto
+++ b/hadoop-hdds/container-service/src/main/proto/StorageContainerDatanodeProtocol.proto
@@ -42,45 +42,6 @@ import "hdds.proto";
message SCMHeartbeatRequestProto {
required DatanodeDetailsProto datanodeDetails = 1;
optional SCMNodeReport nodeReport = 2;
- optional ReportState containerReportState = 3;
-}
-
-enum DatanodeContainerState {
- closed = 0;
- open = 1;
-}
-
-/**
-NodeState contains messages from datanode to SCM saying that it has
-some information that SCM might be interested in.*/
-message ReportState {
- enum states {
- noContainerReports = 0;
- completeContinerReport = 1;
- deltaContainerReport = 2;
- }
- required states state = 1;
- required int64 count = 2 [default = 0];
-}
-
-
-/**
-This message is used to persist the information about a container in the
-SCM database, This information allows SCM to startup faster and avoid having
-all container info in memory all the time.
- */
-message ContainerPersistanceProto {
- required DatanodeContainerState state = 1;
- required hadoop.hdds.Pipeline pipeline = 2;
- required ContainerInfo info = 3;
-}
-
-/**
-This message is used to do a quick look up of which containers are effected
-if a node goes down
-*/
-message NodeContianerMapping {
- repeated string contianerName = 1;
}
/**
http://git-wip-us.apache.org/repos/asf/hadoop/blob/68c7fd8e/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/ScmTestMock.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/ScmTestMock.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/ScmTestMock.java
index 948367a..c57a366 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/ScmTestMock.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/ScmTestMock.java
@@ -30,8 +30,6 @@ import org.apache.hadoop.hdds.protocol.proto
import org.apache.hadoop.hdds.protocol.proto
.StorageContainerDatanodeProtocolProtos.ContainerInfo;
import org.apache.hadoop.hdds.protocol.proto
- .StorageContainerDatanodeProtocolProtos.ReportState;
-import org.apache.hadoop.hdds.protocol.proto
.StorageContainerDatanodeProtocolProtos.SCMCommandResponseProto;
import org.apache.hadoop.hdds.protocol.proto
.StorageContainerDatanodeProtocolProtos.SCMHeartbeatResponseProto;
@@ -53,7 +51,6 @@ public class ScmTestMock implements StorageContainerDatanodeProtocol {
private int rpcResponseDelay;
private AtomicInteger heartbeatCount = new AtomicInteger(0);
private AtomicInteger rpcCount = new AtomicInteger(0);
- private ReportState reportState;
private AtomicInteger containerReportsCount = new AtomicInteger(0);
// Map of datanode to containers
@@ -177,11 +174,10 @@ public class ScmTestMock implements StorageContainerDatanodeProtocol {
@Override
public StorageContainerDatanodeProtocolProtos.SCMHeartbeatResponseProto
sendHeartbeat(DatanodeDetailsProto datanodeDetailsProto,
- SCMNodeReport nodeReport, ReportState scmReportState)
+ SCMNodeReport nodeReport)
throws IOException {
rpcCount.incrementAndGet();
heartbeatCount.incrementAndGet();
- this.reportState = scmReportState;
sleepIfNeeded();
List<SCMCommandResponseProto>
cmdResponses = new LinkedList<>();
@@ -298,19 +294,12 @@ public class ScmTestMock implements StorageContainerDatanodeProtocol {
.newBuilder().getDefaultInstanceForType();
}
- public ReportState getReportState() {
- return this.reportState;
- }
-
/**
* Reset the mock Scm for test to get a fresh start without rebuild MockScm.
*/
public void reset() {
heartbeatCount.set(0);
rpcCount.set(0);
- reportState = ReportState.newBuilder()
- .setState(ReportState.states.noContainerReports)
- .setCount(0).build();
containerReportsCount.set(0);
nodeContainers.clear();
http://git-wip-us.apache.org/repos/asf/hadoop/blob/68c7fd8e/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestDatanodeStateMachine.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestDatanodeStateMachine.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestDatanodeStateMachine.java
index 9a9aab1..ee82c57 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestDatanodeStateMachine.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestDatanodeStateMachine.java
@@ -21,8 +21,6 @@ import com.google.common.util.concurrent.ThreadFactoryBuilder;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdds.scm.ScmConfigKeys;
import org.apache.hadoop.hdds.protocol.DatanodeDetails;
-import org.apache.hadoop.hdds.protocol.proto
- .StorageContainerDatanodeProtocolProtos;
import org.apache.hadoop.ipc.RPC;
import org.apache.hadoop.ozone.OzoneConfigKeys;
import org.apache.hadoop.ozone.container.common.helpers.ContainerUtils;
@@ -305,11 +303,6 @@ public class TestDatanodeStateMachine {
for (ScmTestMock mock : mockServers) {
Assert.assertEquals(1, mock.getHeartbeatCount());
- // Assert that heartbeat did indeed carry that State that we said
- // have in the datanode.
- Assert.assertEquals(mock.getReportState().getState().getNumber(),
- StorageContainerDatanodeProtocolProtos.ReportState.states
- .noContainerReports.getNumber());
}
}
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/68c7fd8e/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/HeartbeatQueueItem.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/HeartbeatQueueItem.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/HeartbeatQueueItem.java
index 43720f0..05a9fc3 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/HeartbeatQueueItem.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/HeartbeatQueueItem.java
@@ -21,8 +21,6 @@ package org.apache.hadoop.hdds.scm.node;
import com.google.common.annotations.VisibleForTesting;
import org.apache.hadoop.hdds.protocol.DatanodeDetails;
import org.apache.hadoop.hdds.protocol.proto
- .StorageContainerDatanodeProtocolProtos.ReportState;
-import org.apache.hadoop.hdds.protocol.proto
.StorageContainerDatanodeProtocolProtos.SCMNodeReport;
import static org.apache.hadoop.util.Time.monotonicNow;
@@ -34,21 +32,18 @@ public class HeartbeatQueueItem {
private DatanodeDetails datanodeDetails;
private long recvTimestamp;
private SCMNodeReport nodeReport;
- private ReportState containerReportState;
/**
*
* @param datanodeDetails - datanode ID of the heartbeat.
* @param recvTimestamp - heartbeat receive timestamp.
* @param nodeReport - node report associated with the heartbeat if any.
- * @param containerReportState - container report state.
*/
HeartbeatQueueItem(DatanodeDetails datanodeDetails, long recvTimestamp,
- SCMNodeReport nodeReport, ReportState containerReportState) {
+ SCMNodeReport nodeReport) {
this.datanodeDetails = datanodeDetails;
this.recvTimestamp = recvTimestamp;
this.nodeReport = nodeReport;
- this.containerReportState = containerReportState;
}
/**
@@ -66,13 +61,6 @@ public class HeartbeatQueueItem {
}
/**
- * @return container report state.
- */
- public ReportState getContainerReportState() {
- return containerReportState;
- }
-
- /**
* @return heartbeat receive timestamp.
*/
public long getRecvTimestamp() {
@@ -85,7 +73,6 @@ public class HeartbeatQueueItem {
public static class Builder {
private DatanodeDetails datanodeDetails;
private SCMNodeReport nodeReport;
- private ReportState containerReportState;
private long recvTimestamp = monotonicNow();
public Builder setDatanodeDetails(DatanodeDetails dnDetails) {
@@ -98,11 +85,6 @@ public class HeartbeatQueueItem {
return this;
}
- public Builder setContainerReportState(ReportState crs) {
- this.containerReportState = crs;
- return this;
- }
-
@VisibleForTesting
public Builder setRecvTimestamp(long recvTime) {
this.recvTimestamp = recvTime;
@@ -110,8 +92,7 @@ public class HeartbeatQueueItem {
}
public HeartbeatQueueItem build() {
- return new HeartbeatQueueItem(datanodeDetails, recvTimestamp, nodeReport,
- containerReportState);
+ return new HeartbeatQueueItem(datanodeDetails, recvTimestamp, nodeReport);
}
}
}
\ No newline at end of file
http://git-wip-us.apache.org/repos/asf/hadoop/blob/68c7fd8e/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeManager.java
index cf1d8a5..353a069 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeManager.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeManager.java
@@ -31,8 +31,6 @@ import org.apache.hadoop.hdds.protocol.DatanodeDetails;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos.DatanodeDetailsProto;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState;
import org.apache.hadoop.hdds.protocol.proto
- .StorageContainerDatanodeProtocolProtos.ReportState;
-import org.apache.hadoop.hdds.protocol.proto
.StorageContainerDatanodeProtocolProtos.SCMNodeReport;
import org.apache.hadoop.hdds.protocol.proto
.StorageContainerDatanodeProtocolProtos.SCMRegisteredCmdResponseProto
@@ -48,7 +46,6 @@ import org.apache.hadoop.ozone.protocol.VersionResponse;
import org.apache.hadoop.ozone.protocol.commands.RegisteredCommand;
import org.apache.hadoop.ozone.protocol.commands.ReregisterCommand;
import org.apache.hadoop.ozone.protocol.commands.SCMCommand;
-import org.apache.hadoop.ozone.protocol.commands.SendContainerCommand;
import org.apache.hadoop.util.Time;
import org.apache.hadoop.util.concurrent.HadoopExecutors;
import org.slf4j.Logger;
@@ -609,8 +606,6 @@ public class SCMNodeManager
if (healthyNodes.containsKey(datanodeUuid)) {
healthyNodes.put(datanodeUuid, processTimestamp);
updateNodeStat(datanodeUuid, nodeReport);
- updateCommandQueue(datanodeUuid,
- hbItem.getContainerReportState().getState());
return;
}
@@ -622,8 +617,6 @@ public class SCMNodeManager
healthyNodeCount.incrementAndGet();
staleNodeCount.decrementAndGet();
updateNodeStat(datanodeUuid, nodeReport);
- updateCommandQueue(datanodeUuid,
- hbItem.getContainerReportState().getState());
return;
}
@@ -635,8 +628,6 @@ public class SCMNodeManager
deadNodeCount.decrementAndGet();
healthyNodeCount.incrementAndGet();
updateNodeStat(datanodeUuid, nodeReport);
- updateCommandQueue(datanodeUuid,
- hbItem.getContainerReportState().getState());
return;
}
@@ -671,22 +662,6 @@ public class SCMNodeManager
}
}
- private void updateCommandQueue(UUID dnId,
- ReportState.states containerReportState) {
- if (containerReportState != null) {
- switch (containerReportState) {
- case completeContinerReport:
- commandQueue.addCommand(dnId,
- SendContainerCommand.newBuilder().build());
- return;
- case deltaContainerReport:
- case noContainerReports:
- default:
- // do nothing
- }
- }
- }
-
/**
* Closes this stream and releases any system resources associated with it. If
* the stream is already closed then invoking this method has no effect.
@@ -829,14 +804,12 @@ public class SCMNodeManager
*
* @param datanodeDetailsProto - DatanodeDetailsProto.
* @param nodeReport - node report.
- * @param containerReportState - container report state.
* @return SCMheartbeat response.
* @throws IOException
*/
@Override
public List<SCMCommand> sendHeartbeat(
- DatanodeDetailsProto datanodeDetailsProto, SCMNodeReport nodeReport,
- ReportState containerReportState) {
+ DatanodeDetailsProto datanodeDetailsProto, SCMNodeReport nodeReport) {
Preconditions.checkNotNull(datanodeDetailsProto, "Heartbeat is missing " +
"DatanodeDetails.");
@@ -851,7 +824,6 @@ public class SCMNodeManager
new HeartbeatQueueItem.Builder()
.setDatanodeDetails(datanodeDetails)
.setNodeReport(nodeReport)
- .setContainerReportState(containerReportState)
.build());
return commandQueue.getCommand(datanodeDetails.getUuid());
} else {
http://git-wip-us.apache.org/repos/asf/hadoop/blob/68c7fd8e/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMDatanodeProtocolServer.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMDatanodeProtocolServer.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMDatanodeProtocolServer.java
index 91ed032..58b8c82 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMDatanodeProtocolServer.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMDatanodeProtocolServer.java
@@ -153,12 +153,10 @@ public class SCMDatanodeProtocolServer implements
@Override
public SCMHeartbeatResponseProto sendHeartbeat(
HddsProtos.DatanodeDetailsProto datanodeDetails,
- StorageContainerDatanodeProtocolProtos.SCMNodeReport nodeReport,
- StorageContainerDatanodeProtocolProtos.ReportState reportState)
+ StorageContainerDatanodeProtocolProtos.SCMNodeReport nodeReport)
throws IOException {
List<SCMCommand> commands =
- scm.getScmNodeManager().sendHeartbeat(datanodeDetails, nodeReport,
- reportState);
+ scm.getScmNodeManager().sendHeartbeat(datanodeDetails, nodeReport);
List<SCMCommandResponseProto> cmdResponses = new LinkedList<>();
for (SCMCommand cmd : commands) {
cmdResponses.add(getCommandResponse(cmd, datanodeDetails.getUuid()));
http://git-wip-us.apache.org/repos/asf/hadoop/blob/68c7fd8e/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/MockNodeManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/MockNodeManager.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/MockNodeManager.java
index d8b8b5e..a46d7ba 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/MockNodeManager.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/MockNodeManager.java
@@ -24,8 +24,6 @@ import org.apache.hadoop.hdfs.protocol.UnregisteredNodeException;
import org.apache.hadoop.hdds.protocol.DatanodeDetails;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
import org.apache.hadoop.hdds.protocol.proto
- .StorageContainerDatanodeProtocolProtos.ReportState;
-import org.apache.hadoop.hdds.protocol.proto
.StorageContainerDatanodeProtocolProtos.SCMNodeReport;
import org.apache.hadoop.hdds.protocol.proto
.StorageContainerDatanodeProtocolProtos.SCMStorageReport;
@@ -387,13 +385,12 @@ public class MockNodeManager implements NodeManager {
*
* @param datanodeDetails - Datanode ID.
* @param nodeReport - node report.
- * @param containerReportState - container report state.
* @return SCMheartbeat response list
*/
@Override
public List<SCMCommand> sendHeartbeat(
HddsProtos.DatanodeDetailsProto datanodeDetails,
- SCMNodeReport nodeReport, ReportState containerReportState) {
+ SCMNodeReport nodeReport) {
if ((datanodeDetails != null) && (nodeReport != null) && (nodeReport
.getStorageReportCount() > 0)) {
SCMNodeStat stat = this.nodeMetricMap.get(datanodeDetails.getUuid());
http://git-wip-us.apache.org/repos/asf/hadoop/blob/68c7fd8e/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestContainerPlacement.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestContainerPlacement.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestContainerPlacement.java
index 321e4e2..09b6cd1 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestContainerPlacement.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestContainerPlacement.java
@@ -26,7 +26,6 @@ import org.apache.hadoop.hdds.scm.TestUtils;
import org.apache.hadoop.hdds.scm.XceiverClientManager;
import org.apache.hadoop.hdds.scm.container.ContainerMapping;
import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerInfo;
-import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline;
import org.apache.hadoop.hdds.scm.container.placement.algorithms
.ContainerPlacementPolicy;
import org.apache.hadoop.hdds.scm.container.placement.algorithms
@@ -34,8 +33,6 @@ import org.apache.hadoop.hdds.scm.container.placement.algorithms
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.hdds.protocol.DatanodeDetails;
import org.apache.hadoop.hdds.protocol.proto
- .StorageContainerDatanodeProtocolProtos.ReportState;
-import org.apache.hadoop.hdds.protocol.proto
.StorageContainerDatanodeProtocolProtos.SCMStorageReport;
import org.apache.hadoop.ozone.OzoneConfigKeys;
import org.apache.hadoop.ozone.OzoneConsts;
@@ -70,10 +67,6 @@ public class TestContainerPlacement {
private static XceiverClientManager xceiverClientManager =
new XceiverClientManager(new OzoneConfiguration());
- private ReportState reportState = ReportState.newBuilder()
- .setState(ReportState.states.noContainerReports)
- .setCount(0).build();
-
/**
* Returns a new copy of Configuration.
*
@@ -143,7 +136,7 @@ public class TestContainerPlacement {
List<SCMStorageReport> reports = TestUtils
.createStorageReport(capacity, used, remaining, path, null, id, 1);
nodeManager.sendHeartbeat(datanodeDetails.getProtoBufMessage(),
- TestUtils.createNodeReport(reports), reportState);
+ TestUtils.createNodeReport(reports));
}
GenericTestUtils.waitFor(() -> nodeManager.waitForHeartbeatProcessed(),
http://git-wip-us.apache.org/repos/asf/hadoop/blob/68c7fd8e/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestNodeManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestNodeManager.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestNodeManager.java
index 9fe38ce..36e796f 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestNodeManager.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestNodeManager.java
@@ -26,10 +26,6 @@ import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.hdds.protocol.DatanodeDetails;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
import org.apache.hadoop.hdds.protocol.proto
- .StorageContainerDatanodeProtocolProtos.ReportState;
-import org.apache.hadoop.hdds.protocol.proto
- .StorageContainerDatanodeProtocolProtos.SCMNodeReport;
-import org.apache.hadoop.hdds.protocol.proto
.StorageContainerDatanodeProtocolProtos.SCMStorageReport;
import org.apache.hadoop.ozone.OzoneConfigKeys;
import org.apache.hadoop.ozone.protocol.commands.SCMCommand;
@@ -82,10 +78,6 @@ public class TestNodeManager {
private File testDir;
- private ReportState reportState = ReportState.newBuilder()
- .setState(ReportState.states.noContainerReports)
- .setCount(0).build();
-
@Rule
public ExpectedException thrown = ExpectedException.none();
@@ -153,7 +145,7 @@ public class TestNodeManager {
DatanodeDetails datanodeDetails = TestUtils.getDatanodeDetails(
nodeManager);
nodeManager.sendHeartbeat(datanodeDetails.getProtoBufMessage(),
- null, reportState);
+ null);
}
// Wait for 4 seconds max.
@@ -200,8 +192,7 @@ public class TestNodeManager {
// Need 100 nodes to come out of chill mode, only one node is sending HB.
nodeManager.setMinimumChillModeNodes(100);
nodeManager.sendHeartbeat(TestUtils.getDatanodeDetails(nodeManager)
- .getProtoBufMessage(),
- null, reportState);
+ .getProtoBufMessage(), null);
GenericTestUtils.waitFor(() -> nodeManager.waitForHeartbeatProcessed(),
100, 4 * 1000);
assertFalse("Not enough heartbeat, Node manager should have" +
@@ -229,7 +220,7 @@ public class TestNodeManager {
// Send 10 heartbeat from same node, and assert we never leave chill mode.
for (int x = 0; x < 10; x++) {
nodeManager.sendHeartbeat(datanodeDetails.getProtoBufMessage(),
- null, reportState);
+ null);
}
GenericTestUtils.waitFor(() -> nodeManager.waitForHeartbeatProcessed(),
@@ -260,7 +251,7 @@ public class TestNodeManager {
// These should never be processed.
nodeManager.sendHeartbeat(datanodeDetails.getProtoBufMessage(),
- null, reportState);
+ null);
// Let us just wait for 2 seconds to prove that HBs are not processed.
Thread.sleep(2 * 1000);
@@ -289,8 +280,7 @@ public class TestNodeManager {
nodemanager.register(datanodeDetails.getProtoBufMessage(),
TestUtils.createNodeReport(reports));
List<SCMCommand> command = nodemanager.sendHeartbeat(
- datanodeDetails.getProtoBufMessage(),
- null, reportState);
+ datanodeDetails.getProtoBufMessage(), null);
Assert.assertTrue(nodemanager.getAllNodes().contains(datanodeDetails));
Assert.assertTrue("On regular HB calls, SCM responses a "
+ "datanode with an empty command list", command.isEmpty());
@@ -309,7 +299,7 @@ public class TestNodeManager {
@Override public Boolean get() {
List<SCMCommand> command =
nodemanager.sendHeartbeat(datanodeDetails.getProtoBufMessage(),
- null, reportState);
+ null);
return command.size() == 1 && command.get(0).getType()
.equals(SCMCmdType.reregisterCommand);
}
@@ -341,7 +331,7 @@ public class TestNodeManager {
DatanodeDetails datanodeDetails = TestUtils.getDatanodeDetails(
nodeManager);
nodeManager.sendHeartbeat(datanodeDetails.getProtoBufMessage(),
- null, reportState);
+ null);
}
GenericTestUtils.waitFor(() -> nodeManager.waitForHeartbeatProcessed(),
100, 4 * 1000);
@@ -433,18 +423,18 @@ public class TestNodeManager {
// Heartbeat once
nodeManager.sendHeartbeat(staleNode.getProtoBufMessage(),
- null, reportState);
+ null);
// Heartbeat all other nodes.
for (DatanodeDetails dn : nodeList) {
- nodeManager.sendHeartbeat(dn.getProtoBufMessage(), null, reportState);
+ nodeManager.sendHeartbeat(dn.getProtoBufMessage(), null);
}
// Wait for 2 seconds .. and heartbeat good nodes again.
Thread.sleep(2 * 1000);
for (DatanodeDetails dn : nodeList) {
- nodeManager.sendHeartbeat(dn.getProtoBufMessage(), null, reportState);
+ nodeManager.sendHeartbeat(dn.getProtoBufMessage(), null);
}
// Wait for 2 seconds, wait a total of 4 seconds to make sure that the
@@ -461,7 +451,7 @@ public class TestNodeManager {
// heartbeat good nodes again.
for (DatanodeDetails dn : nodeList) {
- nodeManager.sendHeartbeat(dn.getProtoBufMessage(), null, reportState);
+ nodeManager.sendHeartbeat(dn.getProtoBufMessage(), null);
}
// 6 seconds is the dead window for this test , so we wait a total of
@@ -497,7 +487,7 @@ public class TestNodeManager {
public void testScmCheckForErrorOnNullDatanodeDetails() throws IOException,
InterruptedException, TimeoutException {
try (SCMNodeManager nodeManager = createNodeManager(getConf())) {
- nodeManager.sendHeartbeat(null, null, reportState);
+ nodeManager.sendHeartbeat(null, null);
} catch (NullPointerException npe) {
GenericTestUtils.assertExceptionContains("Heartbeat is missing " +
"DatanodeDetails.", npe);
@@ -575,11 +565,11 @@ public class TestNodeManager {
DatanodeDetails deadNode =
TestUtils.getDatanodeDetails(nodeManager);
nodeManager.sendHeartbeat(
- healthyNode.getProtoBufMessage(), null, reportState);
+ healthyNode.getProtoBufMessage(), null);
nodeManager.sendHeartbeat(
- staleNode.getProtoBufMessage(), null, reportState);
+ staleNode.getProtoBufMessage(), null);
nodeManager.sendHeartbeat(
- deadNode.getProtoBufMessage(), null, reportState);
+ deadNode.getProtoBufMessage(), null);
// Sleep so that heartbeat processing thread gets to run.
Thread.sleep(500);
@@ -606,15 +596,15 @@ public class TestNodeManager {
*/
nodeManager.sendHeartbeat(
- healthyNode.getProtoBufMessage(), null, reportState);
+ healthyNode.getProtoBufMessage(), null);
nodeManager.sendHeartbeat(
- staleNode.getProtoBufMessage(), null, reportState);
+ staleNode.getProtoBufMessage(), null);
nodeManager.sendHeartbeat(
- deadNode.getProtoBufMessage(), null, reportState);
+ deadNode.getProtoBufMessage(), null);
Thread.sleep(1500);
nodeManager.sendHeartbeat(
- healthyNode.getProtoBufMessage(), null, reportState);
+ healthyNode.getProtoBufMessage(), null);
Thread.sleep(2 * 1000);
assertEquals(1, nodeManager.getNodeCount(HEALTHY));
@@ -635,12 +625,12 @@ public class TestNodeManager {
*/
nodeManager.sendHeartbeat(
- healthyNode.getProtoBufMessage(), null, reportState);
+ healthyNode.getProtoBufMessage(), null);
nodeManager.sendHeartbeat(
- staleNode.getProtoBufMessage(), null, reportState);
+ staleNode.getProtoBufMessage(), null);
Thread.sleep(1500);
nodeManager.sendHeartbeat(
- healthyNode.getProtoBufMessage(), null, reportState);
+ healthyNode.getProtoBufMessage(), null);
Thread.sleep(2 * 1000);
// 3.5 seconds have elapsed for stale node, so it moves into Stale.
@@ -674,11 +664,11 @@ public class TestNodeManager {
* back all the nodes in healthy state.
*/
nodeManager.sendHeartbeat(
- healthyNode.getProtoBufMessage(), null, reportState);
+ healthyNode.getProtoBufMessage(), null);
nodeManager.sendHeartbeat(
- staleNode.getProtoBufMessage(), null, reportState);
+ staleNode.getProtoBufMessage(), null);
nodeManager.sendHeartbeat(
- deadNode.getProtoBufMessage(), null, reportState);
+ deadNode.getProtoBufMessage(), null);
Thread.sleep(500);
//Assert all nodes are healthy.
assertEquals(3, nodeManager.getAllNodes().size());
@@ -699,7 +689,7 @@ public class TestNodeManager {
int sleepDuration) throws InterruptedException {
while (!Thread.currentThread().isInterrupted()) {
for (DatanodeDetails dn : list) {
- manager.sendHeartbeat(dn.getProtoBufMessage(), null, reportState);
+ manager.sendHeartbeat(dn.getProtoBufMessage(), null);
}
Thread.sleep(sleepDuration);
}
@@ -785,7 +775,7 @@ public class TestNodeManager {
// No Thread just one time HBs the node manager, so that these will be
// marked as dead nodes eventually.
for (DatanodeDetails dn : deadNodeList) {
- nodeManager.sendHeartbeat(dn.getProtoBufMessage(), null, reportState);
+ nodeManager.sendHeartbeat(dn.getProtoBufMessage(), null);
}
@@ -950,7 +940,7 @@ public class TestNodeManager {
DatanodeDetails datanodeDetails = TestUtils.getDatanodeDetails(
nodeManager);
nodeManager.sendHeartbeat(
- datanodeDetails.getProtoBufMessage(), null, reportState);
+ datanodeDetails.getProtoBufMessage(), null);
String status = nodeManager.getChillModeStatus();
Assert.assertThat(status, containsString("Still in chill " +
"mode, waiting on nodes to report in."));
@@ -978,7 +968,7 @@ public class TestNodeManager {
for (int x = 0; x < 20; x++) {
DatanodeDetails datanode = TestUtils.getDatanodeDetails(nodeManager);
nodeManager.sendHeartbeat(datanode.getProtoBufMessage(),
- null, reportState);
+ null);
}
Thread.sleep(500);
@@ -1023,7 +1013,7 @@ public class TestNodeManager {
.createStorageReport(capacity, used, free, storagePath,
null, dnId, 1);
nodeManager.sendHeartbeat(datanodeDetails.getProtoBufMessage(),
- TestUtils.createNodeReport(reports), reportState);
+ TestUtils.createNodeReport(reports));
}
GenericTestUtils.waitFor(() -> nodeManager.waitForHeartbeatProcessed(),
100, 4 * 1000);
@@ -1073,7 +1063,7 @@ public class TestNodeManager {
null, dnId, 1);
nodeManager.sendHeartbeat(datanodeDetails.getProtoBufMessage(),
- TestUtils.createNodeReport(reports), reportState);
+ TestUtils.createNodeReport(reports));
Thread.sleep(100);
}
@@ -1154,7 +1144,7 @@ public class TestNodeManager {
.createStorageReport(capacity, expectedScmUsed, expectedRemaining,
storagePath, null, dnId, 1);
nodeManager.sendHeartbeat(datanodeDetails.getProtoBufMessage(),
- TestUtils.createNodeReport(reports), reportState);
+ TestUtils.createNodeReport(reports));
// Wait up to 5 seconds so that the dead node becomes healthy
// Verify usage info should be updated.
http://git-wip-us.apache.org/repos/asf/hadoop/blob/68c7fd8e/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/common/TestEndPoint.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/common/TestEndPoint.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/common/TestEndPoint.java
index 9ac1467..e82dc98 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/common/TestEndPoint.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/common/TestEndPoint.java
@@ -71,9 +71,6 @@ import java.util.UUID;
import static org.apache.hadoop.hdds.scm.TestUtils.getDatanodeDetails;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY;
-import static org.apache.hadoop.hdds.protocol.proto
- .StorageContainerDatanodeProtocolProtos.ReportState.states
- .noContainerReports;
import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_METADATA_DIRS;
import static org.apache.hadoop.ozone.container.common.ContainerTestUtils
.createEndpoint;
@@ -88,8 +85,6 @@ public class TestEndPoint {
private static RPC.Server scmServer;
private static ScmTestMock scmServerImpl;
private static File testDir;
- private static StorageContainerDatanodeProtocolProtos.ReportState
- defaultReportState;
@AfterClass
public static void tearDown() throws Exception {
@@ -106,9 +101,6 @@ public class TestEndPoint {
scmServer = SCMTestUtils.startScmRpcServer(SCMTestUtils.getConf(),
scmServerImpl, serverAddress, 10);
testDir = PathUtils.getTestDir(TestEndPoint.class);
- defaultReportState = StorageContainerDatanodeProtocolProtos.
- ReportState.newBuilder().setState(noContainerReports).
- setCount(0).build();
}
@Test
@@ -305,8 +297,7 @@ public class TestEndPoint {
String storageId = UUID.randomUUID().toString();
SCMHeartbeatResponseProto responseProto = rpcEndPoint.getEndPoint()
.sendHeartbeat(dataNode.getProtoBufMessage(),
- TestUtils.createNodeReport(getStorageReports(storageId)),
- defaultReportState);
+ TestUtils.createNodeReport(getStorageReports(storageId)));
Assert.assertNotNull(responseProto);
Assert.assertEquals(0, responseProto.getCommandsCount());
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/68c7fd8e/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/testutils/ReplicationNodeManagerMock.java
----------------------------------------------------------------------
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/testutils/ReplicationNodeManagerMock.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/testutils/ReplicationNodeManagerMock.java
index b49b71b..3f814d0 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/testutils/ReplicationNodeManagerMock.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/testutils/ReplicationNodeManagerMock.java
@@ -27,8 +27,6 @@ import org.apache.hadoop.hdds.protocol.DatanodeDetails;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState;
import org.apache.hadoop.hdds.protocol.proto
- .StorageContainerDatanodeProtocolProtos.ReportState;
-import org.apache.hadoop.hdds.protocol.proto
.StorageContainerDatanodeProtocolProtos.SCMNodeReport;
import org.apache.hadoop.hdds.protocol.proto
.StorageContainerDatanodeProtocolProtos.SCMVersionRequestProto;
@@ -293,12 +291,11 @@ public class ReplicationNodeManagerMock implements NodeManager {
*
* @param dd - Datanode Details.
* @param nodeReport - node report.
- * @param containerReportState - container report state.
* @return SCMheartbeat response list
*/
@Override
public List<SCMCommand> sendHeartbeat(HddsProtos.DatanodeDetailsProto dd,
- SCMNodeReport nodeReport, ReportState containerReportState) {
+ SCMNodeReport nodeReport) {
return null;
}
http://git-wip-us.apache.org/repos/asf/hadoop/blob/68c7fd8e/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManager.java
----------------------------------------------------------------------
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManager.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManager.java
index 0081f0d..a0d41a8 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManager.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManager.java
@@ -33,7 +33,6 @@ import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeType;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState;
import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.DeletedBlocksTransaction;
-import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ReportState;
import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.SCMCmdType;
import org.apache.hadoop.hdds.scm.server.StorageContainerManager;
import org.apache.hadoop.hdds.scm.server.StorageContainerManager.StartupOption;
@@ -302,11 +301,9 @@ public class TestStorageContainerManager {
GenericTestUtils.waitFor(() -> {
NodeManager nodeManager = cluster.getStorageContainerManager()
.getScmNodeManager();
- ReportState reportState = ReportState.newBuilder()
- .setState(ReportState.states.noContainerReports).setCount(0).build();
List<SCMCommand> commands = nodeManager.sendHeartbeat(
nodeManager.getNodes(NodeState.HEALTHY).get(0).getProtoBufMessage(),
- null, reportState);
+ null);
if (commands != null) {
for (SCMCommand cmd : commands) {
---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscribe@hadoop.apache.org
For additional commands, e-mail: common-commits-help@hadoop.apache.org