Posted to commits@iotdb.apache.org by ca...@apache.org on 2022/11/10 05:03:34 UTC
[iotdb] branch master updated: [IOTDB-4851] Perfect ChangeLeader method in RegionMigrateProcedure (#7948)
This is an automated email from the ASF dual-hosted git repository.
caogaofei pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/iotdb.git
The following commit(s) were added to refs/heads/master by this push:
new c1dafdd072 [IOTDB-4851] Perfect ChangeLeader method in RegionMigrateProcedure (#7948)
c1dafdd072 is described below
commit c1dafdd07241841aedfc21f5858c3e1ae90c17cb
Author: Beyyes <cg...@foxmail.com>
AuthorDate: Thu Nov 10 13:03:29 2022 +0800
[IOTDB-4851] Perfect ChangeLeader method in RegionMigrateProcedure (#7948)
---
.../client/sync/SyncDataNodeClientPool.java | 98 ++++++++++++++--------
.../manager/load/balancer/RouteBalancer.java | 5 ++
.../confignode/persistence/node/NodeInfo.java | 4 +-
.../procedure/env/DataNodeRemoveHandler.java | 78 +++++++++++------
.../impl/statemachine/RegionMigrateProcedure.java | 19 ++---
docs/UserGuide/Cluster/Cluster-Setup.md | 2 +-
docs/zh/UserGuide/Cluster/Cluster-Setup.md | 2 +-
.../impl/DataNodeInternalRPCServiceImpl.java | 2 +-
8 files changed, 133 insertions(+), 77 deletions(-)
diff --git a/confignode/src/main/java/org/apache/iotdb/confignode/client/sync/SyncDataNodeClientPool.java b/confignode/src/main/java/org/apache/iotdb/confignode/client/sync/SyncDataNodeClientPool.java
index d50210bb4d..caa8528b4c 100644
--- a/confignode/src/main/java/org/apache/iotdb/confignode/client/sync/SyncDataNodeClientPool.java
+++ b/confignode/src/main/java/org/apache/iotdb/confignode/client/sync/SyncDataNodeClientPool.java
@@ -50,7 +50,7 @@ public class SyncDataNodeClientPool {
private static final Logger LOGGER = LoggerFactory.getLogger(SyncDataNodeClientPool.class);
- private static final int retryNum = 6;
+ private static final int DEFAULT_RETRY_NUM = 6;
private final IClientManager<TEndPoint, SyncDataNodeInternalServiceClient> clientManager;
@@ -64,41 +64,31 @@ public class SyncDataNodeClientPool {
public TSStatus sendSyncRequestToDataNodeWithRetry(
TEndPoint endPoint, Object req, DataNodeRequestType requestType) {
Throwable lastException = null;
+ for (int retry = 0; retry < DEFAULT_RETRY_NUM; retry++) {
+ try (SyncDataNodeInternalServiceClient client = clientManager.borrowClient(endPoint)) {
+ return executeSyncRequest(requestType, client, req);
+ } catch (TException | IOException e) {
+ lastException = e;
+ LOGGER.warn(
+ "{} failed on DataNode {}, because {}, retrying {}...",
+ requestType,
+ endPoint,
+ e.getMessage(),
+ retry);
+ doRetryWait(retry);
+ }
+ }
+ LOGGER.error("{} failed on DataNode {}", requestType, endPoint, lastException);
+ return new TSStatus(TSStatusCode.ALL_RETRY_FAILED.getStatusCode())
+ .setMessage("All retry failed due to: " + lastException.getMessage());
+ }
+
+ public TSStatus sendSyncRequestToDataNodeWithGivenRetry(
+ TEndPoint endPoint, Object req, DataNodeRequestType requestType, int retryNum) {
+ Throwable lastException = new TException();
for (int retry = 0; retry < retryNum; retry++) {
try (SyncDataNodeInternalServiceClient client = clientManager.borrowClient(endPoint)) {
- switch (requestType) {
- case INVALIDATE_PARTITION_CACHE:
- return client.invalidatePartitionCache((TInvalidateCacheReq) req);
- case INVALIDATE_SCHEMA_CACHE:
- return client.invalidateSchemaCache((TInvalidateCacheReq) req);
- case CREATE_SCHEMA_REGION:
- return client.createSchemaRegion((TCreateSchemaRegionReq) req);
- case CREATE_DATA_REGION:
- return client.createDataRegion((TCreateDataRegionReq) req);
- case DELETE_REGION:
- return client.deleteRegion((TConsensusGroupId) req);
- case INVALIDATE_PERMISSION_CACHE:
- return client.invalidatePermissionCache((TInvalidatePermissionCacheReq) req);
- case DISABLE_DATA_NODE:
- return client.disableDataNode((TDisableDataNodeReq) req);
- case STOP_DATA_NODE:
- return client.stopDataNode();
- case SET_SYSTEM_STATUS:
- return client.setSystemStatus((String) req);
- case UPDATE_TEMPLATE:
- return client.updateTemplate((TUpdateTemplateReq) req);
- case CREATE_NEW_REGION_PEER:
- return client.createNewRegionPeer((TCreatePeerReq) req);
- case ADD_REGION_PEER:
- return client.addRegionPeer((TMaintainPeerReq) req);
- case REMOVE_REGION_PEER:
- return client.removeRegionPeer((TMaintainPeerReq) req);
- case DELETE_OLD_REGION_PEER:
- return client.deleteOldRegionPeer((TMaintainPeerReq) req);
- default:
- return RpcUtils.getStatus(
- TSStatusCode.EXECUTE_STATEMENT_ERROR, "Unknown request type: " + requestType);
- }
+ return executeSyncRequest(requestType, client, req);
} catch (TException | IOException e) {
lastException = e;
LOGGER.warn(
@@ -115,6 +105,44 @@ public class SyncDataNodeClientPool {
.setMessage("All retry failed due to: " + lastException.getMessage());
}
+ private TSStatus executeSyncRequest(
+ DataNodeRequestType requestType, SyncDataNodeInternalServiceClient client, Object req)
+ throws TException {
+ switch (requestType) {
+ case INVALIDATE_PARTITION_CACHE:
+ return client.invalidatePartitionCache((TInvalidateCacheReq) req);
+ case INVALIDATE_SCHEMA_CACHE:
+ return client.invalidateSchemaCache((TInvalidateCacheReq) req);
+ case CREATE_SCHEMA_REGION:
+ return client.createSchemaRegion((TCreateSchemaRegionReq) req);
+ case CREATE_DATA_REGION:
+ return client.createDataRegion((TCreateDataRegionReq) req);
+ case DELETE_REGION:
+ return client.deleteRegion((TConsensusGroupId) req);
+ case INVALIDATE_PERMISSION_CACHE:
+ return client.invalidatePermissionCache((TInvalidatePermissionCacheReq) req);
+ case DISABLE_DATA_NODE:
+ return client.disableDataNode((TDisableDataNodeReq) req);
+ case STOP_DATA_NODE:
+ return client.stopDataNode();
+ case SET_SYSTEM_STATUS:
+ return client.setSystemStatus((String) req);
+ case UPDATE_TEMPLATE:
+ return client.updateTemplate((TUpdateTemplateReq) req);
+ case CREATE_NEW_REGION_PEER:
+ return client.createNewRegionPeer((TCreatePeerReq) req);
+ case ADD_REGION_PEER:
+ return client.addRegionPeer((TMaintainPeerReq) req);
+ case REMOVE_REGION_PEER:
+ return client.removeRegionPeer((TMaintainPeerReq) req);
+ case DELETE_OLD_REGION_PEER:
+ return client.deleteOldRegionPeer((TMaintainPeerReq) req);
+ default:
+ return RpcUtils.getStatus(
+ TSStatusCode.EXECUTE_STATEMENT_ERROR, "Unknown request type: " + requestType);
+ }
+ }
+
private void doRetryWait(int retryNum) {
try {
TimeUnit.MILLISECONDS.sleep(100L * (long) Math.pow(2, retryNum));
@@ -134,7 +162,7 @@ public class SyncDataNodeClientPool {
*/
public TSStatus changeRegionLeader(
TConsensusGroupId regionId, TEndPoint dataNode, TDataNodeLocation newLeaderNode) {
- LOGGER.info("send RPC to data node: {} for changing regions leader on it", dataNode);
+ LOGGER.info("Send RPC to data node: {} for changing regions leader on it", dataNode);
TSStatus status;
try (SyncDataNodeInternalServiceClient client = clientManager.borrowClient(dataNode)) {
TRegionLeaderChangeReq req = new TRegionLeaderChangeReq(regionId, newLeaderNode);
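The hunks above keep the retry shape of sendSyncRequestToDataNodeWithRetry (and the new sendSyncRequestToDataNodeWithGivenRetry) while moving the per-request dispatch into executeSyncRequest. As a minimal, self-contained sketch of that retry-with-exponential-backoff pattern — the class, Callable-based request, and names below are illustrative assumptions, not IoTDB code:

import java.util.concurrent.Callable;
import java.util.concurrent.TimeUnit;

public class RetryWithBackoffSketch {

  // Try the request up to retryNum times; on failure wait 100ms * 2^retry before
  // the next attempt, mirroring doRetryWait in SyncDataNodeClientPool.
  static <T> T callWithRetry(Callable<T> request, int retryNum) throws Exception {
    Exception lastException = null;
    for (int retry = 0; retry < retryNum; retry++) {
      try {
        return request.call();
      } catch (Exception e) {
        lastException = e;
        doRetryWait(retry);
      }
    }
    throw new Exception("All retry failed", lastException);
  }

  static void doRetryWait(int retry) {
    try {
      TimeUnit.MILLISECONDS.sleep(100L * (long) Math.pow(2, retry));
    } catch (InterruptedException e) {
      Thread.currentThread().interrupt();
    }
  }

  public static void main(String[] args) throws Exception {
    // First two attempts fail, the third succeeds after ~100ms + 200ms of backoff.
    int[] attempts = {0};
    String result =
        callWithRetry(
            () -> {
              if (attempts[0]++ < 2) {
                throw new Exception("connection refused");
              }
              return "ok";
            },
            6);
    System.out.println(result);
  }
}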
diff --git a/confignode/src/main/java/org/apache/iotdb/confignode/manager/load/balancer/RouteBalancer.java b/confignode/src/main/java/org/apache/iotdb/confignode/manager/load/balancer/RouteBalancer.java
index e9d21891a8..76b37dff45 100644
--- a/confignode/src/main/java/org/apache/iotdb/confignode/manager/load/balancer/RouteBalancer.java
+++ b/confignode/src/main/java/org/apache/iotdb/confignode/manager/load/balancer/RouteBalancer.java
@@ -313,6 +313,11 @@ public class RouteBalancer {
}
}
+ public void changeLeaderForMultiLeaderConsensus(
+ TConsensusGroupId regionGroupId, int newLeaderId) {
+ regionRouteMap.setLeader(regionGroupId, newLeaderId);
+ }
+
private void changeRegionLeader(
String consensusProtocolClass,
AtomicInteger requestId,
diff --git a/confignode/src/main/java/org/apache/iotdb/confignode/persistence/node/NodeInfo.java b/confignode/src/main/java/org/apache/iotdb/confignode/persistence/node/NodeInfo.java
index ad2f4d23b3..cb6e0f22ca 100644
--- a/confignode/src/main/java/org/apache/iotdb/confignode/persistence/node/NodeInfo.java
+++ b/confignode/src/main/java/org/apache/iotdb/confignode/persistence/node/NodeInfo.java
@@ -165,7 +165,7 @@ public class NodeInfo implements SnapshotProcessor {
*/
public TSStatus removeDataNode(RemoveDataNodePlan req) {
LOGGER.info(
- "{}, There are {} data node in cluster before executed remove-datanode.sh",
+ "{}, There are {} data node in cluster before executed RemoveDataNodePlan",
REMOVE_DATANODE_PROCESS,
registeredDataNodes.size());
@@ -181,7 +181,7 @@ public class NodeInfo implements SnapshotProcessor {
dataNodeInfoReadWriteLock.writeLock().unlock();
}
LOGGER.info(
- "{}, There are {} data node in cluster after executed remove-datanode.sh",
+ "{}, There are {} data node in cluster after executed RemoveDataNodePlan",
REMOVE_DATANODE_PROCESS,
registeredDataNodes.size());
return new TSStatus(TSStatusCode.SUCCESS_STATUS.getStatusCode());
diff --git a/confignode/src/main/java/org/apache/iotdb/confignode/procedure/env/DataNodeRemoveHandler.java b/confignode/src/main/java/org/apache/iotdb/confignode/procedure/env/DataNodeRemoveHandler.java
index beabdbbdf8..8575c565a6 100644
--- a/confignode/src/main/java/org/apache/iotdb/confignode/procedure/env/DataNodeRemoveHandler.java
+++ b/confignode/src/main/java/org/apache/iotdb/confignode/procedure/env/DataNodeRemoveHandler.java
@@ -26,7 +26,6 @@ import org.apache.iotdb.common.rpc.thrift.TRegionReplicaSet;
import org.apache.iotdb.common.rpc.thrift.TSStatus;
import org.apache.iotdb.commons.cluster.NodeStatus;
import org.apache.iotdb.confignode.client.DataNodeRequestType;
-import org.apache.iotdb.confignode.client.async.AsyncDataNodeClientPool;
import org.apache.iotdb.confignode.client.sync.SyncDataNodeClientPool;
import org.apache.iotdb.confignode.conf.ConfigNodeConfig;
import org.apache.iotdb.confignode.conf.ConfigNodeDescriptor;
@@ -37,7 +36,6 @@ import org.apache.iotdb.confignode.manager.ConfigManager;
import org.apache.iotdb.confignode.manager.node.heartbeat.BaseNodeCache;
import org.apache.iotdb.confignode.persistence.node.NodeInfo;
import org.apache.iotdb.confignode.procedure.scheduler.LockQueue;
-import org.apache.iotdb.consensus.ConsensusFactory;
import org.apache.iotdb.mpp.rpc.thrift.TCreatePeerReq;
import org.apache.iotdb.mpp.rpc.thrift.TDisableDataNodeReq;
import org.apache.iotdb.mpp.rpc.thrift.TMaintainPeerReq;
@@ -53,6 +51,8 @@ import java.util.Optional;
import java.util.stream.Collectors;
import static org.apache.iotdb.confignode.conf.ConfigNodeConstant.REMOVE_DATANODE_PROCESS;
+import static org.apache.iotdb.consensus.ConsensusFactory.MULTI_LEADER_CONSENSUS;
+import static org.apache.iotdb.consensus.ConsensusFactory.SIMPLE_CONSENSUS;
public class DataNodeRemoveHandler {
private static final Logger LOGGER = LoggerFactory.getLogger(DataNodeRemoveHandler.class);
@@ -312,20 +312,6 @@ public class DataNodeRemoveHandler {
public TSStatus deleteOldRegionPeer(
TDataNodeLocation originalDataNode, TConsensusGroupId regionId) {
- // When DataReplicationFactor==1, execute deleteOldRegionPeer method will cause error
- // User must delete the related data manually
- // TODO if multi-leader supports deleteOldRegionPeer when DataReplicationFactor==1?
- if (CONF.getDataReplicationFactor() == 1
- && TConsensusGroupType.DataRegion.equals(regionId.getType())) {
- String errorMessage =
- "deleteOldRegionPeer is not supported for dataRegion when DataReplicationFactor equals 1, "
- + "you are supposed to delete the region data of datanode manually";
- LOGGER.info("{}, {}", REMOVE_DATANODE_PROCESS, errorMessage);
- TSStatus status = new TSStatus(TSStatusCode.MIGRATE_REGION_ERROR.getStatusCode());
- status.setMessage(errorMessage);
- return status;
- }
-
TSStatus status;
TMaintainPeerReq maintainPeerReq = new TMaintainPeerReq(regionId, originalDataNode);
status =
@@ -412,18 +398,21 @@ public class DataNodeRemoveHandler {
* @param dataNode old data node
*/
public void stopDataNode(TDataNodeLocation dataNode) {
+ LOGGER.info(
+ "{}, Begin to stop DataNode and kill the DataNode process {}",
+ REMOVE_DATANODE_PROCESS,
+ dataNode);
- AsyncDataNodeClientPool.getInstance().resetClient(dataNode.getInternalEndPoint());
TSStatus status =
SyncDataNodeClientPool.getInstance()
- .sendSyncRequestToDataNodeWithRetry(
- dataNode.getInternalEndPoint(), dataNode, DataNodeRequestType.STOP_DATA_NODE);
+ .sendSyncRequestToDataNodeWithGivenRetry(
+ dataNode.getInternalEndPoint(), dataNode, DataNodeRequestType.STOP_DATA_NODE, 2);
configManager.getNodeManager().removeNodeCache(dataNode.getDataNodeId());
LOGGER.info(
- "{}, Stop DataNode execute finished, DataNode: {}, result: {}",
+ "{}, Stop Data Node result: {}, stoppedDataNode: {}",
REMOVE_DATANODE_PROCESS,
- dataNode,
- status);
+ status,
+ dataNode);
}
/**
@@ -544,15 +533,52 @@ public class DataNodeRemoveHandler {
configManager.getConsensusManager().write(new RemoveDataNodePlan(removeDataNodes));
}
- public void changeRegionLeader(TConsensusGroupId regionId, TDataNodeLocation originalDataNode) {
+ /**
+ * Change the leader of given Region.
+ *
+ * <p>For MULTI_LEADER_CONSENSUS, using `changeLeaderForMultiLeaderConsensus` method to change the
+ * regionLeaderMap maintained in ConfigNode.
+ *
+ * <p>For RATIS_CONSENSUS, invoking `changeRegionLeader` DataNode RPC method to change the leader.
+ *
+ * @param regionId The region to be migrated
+ * @param originalDataNode The DataNode where the region locates
+ * @param migrateDestDataNode The DataNode where the region is to be migrated
+ */
+ public void changeRegionLeader(
+ TConsensusGroupId regionId,
+ TDataNodeLocation originalDataNode,
+ TDataNodeLocation migrateDestDataNode) {
Optional<TDataNodeLocation> newLeaderNode =
filterDataNodeWithOtherRegionReplica(regionId, originalDataNode);
+
+ if (TConsensusGroupType.DataRegion.equals(regionId.getType())
+ && MULTI_LEADER_CONSENSUS.equals(CONF.getDataRegionConsensusProtocolClass())) {
+ if (CONF.getDataReplicationFactor() == 1) {
+ newLeaderNode = Optional.of(migrateDestDataNode);
+ }
+ if (newLeaderNode.isPresent()) {
+ configManager
+ .getLoadManager()
+ .getRouteBalancer()
+ .changeLeaderForMultiLeaderConsensus(regionId, newLeaderNode.get().getDataNodeId());
+
+ LOGGER.info(
+ "{}, Change region leader finished for MULTI_LEADER_CONSENSUS, regionId: {}, newLeaderNode: {}",
+ REMOVE_DATANODE_PROCESS,
+ regionId,
+ newLeaderNode);
+ }
+
+ return;
+ }
+
if (newLeaderNode.isPresent()) {
SyncDataNodeClientPool.getInstance()
.changeRegionLeader(
regionId, originalDataNode.getInternalEndPoint(), newLeaderNode.get());
LOGGER.info(
- "{}, Change region leader finished, regionId: {}, newLeaderNode: {}",
+ "{}, Change region leader finished for RATIS_CONSENSUS, regionId: {}, newLeaderNode: {}",
REMOVE_DATANODE_PROCESS,
regionId,
newLeaderNode);
@@ -610,8 +636,8 @@ public class DataNodeRemoveHandler {
*/
private TSStatus checkClusterProtocol() {
TSStatus status = new TSStatus(TSStatusCode.SUCCESS_STATUS.getStatusCode());
- if (CONF.getDataRegionConsensusProtocolClass().equals(ConsensusFactory.SIMPLE_CONSENSUS)
- || CONF.getSchemaRegionConsensusProtocolClass().equals(ConsensusFactory.SIMPLE_CONSENSUS)) {
+ if (CONF.getDataRegionConsensusProtocolClass().equals(SIMPLE_CONSENSUS)
+ || CONF.getSchemaRegionConsensusProtocolClass().equals(SIMPLE_CONSENSUS)) {
status.setCode(TSStatusCode.REMOVE_DATANODE_FAILED.getStatusCode());
status.setMessage("SimpleConsensus protocol is not supported to remove data node");
}
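The Javadoc added above describes the new branch: for MULTI_LEADER_CONSENSUS data regions the ConfigNode only rewrites its own leader routing entry (via RouteBalancer.changeLeaderForMultiLeaderConsensus), and with a replication factor of 1 the migration target becomes the routed leader; for RATIS_CONSENSUS the old DataNode is asked over RPC to transfer leadership. A small runnable sketch of that decision, using simplified placeholder types and node ids rather than the real IoTDB classes:

import java.util.Optional;

public class ChangeLeaderSketch {

  enum Consensus { MULTI_LEADER, RATIS }

  // protocol, replicationFactor, otherReplica and destNodeId are stand-ins for what the
  // real handler reads from ConfigNodeConfig and the region replica set.
  static void changeRegionLeader(
      Consensus protocol, int replicationFactor, Optional<Integer> otherReplica, int destNodeId) {
    if (protocol == Consensus.MULTI_LEADER) {
      // Replication factor 1 means no other replica exists yet: route to the migration target.
      Optional<Integer> newLeader =
          replicationFactor == 1 ? Optional.of(destNodeId) : otherReplica;
      newLeader.ifPresent(id -> System.out.println("Update ConfigNode leader route map to node " + id));
      return; // no DataNode RPC is needed for MultiLeader
    }
    // Ratis: the DataNode holding the old leader is asked to transfer leadership via RPC.
    otherReplica.ifPresent(id -> System.out.println("Send changeRegionLeader RPC, new leader node " + id));
  }

  public static void main(String[] args) {
    changeRegionLeader(Consensus.MULTI_LEADER, 1, Optional.empty(), 7); // route-map update only
    changeRegionLeader(Consensus.RATIS, 3, Optional.of(3), 7);          // RPC to old leader
  }
}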
diff --git a/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/statemachine/RegionMigrateProcedure.java b/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/statemachine/RegionMigrateProcedure.java
index 27e0d120b0..f15ed67489 100644
--- a/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/statemachine/RegionMigrateProcedure.java
+++ b/confignode/src/main/java/org/apache/iotdb/confignode/procedure/impl/statemachine/RegionMigrateProcedure.java
@@ -25,6 +25,7 @@ import org.apache.iotdb.common.rpc.thrift.TSStatus;
import org.apache.iotdb.commons.exception.runtime.ThriftSerDeException;
import org.apache.iotdb.commons.utils.ThriftCommonsSerDeUtils;
import org.apache.iotdb.confignode.procedure.env.ConfigNodeProcedureEnv;
+import org.apache.iotdb.confignode.procedure.env.DataNodeRemoveHandler;
import org.apache.iotdb.confignode.procedure.exception.ProcedureException;
import org.apache.iotdb.confignode.procedure.state.ProcedureLockState;
import org.apache.iotdb.confignode.procedure.state.RegionTransitionState;
@@ -82,17 +83,18 @@ public class RegionMigrateProcedure
return Flow.NO_MORE_STATE;
}
TSStatus tsStatus;
+ DataNodeRemoveHandler handler = env.getDataNodeRemoveHandler();
try {
switch (state) {
case REGION_MIGRATE_PREPARE:
setNextState(RegionTransitionState.CREATE_NEW_REGION_PEER);
break;
case CREATE_NEW_REGION_PEER:
- env.getDataNodeRemoveHandler().createNewRegionPeer(consensusGroupId, destDataNode);
+ handler.createNewRegionPeer(consensusGroupId, destDataNode);
setNextState(RegionTransitionState.ADD_REGION_PEER);
break;
case ADD_REGION_PEER:
- tsStatus = env.getDataNodeRemoveHandler().addRegionPeer(destDataNode, consensusGroupId);
+ tsStatus = handler.addRegionPeer(destDataNode, consensusGroupId);
if (tsStatus.getCode() == SUCCESS_STATUS.getStatusCode()) {
waitForOneMigrationStepFinished(consensusGroupId, state);
} else {
@@ -101,13 +103,11 @@ public class RegionMigrateProcedure
setNextState(RegionTransitionState.CHANGE_REGION_LEADER);
break;
case CHANGE_REGION_LEADER:
- env.getDataNodeRemoveHandler().changeRegionLeader(consensusGroupId, originalDataNode);
+ handler.changeRegionLeader(consensusGroupId, originalDataNode, destDataNode);
setNextState(RegionTransitionState.REMOVE_REGION_PEER);
break;
case REMOVE_REGION_PEER:
- tsStatus =
- env.getDataNodeRemoveHandler()
- .removeRegionPeer(originalDataNode, destDataNode, consensusGroupId);
+ tsStatus = handler.removeRegionPeer(originalDataNode, destDataNode, consensusGroupId);
if (tsStatus.getCode() == SUCCESS_STATUS.getStatusCode()) {
waitForOneMigrationStepFinished(consensusGroupId, state);
} else {
@@ -116,9 +116,7 @@ public class RegionMigrateProcedure
setNextState(RegionTransitionState.DELETE_OLD_REGION_PEER);
break;
case DELETE_OLD_REGION_PEER:
- tsStatus =
- env.getDataNodeRemoveHandler()
- .deleteOldRegionPeer(originalDataNode, consensusGroupId);
+ tsStatus = handler.deleteOldRegionPeer(originalDataNode, consensusGroupId);
if (tsStatus.getCode() == SUCCESS_STATUS.getStatusCode()) {
waitForOneMigrationStepFinished(consensusGroupId, state);
}
@@ -127,8 +125,7 @@ public class RegionMigrateProcedure
setNextState(RegionTransitionState.UPDATE_REGION_LOCATION_CACHE);
break;
case UPDATE_REGION_LOCATION_CACHE:
- env.getDataNodeRemoveHandler()
- .updateRegionLocationCache(consensusGroupId, originalDataNode, destDataNode);
+ handler.updateRegionLocationCache(consensusGroupId, originalDataNode, destDataNode);
return Flow.NO_MORE_STATE;
}
} catch (Exception e) {
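For orientation, the procedure above steps through the migration states in the order listed below (state names taken from the diff); the tiny driver is illustrative only and not part of the commit:

public class RegionMigrateStatesSketch {

  // The real procedure advances via setNextState(...) and waits for each DataNode step to report back.
  enum State {
    REGION_MIGRATE_PREPARE,
    CREATE_NEW_REGION_PEER,
    ADD_REGION_PEER,
    CHANGE_REGION_LEADER, // now calls changeRegionLeader(regionId, originalDataNode, destDataNode)
    REMOVE_REGION_PEER,
    DELETE_OLD_REGION_PEER,
    UPDATE_REGION_LOCATION_CACHE
  }

  public static void main(String[] args) {
    for (State s : State.values()) {
      System.out.println(s); // prints the migration steps in execution order
    }
  }
}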
diff --git a/docs/UserGuide/Cluster/Cluster-Setup.md b/docs/UserGuide/Cluster/Cluster-Setup.md
index d1b78304b2..5ae4d0a3dd 100644
--- a/docs/UserGuide/Cluster/Cluster-Setup.md
+++ b/docs/UserGuide/Cluster/Cluster-Setup.md
@@ -326,7 +326,7 @@ It costs 0.012s
+ Remove a ConfigNode:
```
-./cluster0/confignode/sbin/remove-confignode.sh -r 0.0.0.0:22279
+./cluster0/confignode/sbin/remove-confignode.sh 127.0.0.1:22279
```
+ Remove a DataNode:
diff --git a/docs/zh/UserGuide/Cluster/Cluster-Setup.md b/docs/zh/UserGuide/Cluster/Cluster-Setup.md
index 4f63e5cbb8..ef7d4960b1 100644
--- a/docs/zh/UserGuide/Cluster/Cluster-Setup.md
+++ b/docs/zh/UserGuide/Cluster/Cluster-Setup.md
@@ -326,7 +326,7 @@ It costs 0.012s
+ 缩容一个 ConfigNode:
```
-./cluster0/confignode/sbin/remove-confignode.sh -r 0.0.0.0:22279
+./cluster0/confignode/sbin/remove-confignode.sh 127.0.0.1:22279
```
+ 缩容一个 DataNode:
diff --git a/server/src/main/java/org/apache/iotdb/db/service/thrift/impl/DataNodeInternalRPCServiceImpl.java b/server/src/main/java/org/apache/iotdb/db/service/thrift/impl/DataNodeInternalRPCServiceImpl.java
index b182ba3044..f691488eb6 100644
--- a/server/src/main/java/org/apache/iotdb/db/service/thrift/impl/DataNodeInternalRPCServiceImpl.java
+++ b/server/src/main/java/org/apache/iotdb/db/service/thrift/impl/DataNodeInternalRPCServiceImpl.java
@@ -1493,7 +1493,7 @@ public class DataNodeInternalRPCServiceImpl implements IDataNodeRPCService.Iface
DataNode.getInstance().stop();
status.setMessage("stop datanode succeed");
} catch (Exception e) {
- LOGGER.error("stop Data Node error", e);
+ LOGGER.error("Stop Data Node error", e);
status.setCode(TSStatusCode.DATANODE_STOP_ERROR.getStatusCode());
status.setMessage(e.getMessage());
}