Posted to commits@iotdb.apache.org by yo...@apache.org on 2024/03/06 12:36:38 UTC

(iotdb) branch region-multi-database updated: combinatori test

This is an automated email from the ASF dual-hosted git repository.

yongzao pushed a commit to branch region-multi-database
in repository https://gitbox.apache.org/repos/asf/iotdb.git


The following commit(s) were added to refs/heads/region-multi-database by this push:
     new 94b2e154e07 combinatori test
94b2e154e07 is described below

commit 94b2e154e07f715ac84b91c84990ee4d3ac9692c
Author: YongzaoDan <53...@qq.com>
AuthorDate: Wed Mar 6 20:36:26 2024 +0800

    combinatori test
---
 .../manager/load/balancer/RouteBalancer.java       |   2 +-
 .../router/leader/GreedyLeaderBalancer.java        |   2 +-
 .../router/leader/MinCostFlowLeaderBalancer.java   |  33 ++--
 .../persistence/partition/PartitionInfo.java       |  13 +-
 ...orAndLeaderBalancerCombinatorialManualTest.java | 201 +++++++++++++++++++++
 .../GreedyCopySetRegionGroupAllocatorTest.java     |   9 +-
 .../router/leader/GreedyLeaderBalancerTest.java    |   8 +-
 .../leader/LeaderBalancerComparisonTest.java       |   4 +-
 .../leader/MinCostFlowLeaderBalancerTest.java      |  20 +-
 9 files changed, 247 insertions(+), 45 deletions(-)

diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/load/balancer/RouteBalancer.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/load/balancer/RouteBalancer.java
index deca1df1ec3..8acdc192a36 100644
--- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/load/balancer/RouteBalancer.java
+++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/load/balancer/RouteBalancer.java
@@ -140,7 +140,7 @@ public class RouteBalancer {
     Map<TConsensusGroupId, Integer> currentLeaderMap = getLoadManager().getRegionLeaderMap();
     Map<TConsensusGroupId, Integer> optimalLeaderMap =
         leaderBalancer.generateOptimalLeaderDistribution(
-          getPartitionManager().getAllRegionGroupIdMap(regionGroupType),
+            getPartitionManager().getAllRegionGroupIdMap(regionGroupType),
             getPartitionManager().getAllReplicaSetsMap(regionGroupType),
             currentLeaderMap,
             getNodeManager()
diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/load/balancer/router/leader/GreedyLeaderBalancer.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/load/balancer/router/leader/GreedyLeaderBalancer.java
index 723c8462b7a..5619312f4e7 100644
--- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/load/balancer/router/leader/GreedyLeaderBalancer.java
+++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/load/balancer/router/leader/GreedyLeaderBalancer.java
@@ -47,7 +47,7 @@ public class GreedyLeaderBalancer implements ILeaderBalancer {
 
   @Override
   public Map<TConsensusGroupId, Integer> generateOptimalLeaderDistribution(
-    Map<String, List<TConsensusGroupId>> databaseRegionGroupMap,
+      Map<String, List<TConsensusGroupId>> databaseRegionGroupMap,
       Map<TConsensusGroupId, TRegionReplicaSet> regionReplicaSetMap,
       Map<TConsensusGroupId, Integer> regionLeaderMap,
       Set<Integer> disabledDataNodeSet) {
diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/load/balancer/router/leader/MinCostFlowLeaderBalancer.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/load/balancer/router/leader/MinCostFlowLeaderBalancer.java
index d69c50b9843..517f35a569f 100644
--- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/load/balancer/router/leader/MinCostFlowLeaderBalancer.java
+++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/manager/load/balancer/router/leader/MinCostFlowLeaderBalancer.java
@@ -355,22 +355,23 @@ public class MinCostFlowLeaderBalancer implements ILeaderBalancer {
     Map<TConsensusGroupId, Integer> result = new ConcurrentHashMap<>();
 
     databaseRegionGroupMap.forEach(
-        (database, regionGroupIds) -> regionGroupIds.forEach(
-            regionGroupId -> {
-              boolean matchLeader = false;
-              for (int currentEdge = nodeHeadEdge[rNodeMap.get(regionGroupId)];
-                  currentEdge >= 0;
-                  currentEdge = minCostFlowEdges.get(currentEdge).nextEdge) {
-                MinCostFlowEdge edge = minCostFlowEdges.get(currentEdge);
-                if (edge.destNode != S_NODE && edge.capacity == 0) {
-                  matchLeader = true;
-                  result.put(regionGroupId, sDNodeReflect.get(database).get(edge.destNode));
-                }
-              }
-              if (!matchLeader) {
-                result.put(regionGroupId, regionLeaderMap.getOrDefault(regionGroupId, -1));
-              }
-            }));
+        (database, regionGroupIds) ->
+            regionGroupIds.forEach(
+                regionGroupId -> {
+                  boolean matchLeader = false;
+                  for (int currentEdge = nodeHeadEdge[rNodeMap.get(regionGroupId)];
+                      currentEdge >= 0;
+                      currentEdge = minCostFlowEdges.get(currentEdge).nextEdge) {
+                    MinCostFlowEdge edge = minCostFlowEdges.get(currentEdge);
+                    if (edge.destNode != S_NODE && edge.capacity == 0) {
+                      matchLeader = true;
+                      result.put(regionGroupId, sDNodeReflect.get(database).get(edge.destNode));
+                    }
+                  }
+                  if (!matchLeader) {
+                    result.put(regionGroupId, regionLeaderMap.getOrDefault(regionGroupId, -1));
+                  }
+                }));
 
     return result;
   }
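
[Editor's note] The reindented loop above walks a head-edge / next-edge adjacency list (a "forward star"): nodeHeadEdge holds the index of a node's most recently added edge, each edge remembers the index of the previous edge from the same source, and a saturated edge (capacity == 0) toward a DataNode node marks where that RegionGroup's unit of flow was routed; when no such edge exists, the balancer falls back to the current leader (or -1). As a hedged, standalone illustration of that representation only -- not IoTDB code, and all names below are hypothetical -- a minimal Java sketch:

    // Editor's illustration, not part of the commit: a minimal forward-star edge list
    // of the kind the balancer's loop iterates.
    import java.util.ArrayList;
    import java.util.Arrays;
    import java.util.List;
    import java.util.function.IntConsumer;

    class ForwardStarSketch {

      static final class Edge {
        final int destNode; // node this edge points to
        int capacity;       // residual capacity; 0 means the edge is saturated
        final int nextEdge; // index of the previously added edge from the same source, or -1

        Edge(int destNode, int capacity, int nextEdge) {
          this.destNode = destNode;
          this.capacity = capacity;
          this.nextEdge = nextEdge;
        }
      }

      private final int[] nodeHeadEdge;                 // per node: newest outgoing edge index, -1 if none
      private final List<Edge> edges = new ArrayList<>();

      ForwardStarSketch(int nodeCount) {
        nodeHeadEdge = new int[nodeCount];
        Arrays.fill(nodeHeadEdge, -1);
      }

      void addEdge(int from, int to, int capacity) {
        // The new edge becomes the head of "from"'s list and remembers the old head.
        edges.add(new Edge(to, capacity, nodeHeadEdge[from]));
        nodeHeadEdge[from] = edges.size() - 1;
      }

      // Visits the destination of every outgoing edge of "from", newest first,
      // mirroring the shape of the for-loop in the hunk above.
      void forEachDestination(int from, IntConsumer visitor) {
        for (int e = nodeHeadEdge[from]; e >= 0; e = edges.get(e).nextEdge) {
          visitor.accept(edges.get(e).destNode);
        }
      }
    }
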
diff --git a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/persistence/partition/PartitionInfo.java b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/persistence/partition/PartitionInfo.java
index 027cc23107d..2105071f933 100644
--- a/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/persistence/partition/PartitionInfo.java
+++ b/iotdb-core/confignode/src/main/java/org/apache/iotdb/confignode/persistence/partition/PartitionInfo.java
@@ -812,13 +812,12 @@ public class PartitionInfo implements SnapshotProcessor {
    */
   public Map<String, List<TConsensusGroupId>> getAllRegionGroupIdMap(TConsensusGroupType type) {
     Map<String, List<TConsensusGroupId>> result = new TreeMap<>();
-    databasePartitionTables
-        .forEach(
-            (database, databasePartitionTable) -> {
-              if (databasePartitionTable.isNotPreDeleted()) {
-                result.put(database, databasePartitionTable.getAllRegionGroupIds(type));
-              }
-            });
+    databasePartitionTables.forEach(
+        (database, databasePartitionTable) -> {
+          if (databasePartitionTable.isNotPreDeleted()) {
+            result.put(database, databasePartitionTable.getAllRegionGroupIds(type));
+          }
+        });
     return result;
   }
 
diff --git a/iotdb-core/confignode/src/test/java/org/apache/iotdb/confignode/manager/load/balancer/RegionAllocatorAndLeaderBalancerCombinatorialManualTest.java b/iotdb-core/confignode/src/test/java/org/apache/iotdb/confignode/manager/load/balancer/RegionAllocatorAndLeaderBalancerCombinatorialManualTest.java
new file mode 100644
index 00000000000..0067ae19eb1
--- /dev/null
+++ b/iotdb-core/confignode/src/test/java/org/apache/iotdb/confignode/manager/load/balancer/RegionAllocatorAndLeaderBalancerCombinatorialManualTest.java
@@ -0,0 +1,201 @@
+package org.apache.iotdb.confignode.manager.load.balancer;
+
+import org.apache.iotdb.common.rpc.thrift.TConsensusGroupId;
+import org.apache.iotdb.common.rpc.thrift.TConsensusGroupType;
+import org.apache.iotdb.common.rpc.thrift.TDataNodeConfiguration;
+import org.apache.iotdb.common.rpc.thrift.TDataNodeLocation;
+import org.apache.iotdb.common.rpc.thrift.TRegionReplicaSet;
+import org.apache.iotdb.confignode.manager.load.balancer.region.GreedyCopySetRegionGroupAllocator;
+import org.apache.iotdb.confignode.manager.load.balancer.region.IRegionGroupAllocator;
+import org.apache.iotdb.confignode.manager.load.balancer.router.leader.ILeaderBalancer;
+import org.apache.iotdb.confignode.manager.load.balancer.router.leader.MinCostFlowLeaderBalancer;
+
+import org.junit.BeforeClass;
+import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.util.ArrayList;
+import java.util.BitSet;
+import java.util.Collections;
+import java.util.List;
+import java.util.Map;
+import java.util.Random;
+import java.util.TreeMap;
+import java.util.TreeSet;
+import java.util.stream.Collectors;
+
+public class RegionAllocatorAndLeaderBalancerCombinatorialManualTest {
+
+  private static final Logger LOGGER =
+      LoggerFactory.getLogger(RegionAllocatorAndLeaderBalancerCombinatorialManualTest.class);
+
+  private static final IRegionGroupAllocator ALLOCATOR = new GreedyCopySetRegionGroupAllocator();
+  private static final ILeaderBalancer BALANCER = new MinCostFlowLeaderBalancer();
+
+  private static final int TEST_LOOP = 100;
+  private static final int TEST_DATA_NODE_NUM = 10;
+  private static final int DATA_REGION_PER_DATA_NODE = 4;
+  private static final int DATA_REPLICATION_FACTOR = 3;
+  private static final String DATABASE = "root.db";
+
+  private static final Map<Integer, TDataNodeConfiguration> AVAILABLE_DATA_NODE_MAP =
+      new TreeMap<>();
+  private static final Map<Integer, Double> FREE_SPACE_MAP = new TreeMap<>();
+
+  @BeforeClass
+  public static void setUp() {
+    // Construct TEST_DATA_NODE_NUM DataNodes
+    Random random = new Random();
+    for (int i = 1; i <= TEST_DATA_NODE_NUM; i++) {
+      AVAILABLE_DATA_NODE_MAP.put(
+          i, new TDataNodeConfiguration().setLocation(new TDataNodeLocation().setDataNodeId(i)));
+      FREE_SPACE_MAP.put(i, random.nextDouble());
+    }
+  }
+
+  @Test
+  public void manualTest() {
+    final int dataRegionGroupNum =
+        DATA_REGION_PER_DATA_NODE * TEST_DATA_NODE_NUM / DATA_REPLICATION_FACTOR;
+    List<Integer> regionCountList = new ArrayList<>();
+    List<Integer> scatterWidthList = new ArrayList<>();
+    List<Integer> leaderCountList = new ArrayList<>();
+    for (int loop = 1; loop <= TEST_LOOP; loop++) {
+      /* Allocate RegionGroup */
+      List<TRegionReplicaSet> allocateResult = new ArrayList<>();
+      for (int index = 0; index < dataRegionGroupNum; index++) {
+        allocateResult.add(
+            ALLOCATOR.generateOptimalRegionReplicasDistribution(
+                AVAILABLE_DATA_NODE_MAP,
+                FREE_SPACE_MAP,
+                allocateResult,
+                allocateResult,
+                DATA_REPLICATION_FACTOR,
+                new TConsensusGroupId(TConsensusGroupType.DataRegion, index)));
+      }
+
+      /* Count Region in each DataNode */
+      // Map<DataNodeId, RegionGroup Count>
+      Map<Integer, Integer> regionCounter = new TreeMap<>();
+      allocateResult.forEach(
+          regionReplicaSet ->
+              regionReplicaSet
+                  .getDataNodeLocations()
+                  .forEach(
+                      dataNodeLocation ->
+                          regionCounter.merge(dataNodeLocation.getDataNodeId(), 1, Integer::sum)));
+
+      /* Calculate scatter width for each DataNode */
+      // Map<DataNodeId, ScatterWidth>
+      // where a true in the bitset denotes the corresponding DataNode can help the DataNode in
+      // Map-Key to share the RegionGroup-leader and restore data when restarting.
+      // The more true in the bitset, the more safety the cluster DataNode in Map-Key is.
+      Map<Integer, BitSet> scatterWidthMap = new TreeMap<>();
+      for (TRegionReplicaSet replicaSet : allocateResult) {
+        for (int i = 0; i < DATA_REPLICATION_FACTOR; i++) {
+          for (int j = i + 1; j < DATA_REPLICATION_FACTOR; j++) {
+            int dataNodeId1 = replicaSet.getDataNodeLocations().get(i).getDataNodeId();
+            int dataNodeId2 = replicaSet.getDataNodeLocations().get(j).getDataNodeId();
+            scatterWidthMap.computeIfAbsent(dataNodeId1, empty -> new BitSet()).set(dataNodeId2);
+            scatterWidthMap.computeIfAbsent(dataNodeId2, empty -> new BitSet()).set(dataNodeId1);
+          }
+        }
+      }
+      int scatterWidthSum = 0;
+      int minScatterWidth = Integer.MAX_VALUE;
+      int maxScatterWidth = Integer.MIN_VALUE;
+      for (int i = 1; i <= TEST_DATA_NODE_NUM; i++) {
+        int scatterWidth = scatterWidthMap.get(i).cardinality();
+        scatterWidthSum += scatterWidth;
+        minScatterWidth = Math.min(minScatterWidth, scatterWidth);
+        maxScatterWidth = Math.max(maxScatterWidth, scatterWidth);
+        regionCountList.add(regionCounter.get(i));
+        scatterWidthList.add(scatterWidth);
+      }
+      LOGGER.info(
+          "Loop: {}, Test :{}, {}",
+          loop,
+          ALLOCATOR.getClass().getSimpleName(),
+          BALANCER.getClass().getSimpleName());
+      LOGGER.info(
+          "Allocate {} DataRegionGroups for {} DataNodes", dataRegionGroupNum, TEST_DATA_NODE_NUM);
+      LOGGER.info(
+          "Scatter width avg: {}, min: {}, max: {}",
+          (double) scatterWidthSum / TEST_DATA_NODE_NUM,
+          minScatterWidth,
+          maxScatterWidth);
+
+      /* Balance Leader */
+      Map<String, List<TConsensusGroupId>> databaseRegionGroupMap =
+          Collections.singletonMap(
+              DATABASE,
+              allocateResult.stream()
+                  .map(TRegionReplicaSet::getRegionId)
+                  .collect(Collectors.toList()));
+      Map<TConsensusGroupId, TRegionReplicaSet> regionReplicaSetMap =
+          allocateResult.stream().collect(Collectors.toMap(TRegionReplicaSet::getRegionId, r -> r));
+      Map<TConsensusGroupId, Integer> optimalLeaderDistribution =
+          BALANCER.generateOptimalLeaderDistribution(
+              databaseRegionGroupMap, regionReplicaSetMap, new TreeMap<>(), new TreeSet<>());
+      // Map<DataNodeId, Leader Count>
+      Map<Integer, Integer> leaderCounter = new TreeMap<>();
+      optimalLeaderDistribution.forEach(
+          (regionId, leaderId) -> leaderCounter.merge(leaderId, 1, Integer::sum));
+      int minLeaderCount = leaderCounter.values().stream().min(Integer::compareTo).orElse(0);
+      int maxLeaderCount = leaderCounter.values().stream().max(Integer::compareTo).orElse(0);
+      leaderCounter.forEach((dataNodeId, leaderCount) -> leaderCountList.add(leaderCount));
+      LOGGER.info("Leader count min: {}, max: {}", minLeaderCount, maxLeaderCount);
+    }
+
+    LOGGER.info("All tests done.");
+    double regionCountAvg =
+        regionCountList.stream().mapToInt(Integer::intValue).average().orElse(0);
+    double regionCountVariance =
+        regionCountList.stream()
+                .mapToInt(Integer::intValue)
+                .mapToDouble(i -> Math.pow(i - regionCountAvg, 2))
+                .sum()
+            / regionCountList.size();
+    int regionCountRange =
+        regionCountList.stream().mapToInt(Integer::intValue).max().orElse(0)
+            - regionCountList.stream().mapToInt(Integer::intValue).min().orElse(0);
+    LOGGER.info(
+        "Region count avg: {}, var: {}, range: {}",
+        regionCountAvg,
+        regionCountVariance,
+        regionCountRange);
+    double scatterWidthAvg =
+        scatterWidthList.stream().mapToInt(Integer::intValue).average().orElse(0);
+    double scatterWidthVariance =
+        scatterWidthList.stream()
+                .mapToInt(Integer::intValue)
+                .mapToDouble(i -> Math.pow(i - scatterWidthAvg, 2))
+                .sum()
+            / scatterWidthList.size();
+    int scatterWidthRange =
+        scatterWidthList.stream().mapToInt(Integer::intValue).max().orElse(0)
+            - scatterWidthList.stream().mapToInt(Integer::intValue).min().orElse(0);
+    LOGGER.info(
+        "Scatter width avg: {}, var: {}, range: {}",
+        scatterWidthAvg,
+        scatterWidthVariance,
+        scatterWidthRange);
+    double leaderCountAvg =
+        leaderCountList.stream().mapToInt(Integer::intValue).average().orElse(0);
+    double leaderCountVariance =
+        leaderCountList.stream()
+                .mapToInt(Integer::intValue)
+                .mapToDouble(i -> Math.pow(i - leaderCountAvg, 2))
+                .sum()
+            / leaderCountList.size();
+    int leaderCountRange =
+        leaderCountList.stream().mapToInt(Integer::intValue).max().orElse(0)
+            - leaderCountList.stream().mapToInt(Integer::intValue).min().orElse(0);
+    LOGGER.info(
+        "Leader count avg: {}, var: {}, range: {}",
+        leaderCountAvg,
+        leaderCountVariance,
+        leaderCountRange);
+  }
+}
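
[Editor's note] The scatter-width comment in the new test defines a DataNode's scatter width as the number of other DataNodes that share at least one RegionGroup with it. As a hedged, concrete illustration of that definition only -- an editor's sketch, not part of the commit, with hand-picked replica sets -- if the replica sets are {1, 2, 3} and {1, 2, 4}, DataNode 1 shares a group with 2, 3 and 4 (scatter width 3), while DataNodes 3 and 4 each share a group only with 1 and 2 (scatter width 2):

    // Editor's illustration, not part of the commit: the BitSet-based scatter width
    // computation used by the test, run on two hand-picked replica sets.
    import java.util.Arrays;
    import java.util.BitSet;
    import java.util.List;
    import java.util.Map;
    import java.util.TreeMap;

    public class ScatterWidthSketch {
      public static void main(String[] args) {
        // Two replica sets, given as DataNode ids (hypothetical data).
        List<int[]> replicaSets = Arrays.asList(new int[] {1, 2, 3}, new int[] {1, 2, 4});

        // For every pair of DataNodes inside one replica set, mark that they can
        // cover for each other -- the same pairwise marking the test performs.
        Map<Integer, BitSet> scatterWidthMap = new TreeMap<>();
        for (int[] replicaSet : replicaSets) {
          for (int i = 0; i < replicaSet.length; i++) {
            for (int j = i + 1; j < replicaSet.length; j++) {
              scatterWidthMap.computeIfAbsent(replicaSet[i], k -> new BitSet()).set(replicaSet[j]);
              scatterWidthMap.computeIfAbsent(replicaSet[j], k -> new BitSet()).set(replicaSet[i]);
            }
          }
        }

        // Prints: DataNode 1 -> 3, DataNode 2 -> 3, DataNode 3 -> 2, DataNode 4 -> 2
        scatterWidthMap.forEach(
            (dataNodeId, bitSet) ->
                System.out.println(
                    "DataNode " + dataNodeId + " scatter width = " + bitSet.cardinality()));
      }
    }
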
diff --git a/iotdb-core/confignode/src/test/java/org/apache/iotdb/confignode/manager/load/balancer/region/GreedyCopySetRegionGroupAllocatorTest.java b/iotdb-core/confignode/src/test/java/org/apache/iotdb/confignode/manager/load/balancer/region/GreedyCopySetRegionGroupAllocatorTest.java
index 7bf89eeff98..95a23ab3e64 100644
--- a/iotdb-core/confignode/src/test/java/org/apache/iotdb/confignode/manager/load/balancer/region/GreedyCopySetRegionGroupAllocatorTest.java
+++ b/iotdb-core/confignode/src/test/java/org/apache/iotdb/confignode/manager/load/balancer/region/GreedyCopySetRegionGroupAllocatorTest.java
@@ -39,6 +39,7 @@ import java.util.List;
 import java.util.Map;
 import java.util.Random;
 import java.util.TreeMap;
+import java.util.stream.Collectors;
 
 public class GreedyCopySetRegionGroupAllocatorTest {
 
@@ -133,7 +134,13 @@ public class GreedyCopySetRegionGroupAllocatorTest {
                     .computeIfAbsent(databaseId, empty -> new TreeMap<>())
                     .merge(dataNodeLocation.getDataNodeId(), 1, Integer::sum);
               });
-      LOGGER.info("After allocate RegionGroup: {}", index);
+      LOGGER.info(
+          "After allocate RegionGroup: {}, Database: {}, plan: {}",
+          index,
+          databaseId,
+          greedyCopySetRegionGroup.getDataNodeLocations().stream()
+              .map(TDataNodeLocation::getDataNodeId)
+              .collect(Collectors.toList()));
       for (int i = 0; i < TEST_DATABASE_NUM; i++) {
         LOGGER.info("Database {}: {}", i, greedyCopySetDatabaseRegionCounter.get(i));
       }
diff --git a/iotdb-core/confignode/src/test/java/org/apache/iotdb/confignode/manager/load/balancer/router/leader/GreedyLeaderBalancerTest.java b/iotdb-core/confignode/src/test/java/org/apache/iotdb/confignode/manager/load/balancer/router/leader/GreedyLeaderBalancerTest.java
index a7f4103345f..b112cd961f8 100644
--- a/iotdb-core/confignode/src/test/java/org/apache/iotdb/confignode/manager/load/balancer/router/leader/GreedyLeaderBalancerTest.java
+++ b/iotdb-core/confignode/src/test/java/org/apache/iotdb/confignode/manager/load/balancer/router/leader/GreedyLeaderBalancerTest.java
@@ -76,8 +76,8 @@ public class GreedyLeaderBalancerTest {
     }
 
     Map<TConsensusGroupId, Integer> leaderDistribution =
-        BALANCER.generateOptimalLeaderDistribution(new TreeMap<>(),
-            regionReplicaSetMap, regionLeaderMap, disabledDataNodeSet);
+        BALANCER.generateOptimalLeaderDistribution(
+            new TreeMap<>(), regionReplicaSetMap, regionLeaderMap, disabledDataNodeSet);
     Map<Integer, AtomicInteger> leaderCounter = new ConcurrentHashMap<>();
     leaderDistribution.forEach(
         (regionGroupId, leaderId) ->
@@ -129,8 +129,8 @@ public class GreedyLeaderBalancerTest {
     }
 
     Map<TConsensusGroupId, Integer> leaderDistribution =
-        BALANCER.generateOptimalLeaderDistribution(new TreeMap<>(),
-            regionReplicaSetMap, regionLeaderMap, disabledDataNodeSet);
+        BALANCER.generateOptimalLeaderDistribution(
+            new TreeMap<>(), regionReplicaSetMap, regionLeaderMap, disabledDataNodeSet);
     Map<Integer, AtomicInteger> leaderCounter = new ConcurrentHashMap<>();
     leaderDistribution.forEach(
         (regionGroupId, leaderId) ->
diff --git a/iotdb-core/confignode/src/test/java/org/apache/iotdb/confignode/manager/load/balancer/router/leader/LeaderBalancerComparisonTest.java b/iotdb-core/confignode/src/test/java/org/apache/iotdb/confignode/manager/load/balancer/router/leader/LeaderBalancerComparisonTest.java
index f289d8746fa..5db7f80bd2f 100644
--- a/iotdb-core/confignode/src/test/java/org/apache/iotdb/confignode/manager/load/balancer/router/leader/LeaderBalancerComparisonTest.java
+++ b/iotdb-core/confignode/src/test/java/org/apache/iotdb/confignode/manager/load/balancer/router/leader/LeaderBalancerComparisonTest.java
@@ -264,8 +264,8 @@ public class LeaderBalancerComparisonTest {
     Map<TConsensusGroupId, Integer> lastDistribution = new ConcurrentHashMap<>(regionLeaderMap);
     for (int rounds = 0; rounds < 1000; rounds++) {
       Map<TConsensusGroupId, Integer> currentDistribution =
-          leaderBalancer.generateOptimalLeaderDistribution(new TreeMap<>(),
-              regionReplicaSetMap, lastDistribution, disabledDataNodeSet);
+          leaderBalancer.generateOptimalLeaderDistribution(
+              new TreeMap<>(), regionReplicaSetMap, lastDistribution, disabledDataNodeSet);
       if (currentDistribution.equals(lastDistribution)) {
         // The leader distribution is stable
         result.rounds = rounds;
diff --git a/iotdb-core/confignode/src/test/java/org/apache/iotdb/confignode/manager/load/balancer/router/leader/MinCostFlowLeaderBalancerTest.java b/iotdb-core/confignode/src/test/java/org/apache/iotdb/confignode/manager/load/balancer/router/leader/MinCostFlowLeaderBalancerTest.java
index 6807d8b314b..197c74a37a5 100644
--- a/iotdb-core/confignode/src/test/java/org/apache/iotdb/confignode/manager/load/balancer/router/leader/MinCostFlowLeaderBalancerTest.java
+++ b/iotdb-core/confignode/src/test/java/org/apache/iotdb/confignode/manager/load/balancer/router/leader/MinCostFlowLeaderBalancerTest.java
@@ -29,7 +29,6 @@ import org.junit.Test;
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.Collections;
-import java.util.HashMap;
 import java.util.HashSet;
 import java.util.List;
 import java.util.Map;
@@ -39,7 +38,6 @@ import java.util.Set;
 import java.util.TreeMap;
 import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.atomic.AtomicInteger;
-import java.util.stream.Collectors;
 
 public class MinCostFlowLeaderBalancerTest {
 
@@ -91,8 +89,8 @@ public class MinCostFlowLeaderBalancerTest {
 
     // Do balancing
     Map<TConsensusGroupId, Integer> leaderDistribution =
-        BALANCER.generateOptimalLeaderDistribution(databaseRegionGroupMap,
-            regionReplicaSetMap, regionLeaderMap, disabledDataNodeSet);
+        BALANCER.generateOptimalLeaderDistribution(
+            databaseRegionGroupMap, regionReplicaSetMap, regionLeaderMap, disabledDataNodeSet);
     // All RegionGroup got a leader
     Assert.assertEquals(3, leaderDistribution.size());
     // Each DataNode occurs exactly once
@@ -129,8 +127,8 @@ public class MinCostFlowLeaderBalancerTest {
 
     // Do balancing
     Map<TConsensusGroupId, Integer> leaderDistribution =
-        BALANCER.generateOptimalLeaderDistribution(databaseRegionGroupMap,
-            regionReplicaSetMap, regionLeaderMap, disabledDataNodeSet);
+        BALANCER.generateOptimalLeaderDistribution(
+            databaseRegionGroupMap, regionReplicaSetMap, regionLeaderMap, disabledDataNodeSet);
     Assert.assertEquals(1, leaderDistribution.size());
     Assert.assertEquals(1, new HashSet<>(leaderDistribution.values()).size());
     // Leader remains the same
@@ -183,17 +181,13 @@ public class MinCostFlowLeaderBalancerTest {
 
     // Do balancing
     Map<TConsensusGroupId, Integer> leaderDistribution =
-        BALANCER.generateOptimalLeaderDistribution(databaseRegionGroupMap,
-            regionReplicaSetMap, regionLeaderMap, new HashSet<>());
+        BALANCER.generateOptimalLeaderDistribution(
+            databaseRegionGroupMap, regionReplicaSetMap, regionLeaderMap, new HashSet<>());
     // All RegionGroup got a leader
     Assert.assertEquals(regionGroupNum, leaderDistribution.size());
 
     Map<Integer, Integer> leaderCounter = new ConcurrentHashMap<>();
-    leaderDistribution
-        .values()
-        .forEach(
-            leaderId ->
-                leaderCounter.merge(leaderId, 1, Integer::sum));
+    leaderDistribution.values().forEach(leaderId -> leaderCounter.merge(leaderId, 1, Integer::sum));
     // Every DataNode has leader
     Assert.assertEquals(dataNodeNum, leaderCounter.size());
     // Every DataNode has exactly regionGroupNum / dataNodeNum leaders