Posted to commits@druid.apache.org by GitBox <gi...@apache.org> on 2018/11/28 10:42:46 UTC

[GitHub] gianm closed pull request #6503: Add rebalance strategy for RandomBalancerStrategy

URL: https://github.com/apache/incubator-druid/pull/6503

This is a PR merged from a forked repository. Because GitHub hides the original
diff once a pull request from a fork is merged, it is reproduced below for the
sake of provenance.
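
For context, the patch below threads a new balancerThreshold dynamic coordinator
config through to RandomBalancerStrategy. The following is a minimal sketch of how
the setting flows from the config to the strategy, using the builder and factory
methods added in this patch; the executor setup and the threshold value of 10 are
illustrative only and not part of the change:

    import com.google.common.util.concurrent.ListeningExecutorService;
    import com.google.common.util.concurrent.MoreExecutors;

    import org.apache.druid.server.coordinator.BalancerStrategy;
    import org.apache.druid.server.coordinator.CoordinatorDynamicConfig;
    import org.apache.druid.server.coordinator.RandomBalancerStrategyFactory;

    import java.util.concurrent.Executors;

    // Sketch only: build a dynamic config carrying the new threshold and hand it
    // to the factory, mirroring what DruidCoordinator does in the diff below.
    ListeningExecutorService exec =
        MoreExecutors.listeningDecorator(Executors.newSingleThreadExecutor());

    CoordinatorDynamicConfig config = CoordinatorDynamicConfig.builder()
        // Servers more than this many percentage points below the cluster-average
        // usage become rebalance candidates.
        .withBalancerThreshold(10)
        .build();

    BalancerStrategy strategy = new RandomBalancerStrategyFactory()
        .createBalancerStrategy(exec, config.getBalancerThreshold());

Since balancerThreshold is exposed as a JSON property of CoordinatorDynamicConfig,
it can also be updated at runtime alongside the other dynamic coordinator settings,
as the updated CoordinatorDynamicConfigTest below illustrates.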

diff --git a/server/src/main/java/org/apache/druid/server/coordinator/BalancerStrategyFactory.java b/server/src/main/java/org/apache/druid/server/coordinator/BalancerStrategyFactory.java
index 7e79c9a4455..de4fcf36526 100644
--- a/server/src/main/java/org/apache/druid/server/coordinator/BalancerStrategyFactory.java
+++ b/server/src/main/java/org/apache/druid/server/coordinator/BalancerStrategyFactory.java
@@ -33,4 +33,10 @@
 public interface BalancerStrategyFactory
 {
   BalancerStrategy createBalancerStrategy(ListeningExecutorService exec);
+
+  default BalancerStrategy createBalancerStrategy(ListeningExecutorService exec, int balancerThreshold)
+  {
+    return createBalancerStrategy(exec);
+  }
+
 }
diff --git a/server/src/main/java/org/apache/druid/server/coordinator/CoordinatorDynamicConfig.java b/server/src/main/java/org/apache/druid/server/coordinator/CoordinatorDynamicConfig.java
index c51a04a8dea..af65a8bf431 100644
--- a/server/src/main/java/org/apache/druid/server/coordinator/CoordinatorDynamicConfig.java
+++ b/server/src/main/java/org/apache/druid/server/coordinator/CoordinatorDynamicConfig.java
@@ -67,6 +67,7 @@
    * See {@link LoadQueuePeon}, {@link org.apache.druid.server.coordinator.rules.LoadRule#run}
    */
   private final int maxSegmentsInNodeLoadingQueue;
+  private final int balancerThreshold;
 
   @JsonCreator
   public CoordinatorDynamicConfig(
@@ -85,7 +86,8 @@ public CoordinatorDynamicConfig(
       @JsonProperty("killDataSourceWhitelist") Object killDataSourceWhitelist,
       @JsonProperty("killAllDataSources") boolean killAllDataSources,
       @JsonProperty("killPendingSegmentsSkipList") Object killPendingSegmentsSkipList,
-      @JsonProperty("maxSegmentsInNodeLoadingQueue") int maxSegmentsInNodeLoadingQueue
+      @JsonProperty("maxSegmentsInNodeLoadingQueue") int maxSegmentsInNodeLoadingQueue,
+      @JsonProperty("balancerThreshold") int balancerThreshold
   )
   {
     this.millisToWaitBeforeDeleting = millisToWaitBeforeDeleting;
@@ -100,6 +102,7 @@ public CoordinatorDynamicConfig(
     this.killDataSourceWhitelist = parseJsonStringOrArray(killDataSourceWhitelist);
     this.killPendingSegmentsSkipList = parseJsonStringOrArray(killPendingSegmentsSkipList);
     this.maxSegmentsInNodeLoadingQueue = maxSegmentsInNodeLoadingQueue;
+    this.balancerThreshold = Math.max(balancerThreshold, 1);
 
     if (this.killAllDataSources && !this.killDataSourceWhitelist.isEmpty()) {
       throw new IAE("can't have killAllDataSources and non-empty killDataSourceWhitelist");
@@ -212,6 +215,12 @@ public int getMaxSegmentsInNodeLoadingQueue()
     return maxSegmentsInNodeLoadingQueue;
   }
 
+  @JsonProperty
+  public int getBalancerThreshold()
+  {
+    return balancerThreshold;
+  }
+
   @Override
   public String toString()
   {
@@ -228,6 +237,7 @@ public String toString()
            ", killAllDataSources=" + killAllDataSources +
            ", killPendingSegmentsSkipList=" + killPendingSegmentsSkipList +
            ", maxSegmentsInNodeLoadingQueue=" + maxSegmentsInNodeLoadingQueue +
+           ", balancerThreshold=" + balancerThreshold +
            '}';
   }
 
@@ -276,6 +286,9 @@ public boolean equals(Object o)
     if (!Objects.equals(killDataSourceWhitelist, that.killDataSourceWhitelist)) {
       return false;
     }
+    if (balancerThreshold != that.balancerThreshold) {
+      return false;
+    }
     return Objects.equals(killPendingSegmentsSkipList, that.killPendingSegmentsSkipList);
   }
 
@@ -294,7 +307,8 @@ public int hashCode()
         killAllDataSources,
         maxSegmentsInNodeLoadingQueue,
         killDataSourceWhitelist,
-        killPendingSegmentsSkipList
+        killPendingSegmentsSkipList,
+        balancerThreshold
     );
   }
 
@@ -315,6 +329,7 @@ public static Builder builder()
     private static final boolean DEFAULT_EMIT_BALANCING_STATS = false;
     private static final boolean DEFAULT_KILL_ALL_DATA_SOURCES = false;
     private static final int DEFAULT_MAX_SEGMENTS_IN_NODE_LOADING_QUEUE = 0;
+    private static final int DEFAULT_BALANCER_THRESHOLD = 5;
 
     private Long millisToWaitBeforeDeleting;
     private Long mergeBytesLimit;
@@ -328,6 +343,7 @@ public static Builder builder()
     private Boolean killAllDataSources;
     private Object killPendingSegmentsSkipList;
     private Integer maxSegmentsInNodeLoadingQueue;
+    private Integer balancerThreshold;
 
     public Builder()
     {
@@ -346,7 +362,8 @@ public Builder(
         @JsonProperty("killDataSourceWhitelist") @Nullable Object killDataSourceWhitelist,
         @JsonProperty("killAllDataSources") @Nullable Boolean killAllDataSources,
         @JsonProperty("killPendingSegmentsSkipList") @Nullable Object killPendingSegmentsSkipList,
-        @JsonProperty("maxSegmentsInNodeLoadingQueue") @Nullable Integer maxSegmentsInNodeLoadingQueue
+        @JsonProperty("maxSegmentsInNodeLoadingQueue") @Nullable Integer maxSegmentsInNodeLoadingQueue,
+        @JsonProperty("balancerThreshold") Integer balancerThreshold
     )
     {
       this.millisToWaitBeforeDeleting = millisToWaitBeforeDeleting;
@@ -361,6 +378,7 @@ public Builder(
       this.killDataSourceWhitelist = killDataSourceWhitelist;
       this.killPendingSegmentsSkipList = killPendingSegmentsSkipList;
       this.maxSegmentsInNodeLoadingQueue = maxSegmentsInNodeLoadingQueue;
+      this.balancerThreshold = balancerThreshold;
     }
 
     public Builder withMillisToWaitBeforeDeleting(long millisToWaitBeforeDeleting)
@@ -429,6 +447,13 @@ public Builder withMaxSegmentsInNodeLoadingQueue(int maxSegmentsInNodeLoadingQue
       return this;
     }
 
+    public Builder withBalancerThreshold(int balancerThreshold)
+    {
+      this.balancerThreshold = balancerThreshold;
+      return this;
+    }
+
+
     public CoordinatorDynamicConfig build()
     {
       return new CoordinatorDynamicConfig(
@@ -445,7 +470,8 @@ public CoordinatorDynamicConfig build()
           killPendingSegmentsSkipList,
           maxSegmentsInNodeLoadingQueue == null
           ? DEFAULT_MAX_SEGMENTS_IN_NODE_LOADING_QUEUE
-          : maxSegmentsInNodeLoadingQueue
+          : maxSegmentsInNodeLoadingQueue,
+          balancerThreshold == null ? DEFAULT_BALANCER_THRESHOLD : balancerThreshold
       );
     }
 
@@ -465,7 +491,8 @@ public CoordinatorDynamicConfig build(CoordinatorDynamicConfig defaults)
           killPendingSegmentsSkipList == null ? defaults.getKillPendingSegmentsSkipList() : killPendingSegmentsSkipList,
           maxSegmentsInNodeLoadingQueue == null
           ? defaults.getMaxSegmentsInNodeLoadingQueue()
-          : maxSegmentsInNodeLoadingQueue
+          : maxSegmentsInNodeLoadingQueue,
+          balancerThreshold == null ? defaults.getBalancerThreshold() : balancerThreshold
       );
     }
   }
diff --git a/server/src/main/java/org/apache/druid/server/coordinator/DruidCoordinator.java b/server/src/main/java/org/apache/druid/server/coordinator/DruidCoordinator.java
index 852068342c0..dc01d9e4329 100644
--- a/server/src/main/java/org/apache/druid/server/coordinator/DruidCoordinator.java
+++ b/server/src/main/java/org/apache/druid/server/coordinator/DruidCoordinator.java
@@ -652,7 +652,10 @@ public void run()
             getDynamicConfigs().getBalancerComputeThreads(),
             "coordinator-cost-balancer-%s"
         ));
-        BalancerStrategy balancerStrategy = factory.createBalancerStrategy(balancerExec);
+        BalancerStrategy balancerStrategy = factory.createBalancerStrategy(
+            balancerExec,
+            getDynamicConfigs().getBalancerThreshold()
+        );
 
         // Do coordinator stuff.
         DruidCoordinatorRuntimeParams params =
diff --git a/server/src/main/java/org/apache/druid/server/coordinator/RandomBalancerStrategy.java b/server/src/main/java/org/apache/druid/server/coordinator/RandomBalancerStrategy.java
index 8b0b3069817..287305bb0ac 100644
--- a/server/src/main/java/org/apache/druid/server/coordinator/RandomBalancerStrategy.java
+++ b/server/src/main/java/org/apache/druid/server/coordinator/RandomBalancerStrategy.java
@@ -19,6 +19,7 @@
 
 package org.apache.druid.server.coordinator;
 
+import org.apache.commons.compress.utils.Lists;
 import org.apache.druid.timeline.DataSegment;
 
 import java.util.ArrayList;
@@ -27,9 +28,17 @@
 import java.util.List;
 import java.util.NavigableSet;
 import java.util.concurrent.ThreadLocalRandom;
+import java.util.stream.Collectors;
 
 public class RandomBalancerStrategy implements BalancerStrategy
 {
+  private final int balancerThreshold;
+
+  public RandomBalancerStrategy(int balancerThreshold)
+  {
+    this.balancerThreshold = balancerThreshold;
+  }
+
   @Override
   public ServerHolder findNewSegmentHomeReplicator(DataSegment proposalSegment, List<ServerHolder> serverHolders)
   {
@@ -47,7 +56,24 @@ public ServerHolder findNewSegmentHomeReplicator(DataSegment proposalSegment, Li
   @Override
   public ServerHolder findNewSegmentHomeBalancer(DataSegment proposalSegment, List<ServerHolder> serverHolders)
   {
-    return null;  //To change body of implemented methods use File | Settings | File Templates.
+    double used = 0;
+    double max = 0;
+    for (ServerHolder holder : serverHolders) {
+      used += holder.getSizeUsed();
+      max += holder.getMaxSize();
+    }
+    double usedAvg = (100 * used) / max;
+    List<ServerHolder> serverCandidates = Lists.newArrayList();
+    serverCandidates.addAll(serverHolders.stream()
+                                         .filter(holder -> usedAvg - holder.getPercentUsed() > balancerThreshold)
+                                         .filter(holder -> !holder.isServingSegment(proposalSegment))
+                                         .collect(Collectors.toList()));
+    if (serverCandidates.isEmpty()) {
+      return null;
+    } else {
+      return serverCandidates.get(ThreadLocalRandom.current()
+                                                   .nextInt(serverCandidates.size()));
+    }
   }
 
   @Override
diff --git a/server/src/main/java/org/apache/druid/server/coordinator/RandomBalancerStrategyFactory.java b/server/src/main/java/org/apache/druid/server/coordinator/RandomBalancerStrategyFactory.java
index c02554050eb..51316e75c8c 100644
--- a/server/src/main/java/org/apache/druid/server/coordinator/RandomBalancerStrategyFactory.java
+++ b/server/src/main/java/org/apache/druid/server/coordinator/RandomBalancerStrategyFactory.java
@@ -23,9 +23,18 @@
 
 public class RandomBalancerStrategyFactory implements BalancerStrategyFactory
 {
+  private static final int DEFAULT_BALANCER_THRESHOLD = 5;
+
   @Override
   public BalancerStrategy createBalancerStrategy(ListeningExecutorService exec)
   {
-    return new RandomBalancerStrategy();
+    return new RandomBalancerStrategy(DEFAULT_BALANCER_THRESHOLD);
+  }
+
+  @Override
+  public BalancerStrategy createBalancerStrategy(ListeningExecutorService exec, int balancerThreshold)
+  {
+    return new RandomBalancerStrategy(balancerThreshold);
   }
+
 }
diff --git a/server/src/test/java/org/apache/druid/server/coordinator/RandomBalancerStrategyTest.java b/server/src/test/java/org/apache/druid/server/coordinator/RandomBalancerStrategyTest.java
new file mode 100644
index 00000000000..c97865d9324
--- /dev/null
+++ b/server/src/test/java/org/apache/druid/server/coordinator/RandomBalancerStrategyTest.java
@@ -0,0 +1,162 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+
+package org.apache.druid.server.coordinator;
+
+
+import com.google.common.collect.ImmutableMap;
+import org.apache.druid.client.ImmutableDruidDataSource;
+import org.apache.druid.client.ImmutableDruidServer;
+import org.apache.druid.java.util.common.Intervals;
+import org.apache.druid.server.coordination.DruidServerMetadata;
+import org.apache.druid.server.coordination.ServerType;
+import org.apache.druid.timeline.DataSegment;
+import org.easymock.EasyMock;
+import org.joda.time.Interval;
+import org.junit.Assert;
+import org.junit.Test;
+
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.concurrent.ConcurrentHashMap;
+
+public class RandomBalancerStrategyTest
+{
+  private static final Interval day = Intervals.of("2015-01-01T00/2015-01-01T01");
+  private static final long MAX_SIZE_PER_HISTORICAL = 10000L;
+
+  /**
+   * Creates a dummy Druid cluster with serverCount servers, each holding maxSegments segments and using 3000L
+   * of its 10000L capacity, plus one extra server holding (maxSegments - 2) segments and using only 1000L.
+   * <p>
+   * RandomBalancerStrategy should rebalance the segment onto the server with the lowest percent used.
+   * <p>
+   * Average used percent across the cluster is ~28.18.
+   * Used percent of BEST_SERVER is 10.
+   */
+  public static List<ServerHolder> setupDummyCluster(int serverCount, int maxSegments)
+  {
+    List<ServerHolder> serverHolderList = new ArrayList<>();
+    // Create serverCount servers with current size 3K and max size 10K,
+    // each holding maxSegments segments
+    for (int i = 0; i < serverCount; i++) {
+      LoadQueuePeonTester fromPeon = new LoadQueuePeonTester();
+      Map<String, DataSegment> segments = new HashMap<>();
+      for (int j = 0; j < maxSegments; j++) {
+        DataSegment segment = getSegment(j);
+        segments.put(segment.getIdentifier(), segment);
+      }
+
+      serverHolderList.add(
+          new ServerHolder(
+              new ImmutableDruidServer(
+                  new DruidServerMetadata(
+                      "DruidServer_Name_" + i,
+                      "localhost",
+                      null,
+                      MAX_SIZE_PER_HISTORICAL,
+                      ServerType.HISTORICAL,
+                      "hot",
+                      1
+                  ),
+                  3000L,
+                  ImmutableMap.of("DUMMY", EasyMock.createMock(ImmutableDruidDataSource.class)),
+                  ImmutableMap.copyOf(segments)
+              ),
+              fromPeon
+          ));
+    }
+
+    // The best candidate for the next segment assignment holds only (maxSegments - 2) segments and its current size is 1K
+    LoadQueuePeonTester fromPeon = new LoadQueuePeonTester();
+    ImmutableDruidServer druidServer = EasyMock.createMock(ImmutableDruidServer.class);
+    EasyMock.expect(druidServer.getName()).andReturn("BEST_SERVER").anyTimes();
+    EasyMock.expect(druidServer.getCurrSize()).andReturn(1000L).anyTimes();
+    EasyMock.expect(druidServer.getMaxSize()).andReturn(MAX_SIZE_PER_HISTORICAL).anyTimes();
+
+    EasyMock.expect(druidServer.getSegment(EasyMock.anyObject())).andReturn(null).anyTimes();
+    Map<String, DataSegment> segments = new HashMap<>();
+    for (int j = 0; j < (maxSegments - 2); j++) {
+      DataSegment segment = getSegment(j);
+      segments.put(segment.getIdentifier(), segment);
+      EasyMock.expect(druidServer.getSegment(segment.getIdentifier())).andReturn(segment).anyTimes();
+    }
+    EasyMock.expect(druidServer.getSegments()).andReturn(segments).anyTimes();
+
+    EasyMock.replay(druidServer);
+    serverHolderList.add(new ServerHolder(druidServer, fromPeon));
+    return serverHolderList;
+  }
+
+  /**
+   * Returns a dummy segment whose version is the given index and whose size is (index * 100).
+   *
+   * @param index index used as the segment version and to derive its size
+   *
+   * @return the dummy segment
+   */
+  public static DataSegment getSegment(int index)
+  {
+    return getSegment(index, "DUMMY", day);
+  }
+
+  public static DataSegment getSegment(int index, String dataSource, Interval interval)
+  {
+    // Not using EasyMock here, since mocking hurts performance in multithreaded tests.
+    DataSegment segment = new DataSegment(
+        dataSource,
+        interval,
+        String.valueOf(index),
+        new ConcurrentHashMap<>(),
+        new ArrayList<>(),
+        new ArrayList<>(),
+        null,
+        0,
+        index * 100L
+    );
+    return segment;
+  }
+
+  @Test
+  public void testFindNewSegmentHomeBalancerSuccess() throws InterruptedException
+  {
+    List<ServerHolder> serverHolderList = setupDummyCluster(10, 20);
+    DataSegment segment = getSegment(1000);
+    // cluster average used percent is ~28.18, BEST_SERVER is at 10, so a threshold of 18 admits it
+    BalancerStrategy strategy = new RandomBalancerStrategy(18);
+    ServerHolder holder = strategy.findNewSegmentHomeBalancer(segment, serverHolderList);
+    Assert.assertNotNull("Should be able to find a place for new segment!!", holder);
+    Assert.assertEquals("Best Server should be BEST_SERVER", "BEST_SERVER", holder.getServer().getName());
+  }
+
+  @Test
+  public void testFindNewSegmentHomeBalancerFail() throws InterruptedException
+  {
+    List<ServerHolder> serverHolderList = setupDummyCluster(10, 20);
+    DataSegment segment = getSegment(1000);
+
+    BalancerStrategy strategy = new RandomBalancerStrategy(19);
+    ServerHolder holder = strategy.findNewSegmentHomeBalancer(segment, serverHolderList);
+    Assert.assertNull("Should not be able to find a place for new segment!!", holder);
+  }
+
+}
diff --git a/server/src/test/java/org/apache/druid/server/http/CoordinatorDynamicConfigTest.java b/server/src/test/java/org/apache/druid/server/http/CoordinatorDynamicConfigTest.java
index 002e1977c15..78e6ff1811f 100644
--- a/server/src/test/java/org/apache/druid/server/http/CoordinatorDynamicConfigTest.java
+++ b/server/src/test/java/org/apache/druid/server/http/CoordinatorDynamicConfigTest.java
@@ -49,7 +49,8 @@ public void testSerde() throws Exception
                      + "  \"balancerComputeThreads\": 2, \n"
                      + "  \"emitBalancingStats\": true,\n"
                      + "  \"killDataSourceWhitelist\": [\"test1\",\"test2\"],\n"
-                     + "  \"maxSegmentsInNodeLoadingQueue\": 1\n"
+                     + "  \"maxSegmentsInNodeLoadingQueue\": 1,\n"
+                     + "  \"balancerThreshold\": 5 \n"
                      + "}\n";
 
     CoordinatorDynamicConfig actual = mapper.readValue(
@@ -61,7 +62,7 @@ public void testSerde() throws Exception
         ),
         CoordinatorDynamicConfig.class
     );
-    assertConfig(actual, 1, 1, 1, 1, 1, 1, 2, true, ImmutableSet.of("test1", "test2"), false, 1);
+    assertConfig(actual, 1, 1, 1, 1, 1, 1, 2, true, ImmutableSet.of("test1", "test2"), false, 1, 5);
   }
 
   @Test
@@ -77,7 +78,8 @@ public void testSerdeWithStringinKillDataSourceWhitelist() throws Exception
                      + "  \"balancerComputeThreads\": 2, \n"
                      + "  \"emitBalancingStats\": true,\n"
                      + "  \"killDataSourceWhitelist\": \"test1, test2\", \n"
-                     + "  \"maxSegmentsInNodeLoadingQueue\": 1\n"
+                     + "  \"maxSegmentsInNodeLoadingQueue\": 1,\n"
+                     + "  \"balancerThreshold\": 5 \n"
                      + "}\n";
 
     CoordinatorDynamicConfig actual = mapper.readValue(
@@ -89,7 +91,7 @@ public void testSerdeWithStringinKillDataSourceWhitelist() throws Exception
         ),
         CoordinatorDynamicConfig.class
     );
-    assertConfig(actual, 1, 1, 1, 1, 1, 1, 2, true, ImmutableSet.of("test1", "test2"), false, 1);
+    assertConfig(actual, 1, 1, 1, 1, 1, 1, 2, true, ImmutableSet.of("test1", "test2"), false, 1, 5);
   }
 
   @Test
@@ -105,7 +107,8 @@ public void testSerdeWithKillAllDataSources() throws Exception
                      + "  \"balancerComputeThreads\": 2, \n"
                      + "  \"emitBalancingStats\": true,\n"
                      + "  \"killAllDataSources\": true,\n"
-                     + "  \"maxSegmentsInNodeLoadingQueue\": 1\n"
+                     + "  \"maxSegmentsInNodeLoadingQueue\": 1,\n"
+                     + "  \"balancerThreshold\": 5 \n"
                      + "}\n";
 
     CoordinatorDynamicConfig actual = mapper.readValue(
@@ -118,7 +121,7 @@ public void testSerdeWithKillAllDataSources() throws Exception
         CoordinatorDynamicConfig.class
     );
 
-    assertConfig(actual, 1, 1, 1, 1, 1, 1, 2, true, ImmutableSet.of(), true, 1);
+    assertConfig(actual, 1, 1, 1, 1, 1, 1, 2, true, ImmutableSet.of(), true, 1, 5);
 
     //ensure whitelist is empty when killAllDataSources is true
     try {
@@ -150,7 +153,8 @@ public void testDeserializeWithoutMaxSegmentsInNodeLoadingQueue() throws Excepti
                      + "  \"replicationThrottleLimit\": 1,\n"
                      + "  \"balancerComputeThreads\": 2, \n"
                      + "  \"emitBalancingStats\": true,\n"
-                     + "  \"killAllDataSources\": true\n"
+                     + "  \"killAllDataSources\": true,\n"
+                     + "  \"balancerThreshold\": 5 \n"
                      + "}\n";
 
     CoordinatorDynamicConfig actual = mapper.readValue(
@@ -163,7 +167,7 @@ public void testDeserializeWithoutMaxSegmentsInNodeLoadingQueue() throws Excepti
         CoordinatorDynamicConfig.class
     );
 
-    assertConfig(actual, 1, 1, 1, 1, 1, 1, 2, true, ImmutableSet.of(), true, 0);
+    assertConfig(actual, 1, 1, 1, 1, 1, 1, 2, true, ImmutableSet.of(), true, 0, 5);
   }
 
   @Test
@@ -171,7 +175,7 @@ public void testBuilderDefaults()
   {
 
     CoordinatorDynamicConfig defaultConfig = CoordinatorDynamicConfig.builder().build();
-    assertConfig(defaultConfig, 900000, 524288000, 100, 5, 15, 10, 1, false, ImmutableSet.of(), false, 0);
+    assertConfig(defaultConfig, 900000, 524288000, 100, 5, 15, 10, 1, false, ImmutableSet.of(), false, 0, 5);
   }
 
   @Test
@@ -184,7 +188,7 @@ public void testUpdate()
     Assert.assertEquals(
         current,
         new CoordinatorDynamicConfig
-            .Builder(null, null, null, null, null, null, null, null, null, null, null, null)
+            .Builder(null, null, null, null, null, null, null, null, null, null, null, null, 5)
             .build(current)
     );
   }
@@ -209,7 +213,8 @@ private void assertConfig(CoordinatorDynamicConfig config,
                             boolean expectedEmitingBalancingStats,
                             Set<String> expectedKillDataSourceWhitelist,
                             boolean expectedKillAllDataSources,
-                            int expectedMaxSegmentsInNodeLoadingQueue)
+                            int expectedMaxSegmentsInNodeLoadingQueue,
+                            int balancerThreshold)
   {
     Assert.assertEquals(expectedMillisToWaitBeforeDeleting, config.getMillisToWaitBeforeDeleting());
     Assert.assertEquals(expectedMergeBytesLimit, config.getMergeBytesLimit());
@@ -222,5 +227,6 @@ private void assertConfig(CoordinatorDynamicConfig config,
     Assert.assertEquals(expectedKillDataSourceWhitelist, config.getKillDataSourceWhitelist());
     Assert.assertEquals(expectedKillAllDataSources, config.isKillAllDataSources());
     Assert.assertEquals(expectedMaxSegmentsInNodeLoadingQueue, config.getMaxSegmentsInNodeLoadingQueue());
+    Assert.assertEquals(balancerThreshold, config.getBalancerThreshold());
   }
 }
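
For reference, the arithmetic the two RandomBalancerStrategyTest cases above rely
on, with values taken from setupDummyCluster. This is a standalone sanity check,
not part of the patch, and it assumes ServerHolder.getPercentUsed() is on the same
0-100 scale as usedAvg in findNewSegmentHomeBalancer:

    // 10 servers at 3000/10000 used, plus BEST_SERVER at 1000/10000 used.
    double used = 10 * 3000.0 + 1000.0;                      // 31000
    double max = 11 * 10000.0;                               // 110000
    double usedAvg = 100 * used / max;                       // ~28.18
    double bestServerPercentUsed = 100 * 1000.0 / 10000.0;   // 10.0
    // usedAvg - bestServerPercentUsed is ~18.18, so:
    //   threshold 18 -> BEST_SERVER qualifies (testFindNewSegmentHomeBalancerSuccess)
    //   threshold 19 -> no candidates, null   (testFindNewSegmentHomeBalancerFail)
    System.out.printf("usedAvg=%.2f, gap=%.2f%n", usedAvg, usedAvg - bestServerPercentUsed);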

----------------------------------------------------------------
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
 
For queries about this service, please contact Infrastructure at:
users@infra.apache.org


With regards,
Apache Git Services

---------------------------------------------------------------------
To unsubscribe, e-mail: commits-unsubscribe@druid.apache.org
For additional commands, e-mail: commits-help@druid.apache.org