Posted to commits@lucene.apache.org by ab...@apache.org on 2018/04/10 17:07:15 UTC

lucene-solr:jira/solr-12181: SOLR-12181: Test the support for mixed bytes / docs bounds.

Repository: lucene-solr
Updated Branches:
  refs/heads/jira/solr-12181 751987d53 -> e94e05341


SOLR-12181: Test the support for mixed bytes / docs bounds.


Project: http://git-wip-us.apache.org/repos/asf/lucene-solr/repo
Commit: http://git-wip-us.apache.org/repos/asf/lucene-solr/commit/e94e0534
Tree: http://git-wip-us.apache.org/repos/asf/lucene-solr/tree/e94e0534
Diff: http://git-wip-us.apache.org/repos/asf/lucene-solr/diff/e94e0534

Branch: refs/heads/jira/solr-12181
Commit: e94e05341670a2471d6cadb8efd2482792b20f5a
Parents: 751987d
Author: Andrzej Bialecki <ab...@apache.org>
Authored: Tue Apr 10 19:06:29 2018 +0200
Committer: Andrzej Bialecki <ab...@apache.org>
Committed: Tue Apr 10 19:06:29 2018 +0200

----------------------------------------------------------------------
 .../cloud/autoscaling/IndexSizeTrigger.java     |  32 ++-
 .../cloud/autoscaling/IndexSizeTriggerTest.java | 198 +++++++++++++++++++
 2 files changed, 224 insertions(+), 6 deletions(-)
----------------------------------------------------------------------
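
The new test exercises a trigger that mixes docs-based and bytes-based bounds in a single configuration. As a rough sketch (the test randomizes waitFor, so '5s' below is only illustrative), the set-trigger payload it sends looks like this:

    {
      "set-trigger" : {
        "name"       : "index_size_trigger",
        "event"      : "indexSize",
        "waitFor"    : "5s",
        "aboveDocs"  : 10000,
        "belowDocs"  : 100,
        "aboveBytes" : 150000,
        "belowBytes" : 10,
        "enabled"    : false,
        "actions"    : [
          { "name" : "compute_plan", "class" : "solr.ComputePlanAction" },
          { "name" : "execute_plan", "class" : "org.apache.solr.cloud.autoscaling.ExecutePlanAction" }
        ]
      }
    }

Indexing pushes the shards over aboveBytes (but not aboveDocs), which should produce SPLITSHARD ops, while deleting most of the documents afterwards drops them under belowDocs (but not belowBytes), which should produce MERGESHARDS ops; until MERGESHARDS is implemented these are still reported under unsupportedOps.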


http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/e94e0534/solr/core/src/java/org/apache/solr/cloud/autoscaling/IndexSizeTrigger.java
----------------------------------------------------------------------
diff --git a/solr/core/src/java/org/apache/solr/cloud/autoscaling/IndexSizeTrigger.java b/solr/core/src/java/org/apache/solr/cloud/autoscaling/IndexSizeTrigger.java
index 725da62..756f88f 100644
--- a/solr/core/src/java/org/apache/solr/cloud/autoscaling/IndexSizeTrigger.java
+++ b/solr/core/src/java/org/apache/solr/cloud/autoscaling/IndexSizeTrigger.java
@@ -38,6 +38,7 @@ import org.apache.solr.common.SolrException;
 import org.apache.solr.common.cloud.ClusterState;
 import org.apache.solr.common.cloud.DocCollection;
 import org.apache.solr.common.cloud.Replica;
+import org.apache.solr.common.cloud.Slice;
 import org.apache.solr.common.params.CollectionParams;
 import org.apache.solr.common.util.Pair;
 import org.apache.solr.common.util.StrUtils;
@@ -61,10 +62,11 @@ public class IndexSizeTrigger extends TriggerBase {
   public static final String BELOW_OP_PROP = "belowOp";
   public static final String COLLECTIONS_PROP = "collections";
 
-  public static final String BYTES_SIZE_PROP = "__bytesSize__";
-  public static final String DOCS_SIZE_PROP = "__docsSize__";
+  public static final String BYTES_SIZE_PROP = "__bytes__";
+  public static final String DOCS_SIZE_PROP = "__docs__";
   public static final String ABOVE_SIZE_PROP = "aboveSize";
   public static final String BELOW_SIZE_PROP = "belowSize";
+  public static final String VIOLATION_PROP = "violationType";
 
   public enum Unit { bytes, docs }
 
@@ -204,8 +206,12 @@ public class IndexSizeTrigger extends TriggerBase {
           DocCollection docCollection = clusterState.getCollection(coll);
 
           shards.forEach((sh, replicas) -> {
-            // check only the leader
-            Replica r = docCollection.getSlice(sh).getLeader();
+            // check only the leader, and only for active shards
+            Slice s = docCollection.getSlice(sh);
+            if (s.getState() != Slice.State.ACTIVE) {
+              return;
+            }
+            Replica r = s.getLeader();
             // no leader - don't do anything
             if (r == null) {
               return;
@@ -219,7 +225,7 @@ public class IndexSizeTrigger extends TriggerBase {
               }
             }
             if (info == null) {
-              // probably replica is not on this node
+              // probably replica is not on this node?
               return;
             }
             // we have to translate to the metrics registry name, which uses "_replica_nN" as suffix
@@ -278,6 +284,11 @@ public class IndexSizeTrigger extends TriggerBase {
           ReplicaInfo info = e.getValue();
           List<ReplicaInfo> infos = aboveSize.computeIfAbsent(info.getCollection(), c -> new ArrayList<>());
           if (!infos.contains(info)) {
+            if ((Long)e.getValue().getVariable(BYTES_SIZE_PROP) > aboveBytes) {
+              info.getVariables().put(VIOLATION_PROP, ABOVE_BYTES_PROP);
+            } else {
+              info.getVariables().put(VIOLATION_PROP, ABOVE_DOCS_PROP);
+            }
             infos.add(info);
           }
         });
@@ -292,6 +303,11 @@ public class IndexSizeTrigger extends TriggerBase {
           ReplicaInfo info = e.getValue();
           List<ReplicaInfo> infos = belowSize.computeIfAbsent(info.getCollection(), c -> new ArrayList<>());
           if (!infos.contains(info)) {
+            if ((Long)e.getValue().getVariable(BYTES_SIZE_PROP) < belowBytes) {
+              info.getVariables().put(VIOLATION_PROP, BELOW_BYTES_PROP);
+            } else {
+              info.getVariables().put(VIOLATION_PROP, BELOW_DOCS_PROP);
+            }
             infos.add(info);
           }
         });
@@ -335,7 +351,11 @@ public class IndexSizeTrigger extends TriggerBase {
           return 0;
         }
       });
-      // take the top two smallest
+
+      // TODO: MERGESHARDS is not implemented yet. For now take the top two smallest shards
+      // TODO: but in the future we probably need to get ones with adjacent ranges.
+
+      // TODO: generate as many MERGESHARDS as needed to consume all belowSize shards
       TriggerEvent.Op op = new TriggerEvent.Op(belowOp);
       op.addHint(Suggester.Hint.COLL_SHARD, new Pair(coll, replicas.get(0).getShard()));
       op.addHint(Suggester.Hint.COLL_SHARD, new Pair(coll, replicas.get(1).getShard()));

http://git-wip-us.apache.org/repos/asf/lucene-solr/blob/e94e0534/solr/core/src/test/org/apache/solr/cloud/autoscaling/IndexSizeTriggerTest.java
----------------------------------------------------------------------
diff --git a/solr/core/src/test/org/apache/solr/cloud/autoscaling/IndexSizeTriggerTest.java b/solr/core/src/test/org/apache/solr/cloud/autoscaling/IndexSizeTriggerTest.java
index a24b447..79dd019 100644
--- a/solr/core/src/test/org/apache/solr/cloud/autoscaling/IndexSizeTriggerTest.java
+++ b/solr/core/src/test/org/apache/solr/cloud/autoscaling/IndexSizeTriggerTest.java
@@ -29,6 +29,7 @@ import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicBoolean;
 import java.util.concurrent.atomic.AtomicReference;
 
+import org.apache.lucene.util.TestUtil;
 import org.apache.solr.client.solrj.SolrClient;
 import org.apache.solr.client.solrj.SolrRequest;
 import org.apache.solr.client.solrj.cloud.SolrCloudManager;
@@ -36,6 +37,7 @@ import org.apache.solr.client.solrj.cloud.autoscaling.AutoScalingConfig;
 import org.apache.solr.client.solrj.cloud.autoscaling.Suggester;
 import org.apache.solr.client.solrj.cloud.autoscaling.TriggerEventProcessorStage;
 import org.apache.solr.client.solrj.request.CollectionAdminRequest;
+import org.apache.solr.client.solrj.request.UpdateRequest;
 import org.apache.solr.cloud.CloudTestUtils;
 import org.apache.solr.cloud.SolrCloudTestCase;
 import org.apache.solr.cloud.autoscaling.sim.SimCloudManager;
@@ -427,6 +429,202 @@ public class IndexSizeTriggerTest extends SolrCloudTestCase {
     unsupportedOps.forEach(op -> assertEquals(CollectionParams.CollectionAction.MERGESHARDS, op.getAction()));
   }
 
+  @Test
+  public void testMixedBounds() throws Exception {
+    if (cloudManager instanceof SimCloudManager) {
+      log.warn("Requires SOLR-12208");
+      return;
+    }
+
+    String collectionName = "testMixedBounds_collection";
+    CollectionAdminRequest.Create create = CollectionAdminRequest.createCollection(collectionName,
+        "conf", 2, 2).setMaxShardsPerNode(2);
+    create.process(solrClient);
+    CloudTestUtils.waitForState(cloudManager, "failed to create " + collectionName, collectionName,
+        CloudTestUtils.clusterShape(2, 2));
+
+    for (int j = 0; j < 10; j++) {
+      UpdateRequest ureq = new UpdateRequest();
+      ureq.setParam("collection", collectionName);
+      for (int i = 0; i < 100; i++) {
+        SolrInputDocument doc = new SolrInputDocument("id", "id-" + (i * 100) + "-" + j);
+        doc.addField("foo", TestUtil.randomSimpleString(random(), 130, 130));
+        ureq.add(doc);
+      }
+      solrClient.request(ureq);
+    }
+    solrClient.commit(collectionName);
+
+    long waitForSeconds = 3 + random().nextInt(5);
+
+    // the trigger is initially disabled so that we have time to add listeners
+    // and have them capture all events once the trigger is enabled
+    String setTriggerCommand = "{" +
+        "'set-trigger' : {" +
+        "'name' : 'index_size_trigger'," +
+        "'event' : 'indexSize'," +
+        "'waitFor' : '" + waitForSeconds + "s'," +
+        // don't hit this limit when indexing
+        "'aboveDocs' : 10000," +
+        // hit this limit when deleting
+        "'belowDocs' : 100," +
+        // hit this limit when indexing
+        "'aboveBytes' : 150000," +
+        // don't hit this limit when deleting
+        "'belowBytes' : 10," +
+        "'enabled' : false," +
+        "'actions' : [{'name' : 'compute_plan', 'class' : 'solr.ComputePlanAction'}," +
+        "{'name' : 'execute_plan', 'class' : '" + ExecutePlanAction.class.getName() + "'}]" +
+        "}}";
+    SolrRequest req = createAutoScalingRequest(SolrRequest.METHOD.POST, setTriggerCommand);
+    NamedList<Object> response = solrClient.request(req);
+    assertEquals(response.get("result").toString(), "success");
+
+    String setListenerCommand = "{" +
+        "'set-listener' : " +
+        "{" +
+        "'name' : 'capturing'," +
+        "'trigger' : 'index_size_trigger'," +
+        "'stage' : ['STARTED','ABORTED','SUCCEEDED','FAILED']," +
+        "'beforeAction' : ['compute_plan','execute_plan']," +
+        "'afterAction' : ['compute_plan','execute_plan']," +
+        "'class' : '" + CapturingTriggerListener.class.getName() + "'" +
+        "}" +
+        "}";
+    req = createAutoScalingRequest(SolrRequest.METHOD.POST, setListenerCommand);
+    response = solrClient.request(req);
+    assertEquals(response.get("result").toString(), "success");
+
+    setListenerCommand = "{" +
+        "'set-listener' : " +
+        "{" +
+        "'name' : 'finished'," +
+        "'trigger' : 'index_size_trigger'," +
+        "'stage' : ['SUCCEEDED']," +
+        "'class' : '" + FinishedProcessingListener.class.getName() + "'" +
+        "}" +
+        "}";
+    req = createAutoScalingRequest(SolrRequest.METHOD.POST, setListenerCommand);
+    response = solrClient.request(req);
+    assertEquals(response.get("result").toString(), "success");
+
+    // now enable the trigger
+    String resumeTriggerCommand = "{" +
+        "'resume-trigger' : {" +
+        "'name' : 'index_size_trigger'" +
+        "}" +
+        "}";
+    req = createAutoScalingRequest(SolrRequest.METHOD.POST, resumeTriggerCommand);
+    response = solrClient.request(req);
+    assertEquals(response.get("result").toString(), "success");
+
+    timeSource.sleep(TimeUnit.MILLISECONDS.convert(waitForSeconds + 1, TimeUnit.SECONDS));
+
+    boolean await = finished.await(90000 / SPEED, TimeUnit.MILLISECONDS);
+    assertTrue("did not finish processing in time", await);
+    assertEquals(1, listenerEvents.size());
+    List<CapturedEvent> events = listenerEvents.get("capturing");
+    assertNotNull("'capturing' events not found", events);
+    assertEquals("events: " + events, 6, events.size());
+    assertEquals(TriggerEventProcessorStage.STARTED, events.get(0).stage);
+    assertEquals(TriggerEventProcessorStage.BEFORE_ACTION, events.get(1).stage);
+    assertEquals(TriggerEventProcessorStage.AFTER_ACTION, events.get(2).stage);
+    assertEquals(TriggerEventProcessorStage.BEFORE_ACTION, events.get(3).stage);
+    assertEquals(TriggerEventProcessorStage.AFTER_ACTION, events.get(4).stage);
+    assertEquals(TriggerEventProcessorStage.SUCCEEDED, events.get(5).stage);
+
+    // collection should have 2 inactive and 4 active shards
+    CloudTestUtils.waitForState(cloudManager, "failed to create " + collectionName, collectionName,
+        CloudTestUtils.clusterShape(6, 2, true));
+
+    // check ops
+    List<TriggerEvent.Op> ops = (List<TriggerEvent.Op>) events.get(4).event.getProperty(TriggerEvent.REQUESTED_OPS);
+    assertNotNull("should contain requestedOps", ops);
+    assertEquals("number of ops", 2, ops.size());
+    boolean shard1 = false;
+    boolean shard2 = false;
+    for (TriggerEvent.Op op : ops) {
+      assertEquals(CollectionParams.CollectionAction.SPLITSHARD, op.getAction());
+      Set<Pair<String, String>> hints = (Set<Pair<String, String>>)op.getHints().get(Suggester.Hint.COLL_SHARD);
+      assertNotNull("hints", hints);
+      assertEquals("hints", 1, hints.size());
+      Pair<String, String> p = hints.iterator().next();
+      assertEquals(collectionName, p.first());
+      if (p.second().equals("shard1")) {
+        shard1 = true;
+      } else if (p.second().equals("shard2")) {
+        shard2 = true;
+      } else {
+        fail("unexpected shard name " + p.second());
+      }
+    }
+    assertTrue("shard1 should be split", shard1);
+    assertTrue("shard2 should be split", shard2);
+
+    // now delete most of the docs to trigger the belowDocs condition
+    listenerEvents.clear();
+    finished = new CountDownLatch(1);
+
+    // suspend the trigger first so that we can safely delete all docs
+    String suspendTriggerCommand = "{" +
+        "'suspend-trigger' : {" +
+        "'name' : 'index_size_trigger'" +
+        "}" +
+        "}";
+    req = createAutoScalingRequest(SolrRequest.METHOD.POST, suspendTriggerCommand);
+    response = solrClient.request(req);
+    assertEquals(response.get("result").toString(), "success");
+
+    for (int j = 0; j < 8; j++) {
+      UpdateRequest ureq = new UpdateRequest();
+      ureq.setParam("collection", collectionName);
+      for (int i = 0; i < 95; i++) {
+        ureq.deleteById("id-" + (i * 100) + "-" + j);
+      }
+      solrClient.request(ureq);
+    }
+    solrClient.commit(collectionName);
+
+    // resume trigger
+    req = createAutoScalingRequest(SolrRequest.METHOD.POST, resumeTriggerCommand);
+    response = solrClient.request(req);
+    assertEquals(response.get("result").toString(), "success");
+
+    timeSource.sleep(TimeUnit.MILLISECONDS.convert(waitForSeconds + 1, TimeUnit.SECONDS));
+
+    await = finished.await(90000 / SPEED, TimeUnit.MILLISECONDS);
+    assertTrue("did not finish processing in time", await);
+    assertEquals(1, listenerEvents.size());
+    events = listenerEvents.get("capturing");
+    assertNotNull("'capturing' events not found", events);
+    assertEquals("events: " + events, 6, events.size());
+    assertEquals(TriggerEventProcessorStage.STARTED, events.get(0).stage);
+    assertEquals(TriggerEventProcessorStage.BEFORE_ACTION, events.get(1).stage);
+    assertEquals(TriggerEventProcessorStage.AFTER_ACTION, events.get(2).stage);
+    assertEquals(TriggerEventProcessorStage.BEFORE_ACTION, events.get(3).stage);
+    assertEquals(TriggerEventProcessorStage.AFTER_ACTION, events.get(4).stage);
+    assertEquals(TriggerEventProcessorStage.SUCCEEDED, events.get(5).stage);
+
+    // check ops
+    ops = (List<TriggerEvent.Op>) events.get(4).event.getProperty(TriggerEvent.REQUESTED_OPS);
+    assertNotNull("should contain requestedOps", ops);
+    assertTrue("number of ops: " + ops, ops.size() > 0);
+    for (TriggerEvent.Op op : ops) {
+      assertEquals(CollectionParams.CollectionAction.MERGESHARDS, op.getAction());
+      Set<Pair<String, String>> hints = (Set<Pair<String, String>>)op.getHints().get(Suggester.Hint.COLL_SHARD);
+      assertNotNull("hints", hints);
+      assertEquals("hints", 2, hints.size());
+      Pair<String, String> p = hints.iterator().next();
+      assertEquals(collectionName, p.first());
+    }
+
+    // TODO: fix this once MERGESHARDS is supported
+    List<TriggerEvent.Op> unsupportedOps = (List<TriggerEvent.Op>)events.get(2).context.get("properties.unsupportedOps");
+    assertNotNull("should have unsupportedOps", unsupportedOps);
+    assertEquals(unsupportedOps.toString() + "\n" + ops, ops.size(), unsupportedOps.size());
+    unsupportedOps.forEach(op -> assertEquals(CollectionParams.CollectionAction.MERGESHARDS, op.getAction()));
+  }
+
   private Map<String, Object> createTriggerProps(long waitForSeconds) {
     Map<String, Object> props = new HashMap<>();
     props.put("event", "indexSize");