Posted to commits@lucene.apache.org by ho...@apache.org on 2019/02/06 21:42:36 UTC
[lucene-solr] branch master updated: SOLR-13210: Fix TriLevelCompositeIdRoutingTest to actually make sense
This is an automated email from the ASF dual-hosted git repository.
hossman pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/lucene-solr.git
The following commit(s) were added to refs/heads/master by this push:
new 87ad59f SOLR-13210: Fix TriLevelCompositeIdRoutingTest to actually make sense
87ad59f is described below
commit 87ad59f826d3ea5ea0e6239397c1d9a8acf59323
Author: Chris Hostetter <ho...@apache.org>
AuthorDate: Wed Feb 6 14:42:30 2019 -0700
SOLR-13210: Fix TriLevelCompositeIdRoutingTest to actually make sense
---
.../solr/cloud/TriLevelCompositeIdRoutingTest.java | 159 ++++++++++-----------
1 file changed, 74 insertions(+), 85 deletions(-)
diff --git a/solr/core/src/test/org/apache/solr/cloud/TriLevelCompositeIdRoutingTest.java b/solr/core/src/test/org/apache/solr/cloud/TriLevelCompositeIdRoutingTest.java
index 05a0ab9..a18f88c 100644
--- a/solr/core/src/test/org/apache/solr/cloud/TriLevelCompositeIdRoutingTest.java
+++ b/solr/core/src/test/org/apache/solr/cloud/TriLevelCompositeIdRoutingTest.java
@@ -16,6 +16,13 @@
*/
package org.apache.solr.cloud;
+import java.lang.invoke.MethodHandles;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Map;
+import java.util.Set;
+
import org.apache.lucene.util.TestUtil;
import org.apache.solr.client.solrj.response.QueryResponse;
import org.apache.solr.common.SolrDocument;
@@ -24,18 +31,15 @@ import org.junit.Test;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
-import java.lang.invoke.MethodHandles;
-import java.util.HashMap;
-import java.util.HashSet;
-import java.util.Set;
public class TriLevelCompositeIdRoutingTest extends ShardRoutingTest {
private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
- final int NUM_APPS;
- final int NUM_USERS;
- final int NUM_DOCS;
+ final int MAX_APP_ID;
+ final int MAX_USER_ID;
+ final int MAX_DOC_ID;
+ final int NUM_ADDS;
@BeforeClass
@@ -51,14 +55,14 @@ public class TriLevelCompositeIdRoutingTest extends ShardRoutingTest {
public TriLevelCompositeIdRoutingTest() {
schemaString = "schema15.xml"; // we need a string id
-
sliceCount = TestUtil.nextInt(random(), 1, (TEST_NIGHTLY ? 5 : 3)); // this is the number of *SHARDS*
int replicationFactor = rarely() ? 2 : 1; // replication is not the focus of this test
fixShardCount(replicationFactor * sliceCount); // total num cores, one per node
-
- NUM_APPS = atLeast(5);
- NUM_USERS = atLeast(10);
- NUM_DOCS = atLeast(100);
+
+ MAX_APP_ID = atLeast(5);
+ MAX_USER_ID = atLeast(10);
+ MAX_DOC_ID = atLeast(20);
+ NUM_ADDS = atLeast(200);
}
@Test
@@ -71,11 +75,57 @@ public class TriLevelCompositeIdRoutingTest extends ShardRoutingTest {
// todo: do I have to do this here?
waitForRecoveriesToFinish(true);
- doTriLevelHashingTest();
- del("*:*");
+ // NOTE: we might randomly generate the same uniqueKey value multiple times,
+ // (which is a valid test case, they should route to the same shard both times)
+ // so we track each expectedId in a set for later sanity checking
+ final Set<String> expectedUniqueKeys = new HashSet<>();
+ for (int i = 0; i < NUM_ADDS; i++) {
+ final int appId = r.nextInt(MAX_APP_ID) + 1;
+ final int userId = r.nextInt(MAX_USER_ID) + 1;
+ // skew the odds so half the time we have no mask, and half the time we
+ // have an even distribution of 1-16 bits
+ final int bitMask = Math.max(0, r.nextInt(32)-15);
+
+ String id = "app" + appId + (bitMask <= 0 ? "" : ("/" + bitMask))
+ + "!" + "user" + userId
+ + "!" + "doc" + r.nextInt(MAX_DOC_ID);
+
+ doAddDoc(id);
+ expectedUniqueKeys.add(id);
+ }
+
commit();
- doTriLevelHashingTestWithBitMask();
+
+ final Map<String, String> routePrefixMap = new HashMap<>();
+ final Set<String> actualUniqueKeys = new HashSet<>();
+ for (int i = 1; i <= sliceCount; i++) {
+ final String shardId = "shard" + i;
+ final Set<String> uniqueKeysInShard = fetchUniqueKeysFromShard(shardId);
+
+ { // sanity check our uniqueKey values aren't duplicated across shards
+ final Set<String> uniqueKeysOnDuplicateShards = new HashSet<>(uniqueKeysInShard);
+ uniqueKeysOnDuplicateShards.retainAll(actualUniqueKeys);
+ assertEquals(shardId + " contains some uniqueKeys that were already found on a previous shard",
+ Collections.emptySet(), uniqueKeysOnDuplicateShards);
+ actualUniqueKeys.addAll(uniqueKeysInShard);
+ }
+
+ // for each uniqueKey, extract its route prefix and confirm those aren't spread across multiple shards
+ for (String uniqueKey : uniqueKeysInShard) {
+ final String routePrefix = uniqueKey.substring(0, uniqueKey.lastIndexOf('!'));
+ log.debug("shard( {} ) : uniqueKey( {} ) -> routePrefix( {} )", shardId, uniqueKey, routePrefix);
+ assertNotNull("null prefix WTF? " + uniqueKey, routePrefix);
+
+ final String otherShard = routePrefixMap.put(routePrefix, shardId);
+ if (null != otherShard)
+ // if we already had a mapping, make sure it's an earlier doc from our current shard...
+ assertEquals("routePrefix " + routePrefix + " found in multiple shards",
+ shardId, otherShard);
+ }
+ }
+ assertEquals("Docs missing?", expectedUniqueKeys.size(), actualUniqueKeys.size());
+
testFinished = true;
} finally {
if (!testFinished) {
@@ -83,82 +133,21 @@ public class TriLevelCompositeIdRoutingTest extends ShardRoutingTest {
}
}
}
-
- private void doTriLevelHashingTest() throws Exception {
- log.info("### STARTING doTriLevelHashingTest");
- // for now, we know how ranges will be distributed to shards.
- // may have to look it up in clusterstate if that assumption changes.
-
- for (int i = 0; i < NUM_DOCS; i++) {
- int appId = r.nextInt(NUM_APPS) + 1;
- int userId = r.nextInt(NUM_USERS) + 1;
-
- String id = "app" + appId + "!" + "user" + userId + "!" + "doc" + r.nextInt(100);
- doAddDoc(id);
-
- }
-
- commit();
-
- HashMap<String, Integer> idMap = new HashMap<>();
-
- for (int i = 1; i <= sliceCount; i++) {
-
- Set<String> ids = doQueryGetUniqueIdKeys("q", "*:*", "rows", ""+NUM_DOCS, "shards", "shard" + i);
- for (String id : ids) {
- assertFalse("Found the same route key [" + id + "] in 2 shards.", idMap.containsKey(id));
- idMap.put(getKey(id), i);
- }
- }
-
- }
-
-
- private void doTriLevelHashingTestWithBitMask() throws Exception {
- log.info("### STARTING doTriLevelHashingTestWithBitMask");
- // for now, we know how ranges will be distributed to shards.
- // may have to look it up in clusterstate if that assumption changes.
-
- for (int i = 0; i < NUM_DOCS; i++) {
- int appId = r.nextInt(NUM_APPS) + 1;
- int userId = r.nextInt(NUM_USERS) + 1;
- int bitMask = r.nextInt(16) + 1;
-
- String id = "app" + appId + "/" + bitMask + "!" + "user" + userId + "!" + "doc" + r.nextInt(100);
- doAddDoc(id);
-
- }
-
- commit();
-
- HashMap<String, Integer> idMap = new HashMap<>();
-
- for (int i = 1; i <= sliceCount; i++) {
-
- Set<String> ids = doQueryGetUniqueIdKeys("q", "*:*", "rows", ""+NUM_DOCS, "shards", "shard" + i);
- for (String id : ids) {
- assertFalse("Found the same route key [" + id + "] in 2 shards.", idMap.containsKey(id));
- idMap.put(getKey(id), i);
- }
- }
-
- }
-
+
void doAddDoc(String id) throws Exception {
index("id", id);
// todo - target diff servers and use cloud clients as well as non-cloud clients
}
- Set<String> doQueryGetUniqueIdKeys(String... queryParams) throws Exception {
- QueryResponse rsp = cloudClient.query(params(queryParams));
- Set<String> obtainedIdKeys = new HashSet<>();
+ private Set<String> fetchUniqueKeysFromShard(final String shardId) throws Exception {
+ // NUM_ADDS is an absolute upper bound on the num docs in the index
+ QueryResponse rsp = cloudClient.query(params("q", "*:*", "rows", ""+NUM_ADDS, "shards", shardId));
+ Set<String> uniqueKeys = new HashSet<>();
for (SolrDocument doc : rsp.getResults()) {
- obtainedIdKeys.add(getKey((String) doc.get("id")));
+ final String id = (String) doc.get("id");
+ assertNotNull("null id WTF? " + doc.toString(), id);
+ uniqueKeys.add(id);
}
- return obtainedIdKeys;
- }
-
- private String getKey(String id) {
- return id.substring(0, id.lastIndexOf('!'));
+ return uniqueKeys;
}
}
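
For readers unfamiliar with composite-id routing, the following is a minimal standalone sketch of the id construction the reworked test performs above: a tri-level id of the form app!user!doc, where the first component may carry an optional "/bits" suffix for the compositeId router. The class name, literal bounds, and printout are illustrative only and not part of the commit (the test derives its bounds from atLeast(...) at runtime).

    import java.util.Random;

    public class CompositeIdExample {
      public static void main(String[] args) {
        final Random r = new Random();

        // Illustrative bounds; the test uses MAX_APP_ID, MAX_USER_ID, MAX_DOC_ID
        // chosen via atLeast(5) / atLeast(10) / atLeast(20).
        final int appId = r.nextInt(5) + 1;
        final int userId = r.nextInt(10) + 1;

        // Half the time no explicit bit mask, otherwise 1-16 bits,
        // mirroring the skewed distribution used in the test.
        final int bitMask = Math.max(0, r.nextInt(32) - 15);

        // Tri-level composite id, e.g. "app3/8!user7!doc12"; the optional "/8"
        // tells the compositeId router how many bits of the hash to take from
        // that component (hedged reading of the router's syntax).
        final String id = "app" + appId + (bitMask <= 0 ? "" : ("/" + bitMask))
            + "!" + "user" + userId
            + "!" + "doc" + r.nextInt(20);

        System.out.println(id);
      }
    }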
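The core invariant the test now asserts is that a given route prefix (everything before the last '!') never appears on more than one shard. Below is a minimal sketch of that check, assuming the per-shard uniqueKeys have already been fetched into a map; the method name and map shape are assumptions for illustration, while the actual test queries each shard via cloudClient and fails with JUnit assertions instead of returning a value.

    import java.util.HashMap;
    import java.util.Map;
    import java.util.Set;

    public class RoutePrefixCheck {

      /**
       * Returns a route prefix that was observed on two different shards,
       * or null if every prefix is confined to a single shard.
       * Keys are assumed to be composite ids containing at least one '!'.
       */
      static String findPrefixOnMultipleShards(Map<String, Set<String>> uniqueKeysByShard) {
        final Map<String, String> routePrefixToShard = new HashMap<>();
        for (Map.Entry<String, Set<String>> e : uniqueKeysByShard.entrySet()) {
          final String shardId = e.getKey();
          for (String uniqueKey : e.getValue()) {
            // route prefix = everything before the last '!', e.g. "app3/8!user7"
            final String routePrefix = uniqueKey.substring(0, uniqueKey.lastIndexOf('!'));
            final String otherShard = routePrefixToShard.put(routePrefix, shardId);
            if (otherShard != null && !otherShard.equals(shardId)) {
              return routePrefix; // same prefix seen on two different shards
            }
          }
        }
        return null;
      }
    }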