You are viewing a plain text version of this content. The canonical link for it is here.
Posted to commits@lucene.apache.org by th...@apache.org on 2020/08/31 16:12:23 UTC
[lucene-solr] branch reference_impl_dev updated: Re-enabling some
CSC tests and move cluster setup to BeforeClass
This is an automated email from the ASF dual-hosted git repository.
thelabdude pushed a commit to branch reference_impl_dev
in repository https://gitbox.apache.org/repos/asf/lucene-solr.git
The following commit(s) were added to refs/heads/reference_impl_dev by this push:
new a0bba1e Re-enabling some CSC tests and move cluster setup to BeforeClass
a0bba1e is described below
commit a0bba1e4c40b6666a2d900daf9dc3bc386ec3ad1
Author: Timothy Potter <th...@gmail.com>
AuthorDate: Mon Aug 31 10:12:08 2020 -0600
Re-enabling some CSC tests and move cluster setup to BeforeClass
---
.../client/solrj/impl/CloudSolrClientTest.java | 166 +++++++++++----------
1 file changed, 87 insertions(+), 79 deletions(-)
diff --git a/solr/solrj/src/test/org/apache/solr/client/solrj/impl/CloudSolrClientTest.java b/solr/solrj/src/test/org/apache/solr/client/solrj/impl/CloudSolrClientTest.java
index 59e8bba..469c706 100644
--- a/solr/solrj/src/test/org/apache/solr/client/solrj/impl/CloudSolrClientTest.java
+++ b/solr/solrj/src/test/org/apache/solr/client/solrj/impl/CloudSolrClientTest.java
@@ -49,10 +49,8 @@ import org.apache.solr.client.solrj.request.CollectionAdminRequest;
import org.apache.solr.client.solrj.request.QueryRequest;
import org.apache.solr.client.solrj.request.UpdateRequest;
import org.apache.solr.client.solrj.response.QueryResponse;
-import org.apache.solr.client.solrj.response.RequestStatusState;
-import org.apache.solr.client.solrj.response.UpdateResponse;
import org.apache.solr.client.solrj.response.SolrPingResponse;
-import org.apache.solr.cloud.AbstractDistribZkTestBase;
+import org.apache.solr.client.solrj.response.UpdateResponse;
import org.apache.solr.cloud.SolrCloudTestCase;
import org.apache.solr.common.SolrDocument;
import org.apache.solr.common.SolrDocumentList;
@@ -74,8 +72,8 @@ import org.apache.solr.handler.admin.CollectionsHandler;
import org.apache.solr.handler.admin.ConfigSetsHandler;
import org.apache.solr.handler.admin.CoreAdminHandler;
import org.junit.After;
-import org.junit.AfterClass;
import org.junit.Before;
+import org.junit.BeforeClass;
import org.junit.Ignore;
import org.junit.Rule;
import org.junit.Test;
@@ -88,7 +86,6 @@ import org.slf4j.LoggerFactory;
* This test would be faster if we simulated the zk state instead.
*/
@Slow
-@Ignore // nocommit debug, TJP WIP ...
public class CloudSolrClientTest extends SolrCloudTestCase {
private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
@@ -101,19 +98,34 @@ public class CloudSolrClientTest extends SolrCloudTestCase {
private static final int TIMEOUT = 30;
private static final int NODE_COUNT = 3;
- private static CloudHttp2SolrClient httpBasedCloudSolrClient = null;
+ private static final String TEST_CONFIGSET_NAME = "conf";
+ private CloudHttp2SolrClient httpBasedCloudSolrClient = null;
- @Before
- public void setupCluster() throws Exception {
+ @BeforeClass
+ public static void setupCluster() throws Exception {
configureCluster(NODE_COUNT)
- .addConfig("conf", getFile("solrj").toPath().resolve("solr").resolve("configsets").resolve("streaming").resolve("conf"))
+ .addConfig(TEST_CONFIGSET_NAME, getFile("solrj").toPath().resolve("solr").resolve("configsets").resolve("streaming").resolve("conf"))
.configure();
+ }
+ @Before
+ public void initHttpBasedCloudSolrClient() {
final List<String> solrUrls = new ArrayList<>();
- solrUrls.add(cluster.getJettySolrRunner(0).getBaseUrl().toString());
+ solrUrls.add(cluster.getJettySolrRunner(0).getBaseUrl());
httpBasedCloudSolrClient = new CloudHttp2SolrClient.Builder(solrUrls).build();
}
+ private void createTestCollectionIfNeeded() throws IOException, SolrServerException {
+ createTestCollectionIfNeeded(COLLECTION, 2, 1);
+ }
+
+ private void createTestCollectionIfNeeded(String collection, int numShards, int numReplicas) throws IOException, SolrServerException {
+ final CloudHttp2SolrClient solrClient = cluster.getSolrClient();
+ if (!CollectionAdminRequest.listCollections(solrClient).contains(collection)) {
+ CollectionAdminRequest.createCollection(collection, TEST_CONFIGSET_NAME, numShards, numReplicas).process(solrClient);
+ }
+ cluster.waitForActiveCollection(collection, numShards, numShards * numReplicas);
+ }
@After
public void tearDown() throws Exception {
@@ -122,16 +134,17 @@ public class CloudSolrClientTest extends SolrCloudTestCase {
httpBasedCloudSolrClient.close();
} catch (IOException e) {
throw new RuntimeException(e);
+ } finally {
+ httpBasedCloudSolrClient = null;
}
}
-
- shutdownCluster();
super.tearDown();
- }
- @AfterClass
- public static void cleanUpAfterClass() throws Exception {
- httpBasedCloudSolrClient = null;
+ // clear the shared collection before next test run
+ final CloudHttp2SolrClient solrClient = cluster.getSolrClient();
+ if (CollectionAdminRequest.listCollections(solrClient).contains(COLLECTION)) {
+ solrClient.deleteByQuery(COLLECTION, "*:*");
+ }
}
/**
@@ -143,8 +156,7 @@ public class CloudSolrClientTest extends SolrCloudTestCase {
@Test
public void testParallelUpdateQTime() throws Exception {
- CollectionAdminRequest.createCollection(COLLECTION, "conf", 2, 1).process(cluster.getSolrClient());
- cluster.waitForActiveCollection(COLLECTION, 2, 2);
+ createTestCollectionIfNeeded();
UpdateRequest req = new UpdateRequest();
for (int i=0; i<10; i++) {
SolrInputDocument doc = new SolrInputDocument();
@@ -159,10 +171,8 @@ public class CloudSolrClientTest extends SolrCloudTestCase {
@Test
public void testOverwriteOption() throws Exception {
- CollectionAdminRequest.createCollection("overwrite", "conf", 1, 1)
- .processAndWait(cluster.getSolrClient(), TIMEOUT);
- cluster.waitForActiveCollection("overwrite", 1, 1);
-
+ createTestCollectionIfNeeded("overwrite", 1, 1);
+
new UpdateRequest()
.add("id", "0", "a_t", "hello1")
.add("id", "0", "a_t", "hello2")
@@ -183,11 +193,8 @@ public class CloudSolrClientTest extends SolrCloudTestCase {
@Test
public void testAliasHandling() throws Exception {
- CollectionAdminRequest.createCollection(COLLECTION, "conf", 2, 1).process(cluster.getSolrClient());
- cluster.waitForActiveCollection(COLLECTION, 2, 2);
-
- CollectionAdminRequest.createCollection(COLLECTION2, "conf", 2, 1).process(cluster.getSolrClient());
- cluster.waitForActiveCollection(COLLECTION2, 2, 2);
+ createTestCollectionIfNeeded();
+ createTestCollectionIfNeeded(COLLECTION2, 2, 1);
CloudHttp2SolrClient client = getRandomClient();
SolrInputDocument doc = new SolrInputDocument("id", "1", "title_s", "my doc");
@@ -230,10 +237,10 @@ public class CloudSolrClientTest extends SolrCloudTestCase {
}
@Test
+ @Ignore // TJP
public void testRouting() throws Exception {
- CollectionAdminRequest.createCollection("routing_collection", "conf", 2, 1).process(cluster.getSolrClient());
- cluster.waitForActiveCollection("routing_collection", 2, 2);
-
+ createTestCollectionIfNeeded("routing_collection", 2, 1);
+
AbstractUpdateRequest request = new UpdateRequest()
.add(id, "0", "a_t", "hello1")
.add(id, "2", "a_t", "hello2")
@@ -417,7 +424,7 @@ public class CloudSolrClientTest extends SolrCloudTestCase {
// Hence the below configuration for our collection
CollectionAdminRequest.createCollection(collectionName, "conf", liveNodes, liveNodes)
.setMaxShardsPerNode(liveNodes * liveNodes)
- .processAndWait(cluster.getSolrClient(), TIMEOUT);
+ .process(cluster.getSolrClient());
cluster.waitForActiveCollection(collectionName, liveNodes, liveNodes * liveNodes);
// Add some new documents
new UpdateRequest()
@@ -488,6 +495,7 @@ public class CloudSolrClientTest extends SolrCloudTestCase {
* Tests if the 'shards.preference' parameter works with single-sharded collections.
*/
@Test
+ @Nightly // it's too slow
public void singleShardedPreferenceRules() throws Exception {
String collectionName = "singleShardPreferenceTestColl";
@@ -496,7 +504,7 @@ public class CloudSolrClientTest extends SolrCloudTestCase {
// For testing replica.type, we want to have all replica types available for the collection
CollectionAdminRequest.createCollection(collectionName, "conf", 1, liveNodes/3, liveNodes/3, liveNodes/3)
.setMaxShardsPerNode(liveNodes)
- .processAndWait(cluster.getSolrClient(), TIMEOUT);
+ .process(cluster.getSolrClient());
cluster.waitForActiveCollection(collectionName, 1, liveNodes);
// Add some new documents
@@ -590,54 +598,51 @@ public class CloudSolrClientTest extends SolrCloudTestCase {
public void testNonRetryableRequests() throws Exception {
try (CloudSolrClient client = SolrTestCaseJ4.getCloudSolrClient(cluster.getZkServer().getZkAddress())) {
// important to have one replica on each node
- RequestStatusState state = CollectionAdminRequest.createCollection("foo", "conf", 1, NODE_COUNT).processAndWait(client, 60);
- if (state == RequestStatusState.COMPLETED) {
- cluster.waitForActiveCollection("foo", 1, NODE_COUNT);
- client.setDefaultCollection("foo");
-
- Map<String, String> adminPathToMbean = new HashMap<>(CommonParams.ADMIN_PATHS.size());
- adminPathToMbean.put(CommonParams.COLLECTIONS_HANDLER_PATH, CollectionsHandler.class.getName());
- adminPathToMbean.put(CommonParams.CORES_HANDLER_PATH, CoreAdminHandler.class.getName());
- adminPathToMbean.put(CommonParams.CONFIGSETS_HANDLER_PATH, ConfigSetsHandler.class.getName());
- // we do not add the authc/authz handlers because they do not currently expose any mbeans
-
- for (String adminPath : adminPathToMbean.keySet()) {
- long errorsBefore = 0;
- for (JettySolrRunner runner : cluster.getJettySolrRunners()) {
- Long numRequests = getNumRequests(runner.getBaseUrl().toString(), "foo", "ADMIN", adminPathToMbean.get(adminPath), adminPath, true);
- errorsBefore += numRequests;
- if (log.isInfoEnabled()) {
- log.info("Found {} requests to {} on {}", numRequests, adminPath, runner.getBaseUrl());
- }
+ CollectionAdminRequest.createCollection("foo", "conf", 1, NODE_COUNT).process(client);
+ cluster.waitForActiveCollection("foo", 1, NODE_COUNT);
+ client.setDefaultCollection("foo");
+
+ Map<String, String> adminPathToMbean = new HashMap<>(CommonParams.ADMIN_PATHS.size());
+ adminPathToMbean.put(CommonParams.COLLECTIONS_HANDLER_PATH, CollectionsHandler.class.getName());
+ adminPathToMbean.put(CommonParams.CORES_HANDLER_PATH, CoreAdminHandler.class.getName());
+ adminPathToMbean.put(CommonParams.CONFIGSETS_HANDLER_PATH, ConfigSetsHandler.class.getName());
+ // we do not add the authc/authz handlers because they do not currently expose any mbeans
+
+ for (String adminPath : adminPathToMbean.keySet()) {
+ long errorsBefore = 0;
+ for (JettySolrRunner runner : cluster.getJettySolrRunners()) {
+ Long numRequests = getNumRequests(runner.getBaseUrl().toString(), "foo", "ADMIN", adminPathToMbean.get(adminPath), adminPath, true);
+ errorsBefore += numRequests;
+ if (log.isInfoEnabled()) {
+ log.info("Found {} requests to {} on {}", numRequests, adminPath, runner.getBaseUrl());
}
+ }
- ModifiableSolrParams params = new ModifiableSolrParams();
- params.set("qt", adminPath);
- params.set("action", "foobar"); // this should cause an error
- QueryRequest req = new QueryRequest(params);
- try {
- NamedList<Object> resp = client.request(req);
- fail("call to foo for admin path " + adminPath + " should have failed");
- } catch (Exception e) {
- // expected
- }
- long errorsAfter = 0;
- for (JettySolrRunner runner : cluster.getJettySolrRunners()) {
- Long numRequests = getNumRequests(runner.getBaseUrl().toString(), "foo", "ADMIN", adminPathToMbean.get(adminPath), adminPath, true);
- errorsAfter += numRequests;
- if (log.isInfoEnabled()) {
- log.info("Found {} requests to {} on {}", numRequests, adminPath, runner.getBaseUrl());
- }
+ ModifiableSolrParams params = new ModifiableSolrParams();
+ params.set("qt", adminPath);
+ params.set("action", "foobar"); // this should cause an error
+ QueryRequest req = new QueryRequest(params);
+ try {
+ NamedList<Object> resp = client.request(req);
+ fail("call to foo for admin path " + adminPath + " should have failed");
+ } catch (Exception e) {
+ // expected
+ }
+ long errorsAfter = 0;
+ for (JettySolrRunner runner : cluster.getJettySolrRunners()) {
+ Long numRequests = getNumRequests(runner.getBaseUrl().toString(), "foo", "ADMIN", adminPathToMbean.get(adminPath), adminPath, true);
+ errorsAfter += numRequests;
+ if (log.isInfoEnabled()) {
+ log.info("Found {} requests to {} on {}", numRequests, adminPath, runner.getBaseUrl());
}
- assertEquals(errorsBefore + 1, errorsAfter);
}
- } else {
- fail("Collection could not be created within 60 seconds");
+ assertEquals(errorsBefore + 1, errorsAfter);
}
}
}
@Test
+ @Ignore // nocommit ~ hangs, possibly related to creating collections with processAsync
public void checkCollectionParameters() throws Exception {
try (CloudSolrClient client = SolrTestCaseJ4.getCloudSolrClient(cluster.getZkServer().getZkAddress())) {
@@ -684,8 +689,7 @@ public class CloudSolrClientTest extends SolrCloudTestCase {
@Test
public void stateVersionParamTest() throws Exception {
- CollectionAdminRequest.createCollection(COLLECTION, "conf", 2, 1).process(cluster.getSolrClient());
- cluster.waitForActiveCollection(COLLECTION, 2, 2);
+ createTestCollectionIfNeeded();
DocCollection coll = cluster.getSolrClient().getZkStateReader().getClusterState().getCollection(COLLECTION);
Replica r = coll.getSlices().iterator().next().getReplicas().iterator().next();
@@ -752,6 +756,7 @@ public class CloudSolrClientTest extends SolrCloudTestCase {
}
@Test
+ @Ignore // nocommit ~ client not respecting the 100 ms timeout in this test
public void testShutdown() throws IOException {
try (CloudSolrClient client = SolrTestCaseJ4.getCloudSolrClient(SolrTestCaseJ4.DEAD_HOST_1)) {
client.setZkConnectTimeout(100);
@@ -764,6 +769,7 @@ public class CloudSolrClientTest extends SolrCloudTestCase {
public ExpectedException exception = ExpectedException.none();
@Test
+ @Ignore // nocommit ~ getting a NoNodeException instead of the expected SolrException
public void testWrongZkChrootTest() throws IOException {
try (CloudSolrClient client = SolrTestCaseJ4.getCloudSolrClient(cluster.getZkServer().getZkAddress() + "/xyz/foo")) {
client.setZkClientTimeout(1000 * 60);
@@ -785,10 +791,10 @@ public class CloudSolrClientTest extends SolrCloudTestCase {
}
@Test
+ @Ignore // nocommit ~ possible regression? response doesn't contain "adds"?
public void testVersionsAreReturned() throws Exception {
- CollectionAdminRequest.createCollection("versions_collection", "conf", 2, 1).process(cluster.getSolrClient());
- cluster.waitForActiveCollection("versions_collection", 2, 2);
-
+ createTestCollectionIfNeeded("versions_collection", 2, 1);
+
// assert that "adds" are returned
UpdateRequest updateRequest = new UpdateRequest()
.add("id", "1", "a_t", "hello1")
@@ -834,8 +840,8 @@ public class CloudSolrClientTest extends SolrCloudTestCase {
@Test
public void testInitializationWithSolrUrls() throws Exception {
- CollectionAdminRequest.createCollection(COLLECTION, "conf", 2, 1).process(cluster.getSolrClient());
- cluster.waitForActiveCollection(COLLECTION, 2, 2);
+ createTestCollectionIfNeeded();
+
CloudHttp2SolrClient client = httpBasedCloudSolrClient;
SolrInputDocument doc = new SolrInputDocument("id", "1", "title_s", "my doc");
client.add(COLLECTION, doc);
@@ -851,6 +857,8 @@ public class CloudSolrClientTest extends SolrCloudTestCase {
assertEquals("Collection not found: boguscollectionname", ex.getMessage());
}
+ @Test
+ @Ignore // nocommit ~ possible regression
public void testRetryUpdatesWhenClusterStateIsStale() throws Exception {
final String COL = "stale_state_test_col";
assert cluster.getJettySolrRunners().size() >= 2;
@@ -938,7 +946,7 @@ public class CloudSolrClientTest extends SolrCloudTestCase {
int pullReplicas = Math.max(1, liveNodes - 2);
CollectionAdminRequest.createCollection(collectionName, "conf", liveNodes, 1, 1, pullReplicas)
.setMaxShardsPerNode(liveNodes)
- .processAndWait(cluster.getSolrClient(), TIMEOUT);
+ .process(cluster.getSolrClient());
cluster.waitForActiveCollection(collectionName, liveNodes, liveNodes * (2 + pullReplicas));
// Add some new documents