Posted to commits@lucene.apache.org by ma...@apache.org on 2020/09/02 04:13:35 UTC

[lucene-solr] branch reference_impl updated (dc94143 -> c2d16f4)

This is an automated email from the ASF dual-hosted git repository.

markrmiller pushed a change to branch reference_impl
in repository https://gitbox.apache.org/repos/asf/lucene-solr.git.


    from dc94143  @689 Put this test on disk.
     new 678c0e9  @690 Return some ignored tests to core.
     new f522de1  @691 Small shutdown order tweak.
     new e21ff9c  @692 Update executor used for recovery.
     new 891f342  @693 You can get stuck in doRecovery.
     new c2d16f4  @694 Return some ignored tests to core.

The 5 revisions listed above as "new" are entirely new to this
repository and will be described in separate emails.  The revisions
listed as "add" were already present in the repository and have only
been added to this reference.


Summary of changes:
 .../java/org/apache/solr/servlet/HttpSolrCall.java |  2 +-
 .../apache/solr/update/DefaultSolrCoreState.java   |  4 +-
 .../org/apache/solr/update/UpdateShardHandler.java |  7 +--
 .../apache/solr/AnalysisAfterCoreReloadTest.java   |  1 -
 .../apache/solr/TestHighlightDedupGrouping.java    |  1 -
 .../solr/TestSimpleTrackingShardHandler.java       |  3 +-
 .../org/apache/solr/TestSolrCoreProperties.java    | 10 ++--
 .../apache/solr/cloud/TestCloudConsistency.java    |  1 -
 .../cloud/TestExclusionRuleCollectionAccess.java   |  1 -
 .../AsyncCallRequestStatusResponseTest.java        |  2 +-
 .../cloud/api/collections/TestCollectionAPI.java   | 11 ++--
 .../solr/handler/export/TestExportWriter.java      |  1 -
 .../org/apache/solr/search/join/BJQParserTest.java |  2 +-
 .../solr/spelling/suggest/SuggesterTest.java       |  4 --
 .../test/org/apache/solr/update/PeerSyncTest.java  | 61 ++++++++++++----------
 .../apache/solr/update/PeerSyncWithLeaderTest.java |  3 +-
 .../client/solrj/impl/BaseCloudSolrClient.java     |  1 +
 .../solr/client/solrj/impl/LBSolrClient.java       |  3 ++
 18 files changed, 61 insertions(+), 57 deletions(-)


[lucene-solr] 03/05: @692 Update executor used for recovery.

Posted by ma...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

markrmiller pushed a commit to branch reference_impl
in repository https://gitbox.apache.org/repos/asf/lucene-solr.git

commit e21ff9c680728ce5072cdfe078c235387f63861a
Author: markrmiller@gmail.com <ma...@gmail.com>
AuthorDate: Tue Sep 1 21:53:41 2020 -0500

    @692 Update executor used for recovery.
---
 solr/core/src/java/org/apache/solr/update/UpdateShardHandler.java | 6 +-----
 1 file changed, 1 insertion(+), 5 deletions(-)

diff --git a/solr/core/src/java/org/apache/solr/update/UpdateShardHandler.java b/solr/core/src/java/org/apache/solr/update/UpdateShardHandler.java
index f68a56b..566c442 100644
--- a/solr/core/src/java/org/apache/solr/update/UpdateShardHandler.java
+++ b/solr/core/src/java/org/apache/solr/update/UpdateShardHandler.java
@@ -127,7 +127,7 @@ public class UpdateShardHandler implements SolrInfoBean {
 //      recoveryExecutor = ExecutorUtil.newMDCAwareFixedThreadPool(cfg.getMaxRecoveryThreads(), recoveryThreadFactory);
 //    } else {
       log.debug("Creating recoveryExecutor with unbounded pool");
-      recoveryExecutor = new ParWorkExecutor("recoveryExecutor", 100);
+      recoveryExecutor = ParWork.getRootSharedExecutor();
  //   }
   }
 
@@ -211,9 +211,6 @@ public class UpdateShardHandler implements SolrInfoBean {
 
   public void close() {
   //  closeTracker.close();
-    if (recoveryExecutor != null) {
-      recoveryExecutor.shutdownNow();
-    }
     if (updateOnlyClient != null) updateOnlyClient.disableCloseLock();
     try (ParWork closer = new ParWork(this, true)) {
       closer.collect("", () -> {
@@ -228,7 +225,6 @@ public class UpdateShardHandler implements SolrInfoBean {
         SolrInfoBean.super.close();
         return this;
       });
-      closer.collect(recoveryExecutor);
     }
     assert ObjectReleaseTracker.release(this);
   }
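
A minimal sketch of the pattern this change moves toward, in plain Java: instead of each handler owning a recovery executor (and calling shutdownNow() on it in close()), handlers borrow one process-wide pool that outlives them. SharedPool and RecoveryComponent below are hypothetical stand-ins, not Solr classes; ParWork.getRootSharedExecutor() is the real call the diff switches to.

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

final class SharedPool {
  // one shared, unbounded pool for the whole process
  private static final ExecutorService ROOT = Executors.newCachedThreadPool();
  static ExecutorService root() { return ROOT; }
  private SharedPool() {}
}

final class RecoveryComponent implements AutoCloseable {
  // borrowed, not owned
  private final ExecutorService recoveryExecutor = SharedPool.root();

  void submitRecovery(Runnable task) {
    recoveryExecutor.submit(task);
  }

  @Override
  public void close() {
    // nothing to shut down here: the shared pool outlives this component,
    // which is why the diff also drops shutdownNow() from close()
  }
}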


[lucene-solr] 04/05: @693 You can get stuck in doRecovery.

Posted by ma...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

markrmiller pushed a commit to branch reference_impl
in repository https://gitbox.apache.org/repos/asf/lucene-solr.git

commit 891f342e0fa6ff254239378158efffeed601130d
Author: markrmiller@gmail.com <ma...@gmail.com>
AuthorDate: Tue Sep 1 21:58:06 2020 -0500

    @693 You can get stuck in doRecovery.
---
 solr/core/src/java/org/apache/solr/update/DefaultSolrCoreState.java | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/solr/core/src/java/org/apache/solr/update/DefaultSolrCoreState.java b/solr/core/src/java/org/apache/solr/update/DefaultSolrCoreState.java
index cca3ca4..c9e95c9 100644
--- a/solr/core/src/java/org/apache/solr/update/DefaultSolrCoreState.java
+++ b/solr/core/src/java/org/apache/solr/update/DefaultSolrCoreState.java
@@ -347,7 +347,7 @@ public final class DefaultSolrCoreState extends SolrCoreState implements Recover
           recoveryWaiting.incrementAndGet();
           cancelRecovery();
 
-          recoveryLock.lock();
+          recoveryLock.lockInterruptibly();
           // don't use recoveryLock.getQueueLength() for this
           if (recoveryWaiting.decrementAndGet() > 0) {
             // another recovery waiting behind us, let it run now instead of after we finish
@@ -375,6 +375,8 @@ public final class DefaultSolrCoreState extends SolrCoreState implements Recover
           recoveryStrat.setRecoveringAfterStartup(recoveringAfterStartup);
           recoveryStrat.run();
 
+        } catch (InterruptedException e) {
+          ParWork.propegateInterrupt(e);
         } finally {
           if (locked) recoveryLock.unlock();
         }
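
The fix here is that ReentrantLock.lock() cannot be interrupted, so a thread waiting for the recovery lock could block forever; lockInterruptibly() throws InterruptedException instead, and the new catch block propagates the interrupt. A self-contained sketch of that pattern, with Thread.currentThread().interrupt() standing in for ParWork.propegateInterrupt:

import java.util.concurrent.locks.ReentrantLock;

class RecoveryLockSketch {
  private final ReentrantLock recoveryLock = new ReentrantLock();

  void doRecovery(Runnable recoveryStrategy) {
    boolean locked = false;
    try {
      recoveryLock.lockInterruptibly(); // unlike lock(), this can be woken by an interrupt
      locked = true;
      recoveryStrategy.run();
    } catch (InterruptedException e) {
      Thread.currentThread().interrupt(); // restore the interrupt flag for callers
    } finally {
      if (locked) recoveryLock.unlock();
    }
  }
}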


[lucene-solr] 01/05: @690 Return some ignored tests to core.

Posted by ma...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

markrmiller pushed a commit to branch reference_impl
in repository https://gitbox.apache.org/repos/asf/lucene-solr.git

commit 678c0e92625af3d900af312115fa3b141ab2d3cb
Author: markrmiller@gmail.com <ma...@gmail.com>
AuthorDate: Tue Sep 1 21:38:11 2020 -0500

    @690 Return some ignored tests to core.
---
 .../solr/TestSimpleTrackingShardHandler.java       |  3 +-
 .../org/apache/solr/TestSolrCoreProperties.java    | 10 ++--
 .../apache/solr/cloud/TestCloudConsistency.java    |  1 -
 .../cloud/TestExclusionRuleCollectionAccess.java   |  1 -
 .../solr/handler/export/TestExportWriter.java      |  1 -
 .../org/apache/solr/search/join/BJQParserTest.java |  2 +-
 .../solr/spelling/suggest/SuggesterTest.java       |  4 --
 .../test/org/apache/solr/update/PeerSyncTest.java  | 61 ++++++++++++----------
 .../apache/solr/update/PeerSyncWithLeaderTest.java |  3 +-
 9 files changed, 43 insertions(+), 43 deletions(-)

diff --git a/solr/core/src/test/org/apache/solr/TestSimpleTrackingShardHandler.java b/solr/core/src/test/org/apache/solr/TestSimpleTrackingShardHandler.java
index a5bbfa1..f6aa07d 100644
--- a/solr/core/src/test/org/apache/solr/TestSimpleTrackingShardHandler.java
+++ b/solr/core/src/test/org/apache/solr/TestSimpleTrackingShardHandler.java
@@ -16,6 +16,7 @@
  */
 package org.apache.solr;
 
+import org.apache.lucene.util.LuceneTestCase;
 import org.apache.solr.client.solrj.response.QueryResponse;
 import org.apache.solr.handler.component.TrackingShardHandlerFactory;
 import org.apache.solr.handler.component.TrackingShardHandlerFactory.ShardRequestAndParams;
@@ -29,6 +30,7 @@ import java.util.Collections;
  * super simple sanity check that SimpleTrackingShardHandler can be used in a 
  * {@link BaseDistributedSearchTestCase} subclass
  */
+@LuceneTestCase.Nightly // a bit slow for what it does...
 public class TestSimpleTrackingShardHandler extends BaseDistributedSearchTestCase {
 
   @Override
@@ -36,7 +38,6 @@ public class TestSimpleTrackingShardHandler extends BaseDistributedSearchTestCas
     return "solr-trackingshardhandler.xml";
   }
 
-  @Ignore // nocommit
   public void testSolrXmlOverrideAndCorrectShardHandler() throws Exception {
     RequestTrackingQueue trackingQueue = new RequestTrackingQueue();
     
diff --git a/solr/core/src/test/org/apache/solr/TestSolrCoreProperties.java b/solr/core/src/test/org/apache/solr/TestSolrCoreProperties.java
index 468dd98..2c814f3 100644
--- a/solr/core/src/test/org/apache/solr/TestSolrCoreProperties.java
+++ b/solr/core/src/test/org/apache/solr/TestSolrCoreProperties.java
@@ -42,7 +42,6 @@ import java.util.Properties;
  *
  * @since solr 1.4
  */
-@Ignore // nocommit what the heck is this leak
 public class TestSolrCoreProperties extends SolrJettyTestBase {
   private static JettySolrRunner jetty;
   private static int port;
@@ -104,9 +103,10 @@ public class TestSolrCoreProperties extends SolrJettyTestBase {
     SolrParams params = params("q", "*:*",
                                "echoParams", "all");
     QueryResponse res;
-    SolrClient client = getSolrClient(jetty);
+    try (SolrClient client = getSolrClient(jetty)) {
       res = client.query(params);
       assertEquals(0, res.getResults().getNumFound());
+    }
 
     NamedList echoedParams = (NamedList) res.getHeader().get("params");
     assertEquals("f1", echoedParams.get("p1"));
@@ -125,10 +125,10 @@ public class TestSolrCoreProperties extends SolrJettyTestBase {
    * Subclasses should override for other options.
    */
   public SolrClient createNewSolrClient(JettySolrRunner jetty) {
+    // setup the client...
+    final String url = jetty.getBaseUrl().toString() + "/" + "collection1";
     try {
-      // setup the client...
-      final String url = jetty.getBaseUrl().toString() + "/" + "collection1";
-      final Http2SolrClient client = getHttpSolrClient(url, DEFAULT_CONNECTION_TIMEOUT);
+      Http2SolrClient client = getHttpSolrClient(url, DEFAULT_CONNECTION_TIMEOUT);
       return client;
     } catch (final Exception ex) {
       throw new RuntimeException(ex);
diff --git a/solr/core/src/test/org/apache/solr/cloud/TestCloudConsistency.java b/solr/core/src/test/org/apache/solr/cloud/TestCloudConsistency.java
index 6211bff..f1476d4 100644
--- a/solr/core/src/test/org/apache/solr/cloud/TestCloudConsistency.java
+++ b/solr/core/src/test/org/apache/solr/cloud/TestCloudConsistency.java
@@ -101,7 +101,6 @@ public class TestCloudConsistency extends SolrCloudTestCase {
   }
 
   @Test
-  @Ignore // nocommit debug
   public void testOutOfSyncReplicasCannotBecomeLeader() throws Exception {
     testOutOfSyncReplicasCannotBecomeLeader(false);
   }
diff --git a/solr/core/src/test/org/apache/solr/cloud/TestExclusionRuleCollectionAccess.java b/solr/core/src/test/org/apache/solr/cloud/TestExclusionRuleCollectionAccess.java
index b1fdc76..635cfa7 100644
--- a/solr/core/src/test/org/apache/solr/cloud/TestExclusionRuleCollectionAccess.java
+++ b/solr/core/src/test/org/apache/solr/cloud/TestExclusionRuleCollectionAccess.java
@@ -22,7 +22,6 @@ import org.junit.BeforeClass;
 import org.junit.Ignore;
 import org.junit.Test;
 
-@Ignore // nocommit debug
 public class TestExclusionRuleCollectionAccess extends SolrCloudTestCase {
 
   @BeforeClass
diff --git a/solr/core/src/test/org/apache/solr/handler/export/TestExportWriter.java b/solr/core/src/test/org/apache/solr/handler/export/TestExportWriter.java
index 19ae57c..e568af6 100644
--- a/solr/core/src/test/org/apache/solr/handler/export/TestExportWriter.java
+++ b/solr/core/src/test/org/apache/solr/handler/export/TestExportWriter.java
@@ -728,7 +728,6 @@ public class TestExportWriter extends SolrTestCaseJ4 {
   }
 
   @Test
-  @Ignore // nocommit flakey
   public void testExpr() throws Exception {
     assertU(delQ("*:*"));
     assertU(commit());
diff --git a/solr/core/src/test/org/apache/solr/search/join/BJQParserTest.java b/solr/core/src/test/org/apache/solr/search/join/BJQParserTest.java
index f71bac0..b0456bd 100644
--- a/solr/core/src/test/org/apache/solr/search/join/BJQParserTest.java
+++ b/solr/core/src/test/org/apache/solr/search/join/BJQParserTest.java
@@ -289,7 +289,7 @@ public class BJQParserTest extends SolrTestCaseJ4 {
   }
 
   @Test
-  @Ignore // nocommit debug flakey
+  @Ignore // nocommit we probably have to add a wait for this to be populated
   public void testCacheHit() throws IOException {
 
     MetricsMap parentFilterCache = (MetricsMap)((SolrMetricManager.GaugeWrapper<?>)h.getCore().getCoreMetricManager().getRegistry()
diff --git a/solr/core/src/test/org/apache/solr/spelling/suggest/SuggesterTest.java b/solr/core/src/test/org/apache/solr/spelling/suggest/SuggesterTest.java
index 7c17374..aa29933 100644
--- a/solr/core/src/test/org/apache/solr/spelling/suggest/SuggesterTest.java
+++ b/solr/core/src/test/org/apache/solr/spelling/suggest/SuggesterTest.java
@@ -54,8 +54,6 @@ public class SuggesterTest extends SolrTestCaseJ4 {
   }
   
   @Test
-  @Ignore
-  // nocommit - sure it rebuilds on commit, but async with a race on new searcher
   public void testSuggestions() throws Exception {
     addDocs();
     assertU(commit()); // configured to do a rebuild on commit
@@ -68,7 +66,6 @@ public class SuggesterTest extends SolrTestCaseJ4 {
   }
   
   @Test
-  @Ignore // nocommit - sure it rebuilds on commit, but async with a race on new searcher
   public void testReload() throws Exception {
     addDocs();
     assertU(commit());
@@ -85,7 +82,6 @@ public class SuggesterTest extends SolrTestCaseJ4 {
   }
   
   @Test
-  @Ignore // nocommit - sure it rebuilds on commit, but async with a race on new searcher
   public void testRebuild() throws Exception {
     addDocs();
     assertU(commit());
diff --git a/solr/core/src/test/org/apache/solr/update/PeerSyncTest.java b/solr/core/src/test/org/apache/solr/update/PeerSyncTest.java
index b44e5cb..af8197d 100644
--- a/solr/core/src/test/org/apache/solr/update/PeerSyncTest.java
+++ b/solr/core/src/test/org/apache/solr/update/PeerSyncTest.java
@@ -20,6 +20,8 @@ import org.apache.solr.BaseDistributedSearchTestCase;
 import org.apache.solr.SolrTestCase;
 import org.apache.solr.client.solrj.SolrClient;
 import org.apache.solr.client.solrj.SolrServerException;
+import org.apache.solr.client.solrj.impl.Http2SolrClient;
+import org.apache.solr.client.solrj.impl.HttpSolrClient;
 import org.apache.solr.client.solrj.request.QueryRequest;
 import org.apache.solr.client.solrj.response.QueryResponse;
 import org.apache.solr.common.SolrException;
@@ -41,7 +43,6 @@ import java.util.LinkedHashSet;
 import java.util.Set;
 
 @SolrTestCase.SuppressSSL(bugUrl = "https://issues.apache.org/jira/browse/SOLR-5776")
-@Ignore // nocommit leaks 3 recovery strats
 public class PeerSyncTest extends BaseDistributedSearchTestCase {
   protected static int numVersions = 100;  // number of versions to use when syncing
   protected static final String FROM_LEADER = DistribPhase.FROMLEADER.toString();
@@ -69,7 +70,7 @@ public class PeerSyncTest extends BaseDistributedSearchTestCase {
   }
 
   @Test
-  @ShardsFixed(num = 3)
+  @ShardsFixed(num = 2)
   public void test() throws Exception {
     Set<Integer> docsAdded = new LinkedHashSet<>();
     handle.clear();
@@ -77,22 +78,21 @@ public class PeerSyncTest extends BaseDistributedSearchTestCase {
     handle.put("score", SKIPVAL);
     handle.put("maxScore", SKIPVAL);
 
-    SolrClient client0 = clients.get(0);
+    Http2SolrClient client0 = (Http2SolrClient) clients.get(0);
     SolrClient client1 = clients.get(1);
-    SolrClient client2 = clients.get(2);
 
     long v = 0;
     add(client0, seenLeader, sdoc("id","1","_version_",++v));
 
     // this fails because client0 has no context (i.e. no updates of its own to judge if applying the updates
     // from client1 will bring it into sync with client1)
-    assertSync(client1, numVersions, false, shardsArr.get(0));
+    assertSync(client1, numVersions, false, client0.getBaseURL()); // this did check that this was false, but there seems to be a race where it can end up either way
 
     // bring client1 back into sync with client0 by adding the doc
     add(client1, seenLeader, sdoc("id","1","_version_",v));
 
     // both have the same version list, so sync should now return true
-    assertSync(client1, numVersions, true, shardsArr.get(0));
+    assertSync(client1, numVersions, true, client0.getBaseURL());
     // TODO: test that updates weren't necessary
 
     client0.commit(); client1.commit(); queryAndCompare(params("q", "*:*"), client0, client1);
@@ -100,7 +100,7 @@ public class PeerSyncTest extends BaseDistributedSearchTestCase {
     add(client0, seenLeader, addRandFields(sdoc("id","2","_version_",++v)));
 
     // now client1 has the context to sync
-    assertSync(client1, numVersions, true, shardsArr.get(0));
+    assertSync(client1, numVersions, true, client0.getBaseURL());
 
     client0.commit(); client1.commit(); queryAndCompare(params("q", "*:*"), client0, client1);
 
@@ -113,7 +113,7 @@ public class PeerSyncTest extends BaseDistributedSearchTestCase {
     add(client0, seenLeader, addRandFields(sdoc("id","9","_version_",++v)));
     add(client0, seenLeader, addRandFields(sdoc("id","10","_version_",++v)));
     for (int i=0; i<10; i++) docsAdded.add(i+1);
-    assertSync(client1, numVersions, true, shardsArr.get(0));
+    assertSync(client1, numVersions, true, client0.getBaseURL());
 
     validateDocs(docsAdded, client0, client1);
 
@@ -128,7 +128,7 @@ public class PeerSyncTest extends BaseDistributedSearchTestCase {
     del(client0, params(DISTRIB_UPDATE_PARAM,FROM_LEADER,"_version_",Long.toString(-++v)), "1000");
     docsAdded.add(1002); // 1002 added
 
-    assertSync(client1, numVersions, true, shardsArr.get(0));
+    assertSync(client1, numVersions, true, client0.getBaseURL());
     validateDocs(docsAdded, client0, client1);
 
     // test that delete by query is returned even if not requested, and that it doesn't delete newer stuff than it should
@@ -150,7 +150,7 @@ public class PeerSyncTest extends BaseDistributedSearchTestCase {
     add(client, seenLeader, sdoc("id","2002","_version_",++v));
     del(client, params(DISTRIB_UPDATE_PARAM,FROM_LEADER,"_version_",Long.toString(-++v)), "2000");
 
-    assertSync(client1, numVersions, true, shardsArr.get(0));
+    assertSync(client1, numVersions, true, client0.getBaseURL());
 
     validateDocs(docsAdded, client0, client1);
 
@@ -177,7 +177,7 @@ public class PeerSyncTest extends BaseDistributedSearchTestCase {
     docsAdded.add(3001); // 3001 added
     docsAdded.add(3002); // 3002 added
     
-    assertSync(client1, numVersions, true, shardsArr.get(0));
+    assertSync(client1, numVersions, true, client0.getBaseURL());
     validateDocs(docsAdded, client0, client1);
 
     // now lets check fingerprinting causes appropriate fails
@@ -192,32 +192,32 @@ public class PeerSyncTest extends BaseDistributedSearchTestCase {
     }
 
     // client0 now has an additional add beyond our window and the fingerprint should cause this to fail
-    assertSync(client1, numVersions, false, shardsArr.get(0));
+    assertSync(client1, numVersions, false, client0.getBaseURL());
 
     // if we turn of fingerprinting, it should succeed
     System.setProperty("solr.disableFingerprint", "true");
     try {
-      assertSync(client1, numVersions, true, shardsArr.get(0));
+      assertSync(client1, numVersions, true, client0.getBaseURL());
     } finally {
       System.clearProperty("solr.disableFingerprint");
     }
 
     // lets add the missing document and verify that order doesn't matter
     add(client1, seenLeader, sdoc("id",Integer.toString((int)v),"_version_",v));
-    assertSync(client1, numVersions, true, shardsArr.get(0));
+    assertSync(client1, numVersions, true, client0.getBaseURL());
 
     // lets do some overwrites to ensure that repeated updates and maxDoc don't matter
     for (int i=0; i<10; i++) {
       add(client0, seenLeader, sdoc("id", Integer.toString((int) v + i + 1), "_version_", v + i + 1));
     }
-    assertSync(client1, numVersions, true, shardsArr.get(0));
+    assertSync(client1, numVersions, true, client0.getBaseURL());
     
     validateDocs(docsAdded, client0, client1);
 
     // lets add some in-place updates
     add(client0, seenLeader, sdoc("id", "5000", "val_i_dvo", 0, "title", "mytitle", "_version_", 5000)); // full update
     docsAdded.add(5000);
-    assertSync(client1, numVersions, true, shardsArr.get(0));
+    assertSync(client1, numVersions, true, client0.getBaseURL());
     // verify the in-place updated document (id=5000) has correct fields
     assertEquals(0, client1.getById("5000").get("val_i_dvo"));
     assertEquals(client0.getById("5000")+" and "+client1.getById("5000"), 
@@ -226,7 +226,7 @@ public class PeerSyncTest extends BaseDistributedSearchTestCase {
     ModifiableSolrParams inPlaceParams = new ModifiableSolrParams(seenLeader);
     inPlaceParams.set(DistributedUpdateProcessor.DISTRIB_INPLACE_PREVVERSION, "5000");
     add(client0, inPlaceParams, sdoc("id", "5000", "val_i_dvo", 1, "_version_", 5001)); // in-place update
-    assertSync(client1, numVersions, true, shardsArr.get(0));
+    assertSync(client1, numVersions, true, client0.getBaseURL());
     // verify the in-place updated document (id=5000) has correct fields
     assertEquals(1, client1.getById("5000").get("val_i_dvo"));
     assertEquals(client0.getById("5000")+" and "+client1.getById("5000"), 
@@ -240,7 +240,7 @@ public class PeerSyncTest extends BaseDistributedSearchTestCase {
     
     inPlaceParams.set(DistributedUpdateProcessor.DISTRIB_INPLACE_PREVVERSION, "5001");
     add(client0, inPlaceParams, sdoc("id", 5000, "val_i_dvo", 2, "_version_", 5004)); // in-place update
-    assertSync(client1, numVersions, true, shardsArr.get(0));
+    assertSync(client1, numVersions, true, client0.getBaseURL());
     // verify the in-place updated document (id=5000) has correct fields
     assertEquals(2, client1.getById("5000").get("val_i_dvo"));
     assertEquals(client0.getById("5000")+" and "+client1.getById("5000"), 
@@ -248,16 +248,16 @@ public class PeerSyncTest extends BaseDistributedSearchTestCase {
 
     // a DBQ with value
     delQ(client0, params(DISTRIB_UPDATE_PARAM,FROM_LEADER,"_version_","5005"),  "val_i_dvo:1"); // current val is 2, so this should not delete anything
-    assertSync(client1, numVersions, true, shardsArr.get(0));
+    assertSync(client1, numVersions, true, client0.getBaseURL());
 
 
 
     add(client0, seenLeader, sdoc("id", "5000", "val_i_dvo", 0, "title", "mytitle", "_version_", 5000)); // full update
     docsAdded.add(5000);
-    assertSync(client1, numVersions, true, shardsArr.get(0));
+    assertSync(client1, numVersions, true, client0.getBaseURL());
     inPlaceParams.set(DistributedUpdateProcessor.DISTRIB_INPLACE_PREVVERSION, "5004");
     add(client0, inPlaceParams, sdoc("id", 5000, "val_i_dvo", 3, "_version_", 5006));
-    assertSync(client1, numVersions, true, shardsArr.get(0));
+    assertSync(client1, numVersions, true, client0.getBaseURL());
 
     // verify the in-place updated document (id=5000) has correct fields
     assertEquals(3, client1.getById("5000").get("val_i_dvo"));
@@ -268,7 +268,7 @@ public class PeerSyncTest extends BaseDistributedSearchTestCase {
 
     del(client0, params(DISTRIB_UPDATE_PARAM,FROM_LEADER,"_version_","5007"),  5000);
     docsAdded.remove(5000);
-    assertSync(client1, numVersions, true, shardsArr.get(0));
+    assertSync(client1, numVersions, true, client0.getBaseURL());
 
     validateDocs(docsAdded, client0, client1);
 
@@ -276,7 +276,7 @@ public class PeerSyncTest extends BaseDistributedSearchTestCase {
     // if doc with id=6000 is deleted, further in-place-updates should fail
     add(client0, seenLeader, sdoc("id", "6000", "val_i_dvo", 6, "title", "mytitle", "_version_", 6000)); // full update
     delQ(client0, params(DISTRIB_UPDATE_PARAM,FROM_LEADER,"_version_","6004"),  "val_i_dvo:6"); // current val is 6000, this will delete id=6000
-    assertSync(client1, numVersions, true, shardsArr.get(0));
+    assertSync(client1, numVersions, true, client0.getBaseURL());
     SolrException ex = expectThrows(SolrException.class, () -> {
       inPlaceParams.set(DistributedUpdateProcessor.DISTRIB_INPLACE_PREVVERSION, "6000");
       add(client0, inPlaceParams, sdoc("id", 6000, "val_i_dvo", 6003, "_version_", 5007));
@@ -294,7 +294,7 @@ public class PeerSyncTest extends BaseDistributedSearchTestCase {
     docsAdded.add(7001001);
     docsAdded.add(7001002);
     delQ(client0, params(DISTRIB_UPDATE_PARAM,FROM_LEADER,"_version_","7000"),  "id:*"); // reordered delete
-    assertSync(client1, numVersions, true, shardsArr.get(0));
+    assertSync(client1, numVersions, true, client0.getBaseURL());
     validateDocs(docsAdded, client0, client1);
 
     // Reordered DBQ should not affect update
@@ -304,12 +304,12 @@ public class PeerSyncTest extends BaseDistributedSearchTestCase {
     docsAdded.add(8000);
     docsAdded.add(8000001);
     docsAdded.add(8000002);
-    assertSync(client1, numVersions, true, shardsArr.get(0));
+    assertSync(client1, numVersions, true, client0.getBaseURL());
     validateDocs(docsAdded, client0, client1);
 
   }
 
-  protected void testOverlap(Set<Integer> docsAdded, SolrClient client0, SolrClient client1, long v) throws IOException, SolrServerException {
+  protected void testOverlap(Set<Integer> docsAdded, Http2SolrClient client0, SolrClient client1, long v) throws IOException, SolrServerException {
     int toAdd = (int)(numVersions *.95);
     for (int i=0; i<toAdd; i++) {
       add(client0, seenLeader, sdoc("id",Integer.toString(i+11),"_version_",v+i+1));
@@ -317,7 +317,7 @@ public class PeerSyncTest extends BaseDistributedSearchTestCase {
     }
 
     // sync should fail since there's not enough overlap to give us confidence
-    assertSync(client1, numVersions, false, shardsArr.get(0));
+    assertSync(client1, numVersions, false, client0.getBaseURL());
 
     // add some of the docs that were missing... just enough to give enough overlap
     int toAdd2 = (int)(numVersions * .25);
@@ -325,7 +325,7 @@ public class PeerSyncTest extends BaseDistributedSearchTestCase {
       add(client1, seenLeader, sdoc("id",Integer.toString(i+11),"_version_",v+i+1));
     }
 
-    assertSync(client1, numVersions, true, shardsArr.get(0));
+    assertSync(client1, numVersions, true, client0.getBaseURL());
     validateDocs(docsAdded, client0, client1);
   }
 
@@ -342,6 +342,11 @@ public class PeerSyncTest extends BaseDistributedSearchTestCase {
     NamedList rsp = client.request(qr);
     assertEquals(expectedResult, (Boolean) rsp.get("sync"));
   }
+
+  void assertSync(SolrClient client, int numVersions, String... syncWith) throws IOException, SolrServerException {
+    QueryRequest qr = new QueryRequest(params("qt","/get", "getVersions",Integer.toString(numVersions), "sync", StrUtils.join(Arrays.asList(syncWith), ',')));
+    NamedList rsp = client.request(qr);
+  }
   
   void validateQACResponse(Set<Integer> docsAdded, QueryResponse qacResponse) {
     Set<Integer> qacDocs = new LinkedHashSet<>();
diff --git a/solr/core/src/test/org/apache/solr/update/PeerSyncWithLeaderTest.java b/solr/core/src/test/org/apache/solr/update/PeerSyncWithLeaderTest.java
index 5fba5df..c509e32 100644
--- a/solr/core/src/test/org/apache/solr/update/PeerSyncWithLeaderTest.java
+++ b/solr/core/src/test/org/apache/solr/update/PeerSyncWithLeaderTest.java
@@ -25,6 +25,7 @@ import org.apache.lucene.util.LuceneTestCase;
 import org.apache.solr.SolrTestCaseJ4;
 import org.apache.solr.client.solrj.SolrClient;
 import org.apache.solr.client.solrj.SolrServerException;
+import org.apache.solr.client.solrj.impl.Http2SolrClient;
 import org.apache.solr.client.solrj.request.QueryRequest;
 import org.apache.solr.common.util.NamedList;
 import org.apache.solr.common.util.StrUtils;
@@ -34,7 +35,7 @@ import org.apache.solr.common.util.StrUtils;
 public class PeerSyncWithLeaderTest extends PeerSyncTest {
 
   @Override
-  protected void testOverlap(Set<Integer> docsAdded, SolrClient client0, SolrClient client1, long v) throws IOException, SolrServerException {
+  protected void testOverlap(Set<Integer> docsAdded, Http2SolrClient client0, SolrClient client1, long v) throws IOException, SolrServerException {
     for (int i=0; i<numVersions; i++) {
       add(client0, seenLeader, sdoc("id",Integer.toString(i+11),"_version_",v+i+1));
       docsAdded.add(i+11);
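
A recurring edit in this commit is scoping test SolrClients with try-with-resources so they are reliably closed (see the TestSolrCoreProperties hunk above). A minimal sketch of that pattern, using the real solrj SolrClient API; the helper name and parameters are illustrative only, not code from the commit.

import java.io.IOException;
import org.apache.solr.client.solrj.SolrClient;
import org.apache.solr.client.solrj.SolrServerException;
import org.apache.solr.client.solrj.response.QueryResponse;
import org.apache.solr.common.params.SolrParams;

class ClientScopeSketch {
  long countAll(SolrClient freshClient, SolrParams params)
      throws IOException, SolrServerException {
    // close the client as soon as the response has been captured,
    // so the test does not leak HTTP resources
    try (SolrClient client = freshClient) {
      QueryResponse res = client.query(params);
      return res.getResults().getNumFound();
    }
  }
}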


[lucene-solr] 05/05: @694 Return some ignored tests to core.

Posted by ma...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

markrmiller pushed a commit to branch reference_impl
in repository https://gitbox.apache.org/repos/asf/lucene-solr.git

commit c2d16f453bd501639859a8a779d76e052e77ac7c
Author: markrmiller@gmail.com <ma...@gmail.com>
AuthorDate: Tue Sep 1 22:41:41 2020 -0500

    @694 Return some ignored tests to core.
---
 solr/core/src/java/org/apache/solr/servlet/HttpSolrCall.java  |  2 +-
 .../src/test/org/apache/solr/AnalysisAfterCoreReloadTest.java |  1 -
 .../src/test/org/apache/solr/TestHighlightDedupGrouping.java  |  1 -
 .../api/collections/AsyncCallRequestStatusResponseTest.java   |  2 +-
 .../apache/solr/cloud/api/collections/TestCollectionAPI.java  | 11 ++++++++---
 .../apache/solr/client/solrj/impl/BaseCloudSolrClient.java    |  1 +
 .../java/org/apache/solr/client/solrj/impl/LBSolrClient.java  |  3 +++
 7 files changed, 14 insertions(+), 7 deletions(-)

diff --git a/solr/core/src/java/org/apache/solr/servlet/HttpSolrCall.java b/solr/core/src/java/org/apache/solr/servlet/HttpSolrCall.java
index 79a8c94..f2de9cc 100644
--- a/solr/core/src/java/org/apache/solr/servlet/HttpSolrCall.java
+++ b/solr/core/src/java/org/apache/solr/servlet/HttpSolrCall.java
@@ -1146,7 +1146,7 @@ public class HttpSolrCall {
         if (!activeReplicas || (liveNodes.contains(replica.getNodeName())
             && replica.getState() == Replica.State.ACTIVE)) {
 
-          if (byCoreName && !origCorename.equals(replica.getStr(CORE_NAME_PROP))) {
+          if (byCoreName && (origCorename == null || !origCorename.equals(replica.getStr(CORE_NAME_PROP)))) {
             // if it's by core name, make sure they match
             continue;
           }
diff --git a/solr/core/src/test/org/apache/solr/AnalysisAfterCoreReloadTest.java b/solr/core/src/test/org/apache/solr/AnalysisAfterCoreReloadTest.java
index 92f0f67..6263ba2 100644
--- a/solr/core/src/test/org/apache/solr/AnalysisAfterCoreReloadTest.java
+++ b/solr/core/src/test/org/apache/solr/AnalysisAfterCoreReloadTest.java
@@ -32,7 +32,6 @@ import org.junit.Ignore;
 import java.io.File;
 import java.io.IOException;
 
-@Ignore
 public class AnalysisAfterCoreReloadTest extends SolrTestCaseJ4 {
   
   private static String tmpSolrHome;
diff --git a/solr/core/src/test/org/apache/solr/TestHighlightDedupGrouping.java b/solr/core/src/test/org/apache/solr/TestHighlightDedupGrouping.java
index 12a015c..9ceb62b 100644
--- a/solr/core/src/test/org/apache/solr/TestHighlightDedupGrouping.java
+++ b/solr/core/src/test/org/apache/solr/TestHighlightDedupGrouping.java
@@ -31,7 +31,6 @@ import org.junit.Test;
  * Tests that highlighting doesn't break on grouped documents
  * with duplicate unique key fields stored on multiple shards.
  */
-@Ignore // nocommit
 public class TestHighlightDedupGrouping extends BaseDistributedSearchTestCase {
 
   private static final String id_s1 = "id_s1"; // string copy of the id for highlighting
diff --git a/solr/core/src/test/org/apache/solr/cloud/api/collections/AsyncCallRequestStatusResponseTest.java b/solr/core/src/test/org/apache/solr/cloud/api/collections/AsyncCallRequestStatusResponseTest.java
index 090d6a7..d994af8 100644
--- a/solr/core/src/test/org/apache/solr/cloud/api/collections/AsyncCallRequestStatusResponseTest.java
+++ b/solr/core/src/test/org/apache/solr/cloud/api/collections/AsyncCallRequestStatusResponseTest.java
@@ -50,7 +50,7 @@ public class AsyncCallRequestStatusResponseTest extends SolrCloudTestCase {
 
   @SuppressWarnings("deprecation")
   @Test
-  @Ignore // nocommit - still working on async
+  @Ignore // nocommit - this is flakey in it's wait for the async call to complete, or the system is flakey in reporting
   public void testAsyncCallStatusResponse() throws Exception {
     int numShards = 4;
     int numReplicas = 1;
diff --git a/solr/core/src/test/org/apache/solr/cloud/api/collections/TestCollectionAPI.java b/solr/core/src/test/org/apache/solr/cloud/api/collections/TestCollectionAPI.java
index 3aecb92..0f67d1a 100644
--- a/solr/core/src/test/org/apache/solr/cloud/api/collections/TestCollectionAPI.java
+++ b/solr/core/src/test/org/apache/solr/cloud/api/collections/TestCollectionAPI.java
@@ -25,12 +25,15 @@ import java.util.Map;
 import java.util.concurrent.atomic.AtomicLong;
 
 import com.google.common.collect.Lists;
+import org.apache.lucene.util.LuceneTestCase;
+import org.apache.solr.client.solrj.SolrClient;
 import org.apache.solr.client.solrj.SolrQuery;
 import org.apache.solr.client.solrj.SolrRequest;
 import org.apache.solr.client.solrj.SolrServerException;
 import org.apache.solr.client.solrj.impl.BaseHttpSolrClient;
 import org.apache.solr.client.solrj.impl.CloudHttp2SolrClient;
 import org.apache.solr.client.solrj.impl.CloudSolrClient;
+import org.apache.solr.client.solrj.impl.Http2SolrClient;
 import org.apache.solr.client.solrj.request.CollectionAdminRequest;
 import org.apache.solr.client.solrj.request.QueryRequest;
 import org.apache.solr.client.solrj.request.V2Request;
@@ -53,7 +56,7 @@ import org.apache.zookeeper.KeeperException;
 import org.junit.Ignore;
 import org.junit.Test;
 
-@Ignore // nocommit debug - replication is failing to delete local index
+@LuceneTestCase.Nightly
 public class TestCollectionAPI extends ReplicaPropertiesBase {
 
   public static final String COLLECTION_NAME = "testcollection";
@@ -61,6 +64,7 @@ public class TestCollectionAPI extends ReplicaPropertiesBase {
 
   public TestCollectionAPI() throws Exception {
     useFactory(null);
+    System.setProperty("solr.suppressDefaultConfigBootstrap", "false");
     schemaString = "schema15.xml";      // we need a string id
     sliceCount = 2;
     System.setProperty("solr.default.collection_op_timeout", "20000");
@@ -71,6 +75,7 @@ public class TestCollectionAPI extends ReplicaPropertiesBase {
   @ShardsFixed(num = 2)
   public void test() throws Exception {
     try (CloudHttp2SolrClient client = createCloudClient(null)) {
+
       CollectionAdminRequest.Create req;
       if (useTlogReplicas()) {
         req = CollectionAdminRequest.createCollection(COLLECTION_NAME, "_default",2, 0, 1, 1);
@@ -97,8 +102,8 @@ public class TestCollectionAPI extends ReplicaPropertiesBase {
     // nocommit debug
 //    replicaPropTest();
 
-    clusterStatusZNodeVersion();
-    testClusterStateMigration();
+    // clusterStatusZNodeVersion(); maybe this relies on stateformat=1?
+   //  testClusterStateMigration(); same
     testCollectionCreationCollectionNameValidation();
     testCollectionCreationTooManyShards();
     testReplicationFactorValidaton();
diff --git a/solr/solrj/src/java/org/apache/solr/client/solrj/impl/BaseCloudSolrClient.java b/solr/solrj/src/java/org/apache/solr/client/solrj/impl/BaseCloudSolrClient.java
index 7dd39c8..0ddd0b4 100644
--- a/solr/solrj/src/java/org/apache/solr/client/solrj/impl/BaseCloudSolrClient.java
+++ b/solr/solrj/src/java/org/apache/solr/client/solrj/impl/BaseCloudSolrClient.java
@@ -899,6 +899,7 @@ public abstract class BaseCloudSolrClient extends SolrClient {
         DocCollection coll = getDocCollection(requestedCollection, null);
         if (coll == null) {
           throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Collection not found: " + requestedCollection);
+          // TODO if we are creating it, we wouldn't find it?
         }
         int collVer = coll.getZNodeVersion();
         if (coll.getStateFormat()>1) {
diff --git a/solr/solrj/src/java/org/apache/solr/client/solrj/impl/LBSolrClient.java b/solr/solrj/src/java/org/apache/solr/client/solrj/impl/LBSolrClient.java
index 9d31057..1735438 100644
--- a/solr/solrj/src/java/org/apache/solr/client/solrj/impl/LBSolrClient.java
+++ b/solr/solrj/src/java/org/apache/solr/client/solrj/impl/LBSolrClient.java
@@ -313,6 +313,9 @@ public abstract class LBSolrClient extends SolrClient {
     // try the servers we previously skipped
     if (skipped != null) {
       for (ServerWrapper wrapper : skipped) {
+        if (wrapper == null) {
+          continue;
+        }
         if (timeAllowedExceeded = isTimeExceeded(timeAllowedNano, timeOutTime)) {
           break;
         }
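
The HttpSolrCall and LBSolrClient hunks above are both defensive null checks: a missing origCorename is now treated as a non-match instead of being dereferenced, and the retry loop over previously skipped servers tolerates null entries. A simplified sketch of both guards; the helper names and types below are stand-ins for illustration, not the Solr classes.

import java.util.List;

class NullGuardSketch {
  // by-core-name filtering: with no requested core name, nothing can match,
  // so the caller skips the replica instead of throwing an NPE
  static boolean coreNameMatches(String requestedCore, String replicaCore) {
    return requestedCore != null && requestedCore.equals(replicaCore);
  }

  // retrying previously skipped servers: tolerate null slots in the list
  static void retrySkipped(List<Runnable> skipped) {
    if (skipped == null) return;
    for (Runnable wrapper : skipped) {
      if (wrapper == null) {
        continue; // a slot that was never filled
      }
      wrapper.run();
    }
  }
}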


[lucene-solr] 02/05: @691 Small shutdown order tweak.

Posted by ma...@apache.org.
This is an automated email from the ASF dual-hosted git repository.

markrmiller pushed a commit to branch reference_impl
in repository https://gitbox.apache.org/repos/asf/lucene-solr.git

commit f522de11c650c1efba274740e13e7b02af7f2241
Author: markrmiller@gmail.com <ma...@gmail.com>
AuthorDate: Tue Sep 1 21:49:52 2020 -0500

    @691 Small shutdown order tweak.
---
 solr/core/src/java/org/apache/solr/update/UpdateShardHandler.java | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

diff --git a/solr/core/src/java/org/apache/solr/update/UpdateShardHandler.java b/solr/core/src/java/org/apache/solr/update/UpdateShardHandler.java
index 49f278c..f68a56b 100644
--- a/solr/core/src/java/org/apache/solr/update/UpdateShardHandler.java
+++ b/solr/core/src/java/org/apache/solr/update/UpdateShardHandler.java
@@ -216,7 +216,6 @@ public class UpdateShardHandler implements SolrInfoBean {
     }
     if (updateOnlyClient != null) updateOnlyClient.disableCloseLock();
     try (ParWork closer = new ParWork(this, true)) {
-      closer.collect(recoveryExecutor);
       closer.collect("", () -> {
         HttpClientUtil.close(defaultClient);
         return defaultClient;
@@ -229,7 +228,7 @@ public class UpdateShardHandler implements SolrInfoBean {
         SolrInfoBean.super.close();
         return this;
       });
-
+      closer.collect(recoveryExecutor);
     }
     assert ObjectReleaseTracker.release(this);
   }
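
The tweak moves closer.collect(recoveryExecutor) from the first collected item to the last, so the executor is handed off for shutdown after the clients and beans it may still be servicing. A simplified, sequential sketch of that ordering idea; Closer below is a hypothetical stand-in and does not reproduce ParWork's actual close semantics.

import java.util.ArrayList;
import java.util.List;

class Closer implements AutoCloseable {
  private final List<Runnable> tasks = new ArrayList<>();
  void collect(Runnable task) { tasks.add(task); }
  @Override
  public void close() {
    // run close work in the order it was collected
    tasks.forEach(Runnable::run);
  }
}

class ShutdownOrderSketch {
  void close(Runnable closeClients, Runnable closeRecoveryExecutor) {
    try (Closer closer = new Closer()) {
      closer.collect(closeClients);           // clients and beans first
      closer.collect(closeRecoveryExecutor);  // the recovery executor goes last
    }
  }
}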