You are viewing a plain text version of this content. The canonical link for it is here.
Posted to builds@lucene.apache.org by Apache Jenkins Server <je...@builds.apache.org> on 2019/12/19 12:36:55 UTC
[JENKINS] Lucene-Solr-NightlyTests-master - Build # 2049 - Unstable
Build: https://builds.apache.org/job/Lucene-Solr-NightlyTests-master/2049/
2 tests failed.
FAILED: org.apache.solr.cloud.RollingRestartTest.test
Error Message:
Timeout occurred while waiting response from server at: https://127.0.0.1:39922/_/cv
Stack Trace:
org.apache.solr.client.solrj.SolrServerException: Timeout occurred while waiting response from server at: https://127.0.0.1:39922/_/cv
at __randomizedtesting.SeedInfo.seed([4BE768247389A4C8:C3B357FEDD75C930]:0)
at org.apache.solr.client.solrj.impl.HttpSolrClient.executeMethod(HttpSolrClient.java:676)
at org.apache.solr.client.solrj.impl.HttpSolrClient.request(HttpSolrClient.java:265)
at org.apache.solr.client.solrj.impl.HttpSolrClient.request(HttpSolrClient.java:248)
at org.apache.solr.client.solrj.impl.LBSolrClient.doRequest(LBSolrClient.java:368)
at org.apache.solr.client.solrj.impl.LBSolrClient.request(LBSolrClient.java:296)
at org.apache.solr.client.solrj.impl.BaseCloudSolrClient.sendRequest(BaseCloudSolrClient.java:1143)
at org.apache.solr.client.solrj.impl.BaseCloudSolrClient.requestWithRetryOnStaleState(BaseCloudSolrClient.java:906)
at org.apache.solr.client.solrj.impl.BaseCloudSolrClient.request(BaseCloudSolrClient.java:838)
at org.apache.solr.client.solrj.SolrRequest.process(SolrRequest.java:207)
at org.apache.solr.client.solrj.SolrRequest.process(SolrRequest.java:224)
at org.apache.solr.cloud.RollingRestartTest.restartWithRolesTest(RollingRestartTest.java:74)
at org.apache.solr.cloud.RollingRestartTest.test(RollingRestartTest.java:53)
at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)
at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
at java.base/java.lang.reflect.Method.invoke(Method.java:566)
at com.carrotsearch.randomizedtesting.RandomizedRunner.invoke(RandomizedRunner.java:1754)
at com.carrotsearch.randomizedtesting.RandomizedRunner$8.evaluate(RandomizedRunner.java:942)
at com.carrotsearch.randomizedtesting.RandomizedRunner$9.evaluate(RandomizedRunner.java:978)
at com.carrotsearch.randomizedtesting.RandomizedRunner$10.evaluate(RandomizedRunner.java:992)
at org.apache.solr.BaseDistributedSearchTestCase$ShardsRepeatRule$ShardsFixedStatement.callStatement(BaseDistributedSearchTestCase.java:1082)
at org.apache.solr.BaseDistributedSearchTestCase$ShardsRepeatRule$ShardsStatement.evaluate(BaseDistributedSearchTestCase.java:1054)
at com.carrotsearch.randomizedtesting.rules.SystemPropertiesRestoreRule$1.evaluate(SystemPropertiesRestoreRule.java:57)
at org.apache.lucene.util.TestRuleSetupTeardownChained$1.evaluate(TestRuleSetupTeardownChained.java:49)
at org.apache.lucene.util.AbstractBeforeAfterRule$1.evaluate(AbstractBeforeAfterRule.java:45)
at org.apache.lucene.util.TestRuleThreadAndTestName$1.evaluate(TestRuleThreadAndTestName.java:48)
at org.apache.lucene.util.TestRuleIgnoreAfterMaxFailures$1.evaluate(TestRuleIgnoreAfterMaxFailures.java:64)
at org.apache.lucene.util.TestRuleMarkFailure$1.evaluate(TestRuleMarkFailure.java:47)
at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
at com.carrotsearch.randomizedtesting.ThreadLeakControl$StatementRunner.run(ThreadLeakControl.java:368)
at com.carrotsearch.randomizedtesting.ThreadLeakControl.forkTimeoutingTask(ThreadLeakControl.java:817)
at com.carrotsearch.randomizedtesting.ThreadLeakControl$3.evaluate(ThreadLeakControl.java:468)
at com.carrotsearch.randomizedtesting.RandomizedRunner.runSingleTest(RandomizedRunner.java:951)
at com.carrotsearch.randomizedtesting.RandomizedRunner$5.evaluate(RandomizedRunner.java:836)
at com.carrotsearch.randomizedtesting.RandomizedRunner$6.evaluate(RandomizedRunner.java:887)
at com.carrotsearch.randomizedtesting.RandomizedRunner$7.evaluate(RandomizedRunner.java:898)
at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
at com.carrotsearch.randomizedtesting.rules.SystemPropertiesRestoreRule$1.evaluate(SystemPropertiesRestoreRule.java:57)
at org.apache.lucene.util.AbstractBeforeAfterRule$1.evaluate(AbstractBeforeAfterRule.java:45)
at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
at org.apache.lucene.util.TestRuleStoreClassName$1.evaluate(TestRuleStoreClassName.java:41)
at com.carrotsearch.randomizedtesting.rules.NoShadowingOrOverridesOnMethodsRule$1.evaluate(NoShadowingOrOverridesOnMethodsRule.java:40)
at com.carrotsearch.randomizedtesting.rules.NoShadowingOrOverridesOnMethodsRule$1.evaluate(NoShadowingOrOverridesOnMethodsRule.java:40)
at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
at org.apache.lucene.util.TestRuleAssertionsRequired$1.evaluate(TestRuleAssertionsRequired.java:53)
at org.apache.lucene.util.TestRuleMarkFailure$1.evaluate(TestRuleMarkFailure.java:47)
at org.apache.lucene.util.TestRuleIgnoreAfterMaxFailures$1.evaluate(TestRuleIgnoreAfterMaxFailures.java:64)
at org.apache.lucene.util.TestRuleIgnoreTestSuites$1.evaluate(TestRuleIgnoreTestSuites.java:54)
at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
at com.carrotsearch.randomizedtesting.ThreadLeakControl$StatementRunner.run(ThreadLeakControl.java:368)
at java.base/java.lang.Thread.run(Thread.java:834)
Caused by: java.net.SocketTimeoutException: Read timed out
at java.base/java.net.SocketInputStream.socketRead0(Native Method)
at java.base/java.net.SocketInputStream.socketRead(SocketInputStream.java:115)
at java.base/java.net.SocketInputStream.read(SocketInputStream.java:168)
at java.base/java.net.SocketInputStream.read(SocketInputStream.java:140)
at java.base/sun.security.ssl.SSLSocketInputRecord.read(SSLSocketInputRecord.java:448)
at java.base/sun.security.ssl.SSLSocketInputRecord.bytesInCompletePacket(SSLSocketInputRecord.java:68)
at java.base/sun.security.ssl.SSLSocketImpl.readApplicationRecord(SSLSocketImpl.java:1104)
at java.base/sun.security.ssl.SSLSocketImpl$AppInputStream.read(SSLSocketImpl.java:823)
at org.apache.http.impl.io.SessionInputBufferImpl.streamRead(SessionInputBufferImpl.java:137)
at org.apache.http.impl.io.SessionInputBufferImpl.fillBuffer(SessionInputBufferImpl.java:153)
at org.apache.http.impl.io.SessionInputBufferImpl.readLine(SessionInputBufferImpl.java:280)
at org.apache.http.impl.conn.DefaultHttpResponseParser.parseHead(DefaultHttpResponseParser.java:138)
at org.apache.http.impl.conn.DefaultHttpResponseParser.parseHead(DefaultHttpResponseParser.java:56)
at org.apache.http.impl.io.AbstractMessageParser.parse(AbstractMessageParser.java:259)
at org.apache.http.impl.DefaultBHttpClientConnection.receiveResponseHeader(DefaultBHttpClientConnection.java:163)
at org.apache.http.impl.conn.CPoolProxy.receiveResponseHeader(CPoolProxy.java:157)
at org.apache.http.protocol.HttpRequestExecutor.doReceiveResponse(HttpRequestExecutor.java:273)
at org.apache.http.protocol.HttpRequestExecutor.execute(HttpRequestExecutor.java:125)
at org.apache.http.impl.execchain.MainClientExec.execute(MainClientExec.java:272)
at org.apache.http.impl.execchain.ProtocolExec.execute(ProtocolExec.java:186)
at org.apache.http.impl.execchain.RetryExec.execute(RetryExec.java:89)
at org.apache.http.impl.execchain.RedirectExec.execute(RedirectExec.java:110)
at org.apache.http.impl.client.InternalHttpClient.doExecute(InternalHttpClient.java:185)
at org.apache.http.impl.client.CloseableHttpClient.execute(CloseableHttpClient.java:83)
at org.apache.http.impl.client.CloseableHttpClient.execute(CloseableHttpClient.java:56)
at org.apache.solr.client.solrj.impl.HttpSolrClient.executeMethod(HttpSolrClient.java:564)
... 51 more
FAILED: org.apache.solr.cloud.cdcr.CdcrReplicationHandlerTest.testReplicationWithBufferedUpdates
Error Message:
Timeout while trying to assert number of documents @ source_collection
Stack Trace:
java.lang.AssertionError: Timeout while trying to assert number of documents @ source_collection
at __randomizedtesting.SeedInfo.seed([4BE768247389A4C8:98EE383A361A385F]:0)
at org.apache.solr.cloud.cdcr.BaseCdcrDistributedZkTest.assertNumDocs(BaseCdcrDistributedZkTest.java:277)
at org.apache.solr.cloud.cdcr.CdcrReplicationHandlerTest.testReplicationWithBufferedUpdates(CdcrReplicationHandlerTest.java:233)
at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)
at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
at java.base/java.lang.reflect.Method.invoke(Method.java:566)
at com.carrotsearch.randomizedtesting.RandomizedRunner.invoke(RandomizedRunner.java:1754)
at com.carrotsearch.randomizedtesting.RandomizedRunner$8.evaluate(RandomizedRunner.java:942)
at com.carrotsearch.randomizedtesting.RandomizedRunner$9.evaluate(RandomizedRunner.java:978)
at com.carrotsearch.randomizedtesting.RandomizedRunner$10.evaluate(RandomizedRunner.java:992)
at org.apache.solr.BaseDistributedSearchTestCase$ShardsRepeatRule$ShardsFixedStatement.callStatement(BaseDistributedSearchTestCase.java:1082)
at org.apache.solr.BaseDistributedSearchTestCase$ShardsRepeatRule$ShardsStatement.evaluate(BaseDistributedSearchTestCase.java:1054)
at com.carrotsearch.randomizedtesting.rules.SystemPropertiesRestoreRule$1.evaluate(SystemPropertiesRestoreRule.java:57)
at org.apache.lucene.util.TestRuleSetupTeardownChained$1.evaluate(TestRuleSetupTeardownChained.java:49)
at org.apache.lucene.util.AbstractBeforeAfterRule$1.evaluate(AbstractBeforeAfterRule.java:45)
at org.apache.lucene.util.TestRuleThreadAndTestName$1.evaluate(TestRuleThreadAndTestName.java:48)
at org.apache.lucene.util.TestRuleIgnoreAfterMaxFailures$1.evaluate(TestRuleIgnoreAfterMaxFailures.java:64)
at org.apache.lucene.util.TestRuleMarkFailure$1.evaluate(TestRuleMarkFailure.java:47)
at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
at com.carrotsearch.randomizedtesting.ThreadLeakControl$StatementRunner.run(ThreadLeakControl.java:368)
at com.carrotsearch.randomizedtesting.ThreadLeakControl.forkTimeoutingTask(ThreadLeakControl.java:817)
at com.carrotsearch.randomizedtesting.ThreadLeakControl$3.evaluate(ThreadLeakControl.java:468)
at com.carrotsearch.randomizedtesting.RandomizedRunner.runSingleTest(RandomizedRunner.java:951)
at com.carrotsearch.randomizedtesting.RandomizedRunner$5.evaluate(RandomizedRunner.java:836)
at com.carrotsearch.randomizedtesting.RandomizedRunner$6.evaluate(RandomizedRunner.java:887)
at com.carrotsearch.randomizedtesting.RandomizedRunner$7.evaluate(RandomizedRunner.java:898)
at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
at com.carrotsearch.randomizedtesting.rules.SystemPropertiesRestoreRule$1.evaluate(SystemPropertiesRestoreRule.java:57)
at org.apache.lucene.util.AbstractBeforeAfterRule$1.evaluate(AbstractBeforeAfterRule.java:45)
at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
at org.apache.lucene.util.TestRuleStoreClassName$1.evaluate(TestRuleStoreClassName.java:41)
at com.carrotsearch.randomizedtesting.rules.NoShadowingOrOverridesOnMethodsRule$1.evaluate(NoShadowingOrOverridesOnMethodsRule.java:40)
at com.carrotsearch.randomizedtesting.rules.NoShadowingOrOverridesOnMethodsRule$1.evaluate(NoShadowingOrOverridesOnMethodsRule.java:40)
at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
at org.apache.lucene.util.TestRuleAssertionsRequired$1.evaluate(TestRuleAssertionsRequired.java:53)
at org.apache.lucene.util.TestRuleMarkFailure$1.evaluate(TestRuleMarkFailure.java:47)
at org.apache.lucene.util.TestRuleIgnoreAfterMaxFailures$1.evaluate(TestRuleIgnoreAfterMaxFailures.java:64)
at org.apache.lucene.util.TestRuleIgnoreTestSuites$1.evaluate(TestRuleIgnoreTestSuites.java:54)
at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
at com.carrotsearch.randomizedtesting.ThreadLeakControl$StatementRunner.run(ThreadLeakControl.java:368)
at java.base/java.lang.Thread.run(Thread.java:834)
Caused by: java.lang.AssertionError: expected:<110> but was:<120>
at org.junit.Assert.fail(Assert.java:88)
at org.junit.Assert.failNotEquals(Assert.java:834)
at org.junit.Assert.assertEquals(Assert.java:645)
at org.junit.Assert.assertEquals(Assert.java:631)
at org.apache.solr.cloud.cdcr.BaseCdcrDistributedZkTest.assertNumDocs(BaseCdcrDistributedZkTest.java:268)
... 41 more
Build Log:
[...truncated 14018 lines...]
[junit4] Suite: org.apache.solr.cloud.cdcr.CdcrReplicationHandlerTest
[junit4] 2> 888511 INFO (SUITE-CdcrReplicationHandlerTest-seed#[4BE768247389A4C8]-worker) [ ] o.a.s.SolrTestCaseJ4 SecureRandom sanity checks: test.solr.allowed.securerandom=null & java.security.egd=file:/dev/./urandom
[junit4] 2> 888512 INFO (SUITE-CdcrReplicationHandlerTest-seed#[4BE768247389A4C8]-worker) [ ] o.a.s.SolrTestCaseJ4 Created dataDir: /home/jenkins/jenkins-slave/workspace/Lucene-Solr-NightlyTests-master/checkout/solr/build/solr-core/test/J0/temp/solr.cloud.cdcr.CdcrReplicationHandlerTest_4BE768247389A4C8-001/data-dir-17-001
[junit4] 2> 888512 WARN (SUITE-CdcrReplicationHandlerTest-seed#[4BE768247389A4C8]-worker) [ ] o.a.s.SolrTestCaseJ4 startTrackingSearchers: numOpens=1 numCloses=1
[junit4] 2> 888512 INFO (SUITE-CdcrReplicationHandlerTest-seed#[4BE768247389A4C8]-worker) [ ] o.a.s.SolrTestCaseJ4 Using PointFields (NUMERIC_POINTS_SYSPROP=true) w/NUMERIC_DOCVALUES_SYSPROP=true
[junit4] 2> 888518 INFO (SUITE-CdcrReplicationHandlerTest-seed#[4BE768247389A4C8]-worker) [ ] o.a.s.SolrTestCaseJ4 Randomized ssl (true) and clientAuth (false) via: @org.apache.solr.util.RandomizeSSL(reason="", ssl=0.0/0.0, value=0.0/0.0, clientAuth=0.0/0.0)
[junit4] 2> 888521 INFO (SUITE-CdcrReplicationHandlerTest-seed#[4BE768247389A4C8]-worker) [ ] o.a.s.BaseDistributedSearchTestCase Setting hostContext system property: /
[junit4] 2> 888526 INFO (TEST-CdcrReplicationHandlerTest.testPartialReplicationWithTruncatedTlog-seed#[4BE768247389A4C8]) [ ] o.a.s.c.ZkTestServer STARTING ZK TEST SERVER
[junit4] 2> 888545 INFO (ZkTestServer Run Thread) [ ] o.a.s.c.ZkTestServer client port:0.0.0.0/0.0.0.0:0
[junit4] 2> 888545 INFO (ZkTestServer Run Thread) [ ] o.a.s.c.ZkTestServer Starting server
[junit4] 2> 888645 INFO (TEST-CdcrReplicationHandlerTest.testPartialReplicationWithTruncatedTlog-seed#[4BE768247389A4C8]) [ ] o.a.s.c.ZkTestServer start zk server on port:34704
[junit4] 2> 888645 INFO (TEST-CdcrReplicationHandlerTest.testPartialReplicationWithTruncatedTlog-seed#[4BE768247389A4C8]) [ ] o.a.s.c.ZkTestServer waitForServerUp: 127.0.0.1:34704
[junit4] 2> 888645 INFO (TEST-CdcrReplicationHandlerTest.testPartialReplicationWithTruncatedTlog-seed#[4BE768247389A4C8]) [ ] o.a.s.c.ZkTestServer parse host and port list: 127.0.0.1:34704
[junit4] 2> 888645 INFO (TEST-CdcrReplicationHandlerTest.testPartialReplicationWithTruncatedTlog-seed#[4BE768247389A4C8]) [ ] o.a.s.c.ZkTestServer connecting to 127.0.0.1 34704
[junit4] 2> 888657 INFO (TEST-CdcrReplicationHandlerTest.testPartialReplicationWithTruncatedTlog-seed#[4BE768247389A4C8]) [ ] o.a.s.c.c.ConnectionManager Waiting for client to connect to ZooKeeper
[junit4] 2> 888689 INFO (zkConnectionManagerCallback-1220-thread-1) [ ] o.a.s.c.c.ConnectionManager zkClient has connected
[junit4] 2> 888689 INFO (TEST-CdcrReplicationHandlerTest.testPartialReplicationWithTruncatedTlog-seed#[4BE768247389A4C8]) [ ] o.a.s.c.c.ConnectionManager Client is connected to ZooKeeper
[junit4] 2> 888699 INFO (TEST-CdcrReplicationHandlerTest.testPartialReplicationWithTruncatedTlog-seed#[4BE768247389A4C8]) [ ] o.a.s.c.c.ConnectionManager Waiting for client to connect to ZooKeeper
[junit4] 2> 888700 INFO (zkConnectionManagerCallback-1222-thread-1) [ ] o.a.s.c.c.ConnectionManager zkClient has connected
[junit4] 2> 888700 INFO (TEST-CdcrReplicationHandlerTest.testPartialReplicationWithTruncatedTlog-seed#[4BE768247389A4C8]) [ ] o.a.s.c.c.ConnectionManager Client is connected to ZooKeeper
[junit4] 2> 888705 INFO (TEST-CdcrReplicationHandlerTest.testPartialReplicationWithTruncatedTlog-seed#[4BE768247389A4C8]) [ ] o.a.s.c.ZkTestServer put /home/jenkins/jenkins-slave/workspace/Lucene-Solr-NightlyTests-master/checkout/solr/core/src/test-files/solr/collection1/conf/solrconfig-cdcr.xml to /configs/conf1/solrconfig.xml
[junit4] 2> 888715 INFO (TEST-CdcrReplicationHandlerTest.testPartialReplicationWithTruncatedTlog-seed#[4BE768247389A4C8]) [ ] o.a.s.c.ZkTestServer put /home/jenkins/jenkins-slave/workspace/Lucene-Solr-NightlyTests-master/checkout/solr/core/src/test-files/solr/collection1/conf/schema15.xml to /configs/conf1/schema.xml
[junit4] 2> 888716 INFO (TEST-CdcrReplicationHandlerTest.testPartialReplicationWithTruncatedTlog-seed#[4BE768247389A4C8]) [ ] o.a.s.c.ZkTestServer put /home/jenkins/jenkins-slave/workspace/Lucene-Solr-NightlyTests-master/checkout/solr/core/src/test-files/solr/collection1/conf/solrconfig.snippet.randomindexconfig.xml to /configs/conf1/solrconfig.snippet.randomindexconfig.xml
[junit4] 2> 888717 INFO (TEST-CdcrReplicationHandlerTest.testPartialReplicationWithTruncatedTlog-seed#[4BE768247389A4C8]) [ ] o.a.s.c.ZkTestServer put /home/jenkins/jenkins-slave/workspace/Lucene-Solr-NightlyTests-master/checkout/solr/core/src/test-files/solr/collection1/conf/stopwords.txt to /configs/conf1/stopwords.txt
[junit4] 2> 888719 INFO (TEST-CdcrReplicationHandlerTest.testPartialReplicationWithTruncatedTlog-seed#[4BE768247389A4C8]) [ ] o.a.s.c.ZkTestServer put /home/jenkins/jenkins-slave/workspace/Lucene-Solr-NightlyTests-master/checkout/solr/core/src/test-files/solr/collection1/conf/protwords.txt to /configs/conf1/protwords.txt
[junit4] 2> 888720 INFO (TEST-CdcrReplicationHandlerTest.testPartialReplicationWithTruncatedTlog-seed#[4BE768247389A4C8]) [ ] o.a.s.c.ZkTestServer put /home/jenkins/jenkins-slave/workspace/Lucene-Solr-NightlyTests-master/checkout/solr/core/src/test-files/solr/collection1/conf/currency.xml to /configs/conf1/currency.xml
[junit4] 2> 888721 INFO (TEST-CdcrReplicationHandlerTest.testPartialReplicationWithTruncatedTlog-seed#[4BE768247389A4C8]) [ ] o.a.s.c.ZkTestServer put /home/jenkins/jenkins-slave/workspace/Lucene-Solr-NightlyTests-master/checkout/solr/core/src/test-files/solr/collection1/conf/enumsConfig.xml to /configs/conf1/enumsConfig.xml
[junit4] 2> 888723 INFO (TEST-CdcrReplicationHandlerTest.testPartialReplicationWithTruncatedTlog-seed#[4BE768247389A4C8]) [ ] o.a.s.c.ZkTestServer put /home/jenkins/jenkins-slave/workspace/Lucene-Solr-NightlyTests-master/checkout/solr/core/src/test-files/solr/collection1/conf/open-exchange-rates.json to /configs/conf1/open-exchange-rates.json
[junit4] 2> 888724 INFO (TEST-CdcrReplicationHandlerTest.testPartialReplicationWithTruncatedTlog-seed#[4BE768247389A4C8]) [ ] o.a.s.c.ZkTestServer put /home/jenkins/jenkins-slave/workspace/Lucene-Solr-NightlyTests-master/checkout/solr/core/src/test-files/solr/collection1/conf/mapping-ISOLatin1Accent.txt to /configs/conf1/mapping-ISOLatin1Accent.txt
[junit4] 2> 888725 INFO (TEST-CdcrReplicationHandlerTest.testPartialReplicationWithTruncatedTlog-seed#[4BE768247389A4C8]) [ ] o.a.s.c.ZkTestServer put /home/jenkins/jenkins-slave/workspace/Lucene-Solr-NightlyTests-master/checkout/solr/core/src/test-files/solr/collection1/conf/old_synonyms.txt to /configs/conf1/old_synonyms.txt
[junit4] 2> 888726 INFO (TEST-CdcrReplicationHandlerTest.testPartialReplicationWithTruncatedTlog-seed#[4BE768247389A4C8]) [ ] o.a.s.c.ZkTestServer put /home/jenkins/jenkins-slave/workspace/Lucene-Solr-NightlyTests-master/checkout/solr/core/src/test-files/solr/collection1/conf/synonyms.txt to /configs/conf1/synonyms.txt
[junit4] 2> 888729 INFO (TEST-CdcrReplicationHandlerTest.testPartialReplicationWithTruncatedTlog-seed#[4BE768247389A4C8]) [ ] o.a.s.c.c.ConnectionManager Waiting for client to connect to ZooKeeper
[junit4] 2> 888730 INFO (zkConnectionManagerCallback-1226-thread-1) [ ] o.a.s.c.c.ConnectionManager zkClient has connected
[junit4] 2> 888730 INFO (TEST-CdcrReplicationHandlerTest.testPartialReplicationWithTruncatedTlog-seed#[4BE768247389A4C8]) [ ] o.a.s.c.c.ConnectionManager Client is connected to ZooKeeper
[junit4] 2> 888832 INFO (TEST-CdcrReplicationHandlerTest.testPartialReplicationWithTruncatedTlog-seed#[4BE768247389A4C8]) [ ] o.a.s.SolrTestCaseJ4 ###Starting testPartialReplicationWithTruncatedTlog
[junit4] 2> 889550 WARN (TEST-CdcrReplicationHandlerTest.testPartialReplicationWithTruncatedTlog-seed#[4BE768247389A4C8]) [ ] o.e.j.s.h.g.GzipHandler minGzipSize of 0 is inefficient for short content, break even is size 23
[junit4] 2> 889550 INFO (TEST-CdcrReplicationHandlerTest.testPartialReplicationWithTruncatedTlog-seed#[4BE768247389A4C8]) [ ] o.a.s.c.s.e.JettySolrRunner Start Jetty (configured port=0, binding port=0)
[junit4] 2> 889550 INFO (TEST-CdcrReplicationHandlerTest.testPartialReplicationWithTruncatedTlog-seed#[4BE768247389A4C8]) [ ] o.a.s.c.s.e.JettySolrRunner Trying to start Jetty on port 0 try number 1 ...
[junit4] 2> 889550 INFO (TEST-CdcrReplicationHandlerTest.testPartialReplicationWithTruncatedTlog-seed#[4BE768247389A4C8]) [ ] o.e.j.s.Server jetty-9.4.24.v20191120; built: 2019-11-20T21:37:49.771Z; git: 363d5f2df3a8a28de40604320230664b9c793c16; jvm 11.0.4+10-LTS
[junit4] 2> 889585 INFO (TEST-CdcrReplicationHandlerTest.testPartialReplicationWithTruncatedTlog-seed#[4BE768247389A4C8]) [ ] o.e.j.s.session DefaultSessionIdManager workerName=node0
[junit4] 2> 889585 INFO (TEST-CdcrReplicationHandlerTest.testPartialReplicationWithTruncatedTlog-seed#[4BE768247389A4C8]) [ ] o.e.j.s.session No SessionScavenger set, using defaults
[junit4] 2> 889585 INFO (TEST-CdcrReplicationHandlerTest.testPartialReplicationWithTruncatedTlog-seed#[4BE768247389A4C8]) [ ] o.e.j.s.session node0 Scavenging every 600000ms
[junit4] 2> 889587 INFO (TEST-CdcrReplicationHandlerTest.testPartialReplicationWithTruncatedTlog-seed#[4BE768247389A4C8]) [ ] o.e.j.s.h.ContextHandler Started o.e.j.s.ServletContextHandler@4271a0f3{/,null,AVAILABLE}
[junit4] 2> 889588 INFO (TEST-CdcrReplicationHandlerTest.testPartialReplicationWithTruncatedTlog-seed#[4BE768247389A4C8]) [ ] o.e.j.s.AbstractConnector Started ServerConnector@6435e333{ssl,[ssl, alpn, http/1.1, h2]}{127.0.0.1:45869}
[junit4] 2> 889588 INFO (TEST-CdcrReplicationHandlerTest.testPartialReplicationWithTruncatedTlog-seed#[4BE768247389A4C8]) [ ] o.e.j.s.Server Started @889656ms
[junit4] 2> 889588 INFO (TEST-CdcrReplicationHandlerTest.testPartialReplicationWithTruncatedTlog-seed#[4BE768247389A4C8]) [ ] o.a.s.c.s.e.JettySolrRunner Jetty properties: {shards=shard1, hostContext=/, hostPort=45869, coreRootDirectory=/home/jenkins/jenkins-slave/workspace/Lucene-Solr-NightlyTests-master/checkout/solr/build/solr-core/test/J0/temp/solr.cloud.cdcr.CdcrReplicationHandlerTest_4BE768247389A4C8-001/jetty-001/cores}
[junit4] 2> 889589 ERROR (TEST-CdcrReplicationHandlerTest.testPartialReplicationWithTruncatedTlog-seed#[4BE768247389A4C8]) [ ] o.a.s.u.StartupLoggingUtils Missing Java Option solr.log.dir. Logging may be missing or incomplete.
[junit4] 2> 889589 INFO (TEST-CdcrReplicationHandlerTest.testPartialReplicationWithTruncatedTlog-seed#[4BE768247389A4C8]) [ ] o.a.s.s.SolrDispatchFilter Using logger factory org.apache.logging.slf4j.Log4jLoggerFactory
[junit4] 2> 889589 INFO (TEST-CdcrReplicationHandlerTest.testPartialReplicationWithTruncatedTlog-seed#[4BE768247389A4C8]) [ ] o.a.s.s.SolrDispatchFilter ___ _ Welcome to Apache Solr™ version 9.0.0
[junit4] 2> 889589 INFO (TEST-CdcrReplicationHandlerTest.testPartialReplicationWithTruncatedTlog-seed#[4BE768247389A4C8]) [ ] o.a.s.s.SolrDispatchFilter / __| ___| |_ _ Starting in cloud mode on port null
[junit4] 2> 889589 INFO (TEST-CdcrReplicationHandlerTest.testPartialReplicationWithTruncatedTlog-seed#[4BE768247389A4C8]) [ ] o.a.s.s.SolrDispatchFilter \__ \/ _ \ | '_| Install dir: null
[junit4] 2> 889589 INFO (TEST-CdcrReplicationHandlerTest.testPartialReplicationWithTruncatedTlog-seed#[4BE768247389A4C8]) [ ] o.a.s.s.SolrDispatchFilter |___/\___/_|_| Start time: 2019-12-19T09:15:19.563412Z
[junit4] 2> 889591 INFO (TEST-CdcrReplicationHandlerTest.testPartialReplicationWithTruncatedTlog-seed#[4BE768247389A4C8]) [ ] o.a.s.c.c.ConnectionManager Waiting for client to connect to ZooKeeper
[junit4] 2> 889592 INFO (zkConnectionManagerCallback-1228-thread-1) [ ] o.a.s.c.c.ConnectionManager zkClient has connected
[junit4] 2> 889592 INFO (TEST-CdcrReplicationHandlerTest.testPartialReplicationWithTruncatedTlog-seed#[4BE768247389A4C8]) [ ] o.a.s.c.c.ConnectionManager Client is connected to ZooKeeper
[junit4] 2> 889693 INFO (TEST-CdcrReplicationHandlerTest.testPartialReplicationWithTruncatedTlog-seed#[4BE768247389A4C8]) [ ] o.a.s.s.SolrDispatchFilter Loading solr.xml from SolrHome (not found in ZooKeeper)
[junit4] 2> 889693 INFO (TEST-CdcrReplicationHandlerTest.testPartialReplicationWithTruncatedTlog-seed#[4BE768247389A4C8]) [ ] o.a.s.c.SolrXmlConfig Loading container configuration from /home/jenkins/jenkins-slave/workspace/Lucene-Solr-NightlyTests-master/checkout/solr/build/solr-core/test/J0/temp/solr.cloud.cdcr.CdcrReplicationHandlerTest_4BE768247389A4C8-001/jetty-001/solr.xml
[junit4] 2> 889696 INFO (TEST-CdcrReplicationHandlerTest.testPartialReplicationWithTruncatedTlog-seed#[4BE768247389A4C8]) [ ] o.a.s.c.SolrXmlConfig Configuration parameter autoReplicaFailoverWorkLoopDelay is ignored
[junit4] 2> 889696 INFO (TEST-CdcrReplicationHandlerTest.testPartialReplicationWithTruncatedTlog-seed#[4BE768247389A4C8]) [ ] o.a.s.c.SolrXmlConfig Configuration parameter autoReplicaFailoverBadNodeExpiration is ignored
[junit4] 2> 889698 INFO (TEST-CdcrReplicationHandlerTest.testPartialReplicationWithTruncatedTlog-seed#[4BE768247389A4C8]) [ ] o.a.s.c.SolrXmlConfig MBean server found: com.sun.jmx.mbeanserver.JmxMBeanServer@62ce575, but no JMX reporters were configured - adding default JMX reporter.
[junit4] 2> 889990 INFO (TEST-CdcrReplicationHandlerTest.testPartialReplicationWithTruncatedTlog-seed#[4BE768247389A4C8]) [ ] o.a.s.h.c.HttpShardHandlerFactory Host whitelist initialized: WhitelistHostChecker [whitelistHosts=null, whitelistHostCheckingEnabled=false]
[junit4] 2> 890005 WARN (TEST-CdcrReplicationHandlerTest.testPartialReplicationWithTruncatedTlog-seed#[4BE768247389A4C8]) [ ] o.e.j.u.s.S.config Trusting all certificates configured for Client@6d796462[provider=null,keyStore=null,trustStore=null]
[junit4] 2> 890005 WARN (TEST-CdcrReplicationHandlerTest.testPartialReplicationWithTruncatedTlog-seed#[4BE768247389A4C8]) [ ] o.e.j.u.s.S.config No Client EndPointIdentificationAlgorithm configured for Client@6d796462[provider=null,keyStore=null,trustStore=null]
[junit4] 2> 890030 WARN (TEST-CdcrReplicationHandlerTest.testPartialReplicationWithTruncatedTlog-seed#[4BE768247389A4C8]) [ ] o.e.j.u.s.S.config Trusting all certificates configured for Client@22736a6b[provider=null,keyStore=null,trustStore=null]
[junit4] 2> 890030 WARN (TEST-CdcrReplicationHandlerTest.testPartialReplicationWithTruncatedTlog-seed#[4BE768247389A4C8]) [ ] o.e.j.u.s.S.config No Client EndPointIdentificationAlgorithm configured for Client@22736a6b[provider=null,keyStore=null,trustStore=null]
[junit4] 2> 890031 INFO (TEST-CdcrReplicationHandlerTest.testPartialReplicationWithTruncatedTlog-seed#[4BE768247389A4C8]) [ ] o.a.s.c.ZkContainer Zookeeper client=127.0.0.1:34704/solr
[junit4] 2> 890051 INFO (TEST-CdcrReplicationHandlerTest.testPartialReplicationWithTruncatedTlog-seed#[4BE768247389A4C8]) [ ] o.a.s.c.c.ConnectionManager Waiting for client to connect to ZooKeeper
[junit4] 2> 890073 INFO (zkConnectionManagerCallback-1235-thread-1) [ ] o.a.s.c.c.ConnectionManager zkClient has connected
[junit4] 2> 890073 INFO (TEST-CdcrReplicationHandlerTest.testPartialReplicationWithTruncatedTlog-seed#[4BE768247389A4C8]) [ ] o.a.s.c.c.ConnectionManager Client is connected to ZooKeeper
[junit4] 2> 890193 INFO (TEST-CdcrReplicationHandlerTest.testPartialReplicationWithTruncatedTlog-seed#[4BE768247389A4C8]) [n:127.0.0.1:45869_ ] o.a.s.c.c.ConnectionManager Waiting for client to connect to ZooKeeper
[junit4] 2> 890205 INFO (zkConnectionManagerCallback-1237-thread-1) [ ] o.a.s.c.c.ConnectionManager zkClient has connected
[junit4] 2> 890205 INFO (TEST-CdcrReplicationHandlerTest.testPartialReplicationWithTruncatedTlog-seed#[4BE768247389A4C8]) [n:127.0.0.1:45869_ ] o.a.s.c.c.ConnectionManager Client is connected to ZooKeeper
[junit4] 2> 890299 INFO (TEST-CdcrReplicationHandlerTest.testPartialReplicationWithTruncatedTlog-seed#[4BE768247389A4C8]) [n:127.0.0.1:45869_ ] o.a.s.c.OverseerElectionContext I am going to be the leader 127.0.0.1:45869_
[junit4] 2> 890300 INFO (TEST-CdcrReplicationHandlerTest.testPartialReplicationWithTruncatedTlog-seed#[4BE768247389A4C8]) [n:127.0.0.1:45869_ ] o.a.s.c.Overseer Overseer (id=75929743999827973-127.0.0.1:45869_-n_0000000000) starting
[junit4] 2> 890313 INFO (OverseerStateUpdate-75929743999827973-127.0.0.1:45869_-n_0000000000) [n:127.0.0.1:45869_ ] o.a.s.c.Overseer Starting to work on the main queue : 127.0.0.1:45869_
[junit4] 2> 890321 INFO (TEST-CdcrReplicationHandlerTest.testPartialReplicationWithTruncatedTlog-seed#[4BE768247389A4C8]) [n:127.0.0.1:45869_ ] o.a.s.c.ZkController Register node as live in ZooKeeper:/live_nodes/127.0.0.1:45869_
[junit4] 2> 890324 INFO (zkCallback-1236-thread-1) [ ] o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (0) -> (1)
[junit4] 2> 890325 WARN (TEST-CdcrReplicationHandlerTest.testPartialReplicationWithTruncatedTlog-seed#[4BE768247389A4C8]) [n:127.0.0.1:45869_ ] o.a.s.c.CoreContainer Not all security plugins configured! authentication=disabled authorization=disabled. Solr is only as secure as you make it. Consider configuring authentication/authorization before exposing Solr to users internal or external. See https://s.apache.org/solrsecurity for more info
[junit4] 2> 890397 INFO (TEST-CdcrReplicationHandlerTest.testPartialReplicationWithTruncatedTlog-seed#[4BE768247389A4C8]) [n:127.0.0.1:45869_ ] o.a.s.h.a.MetricsHistoryHandler No .system collection, keeping metrics history in memory.
[junit4] 2> 890448 INFO (TEST-CdcrReplicationHandlerTest.testPartialReplicationWithTruncatedTlog-seed#[4BE768247389A4C8]) [n:127.0.0.1:45869_ ] o.a.s.m.r.SolrJmxReporter JMX monitoring for 'solr.node' (registry 'solr.node') enabled at server: com.sun.jmx.mbeanserver.JmxMBeanServer@62ce575
[junit4] 2> 890484 INFO (TEST-CdcrReplicationHandlerTest.testPartialReplicationWithTruncatedTlog-seed#[4BE768247389A4C8]) [n:127.0.0.1:45869_ ] o.a.s.m.r.SolrJmxReporter JMX monitoring for 'solr.jvm' (registry 'solr.jvm') enabled at server: com.sun.jmx.mbeanserver.JmxMBeanServer@62ce575
[junit4] 2> 890495 INFO (TEST-CdcrReplicationHandlerTest.testPartialReplicationWithTruncatedTlog-seed#[4BE768247389A4C8]) [n:127.0.0.1:45869_ ] o.a.s.m.r.SolrJmxReporter JMX monitoring for 'solr.jetty' (registry 'solr.jetty') enabled at server: com.sun.jmx.mbeanserver.JmxMBeanServer@62ce575
[junit4] 2> 890502 INFO (TEST-CdcrReplicationHandlerTest.testPartialReplicationWithTruncatedTlog-seed#[4BE768247389A4C8]) [n:127.0.0.1:45869_ ] o.a.s.p.PackageLoader /packages.json updated to version -1
[junit4] 2> 890503 INFO (TEST-CdcrReplicationHandlerTest.testPartialReplicationWithTruncatedTlog-seed#[4BE768247389A4C8]) [n:127.0.0.1:45869_ ] o.a.s.c.CorePropertiesLocator Found 0 core definitions underneath /home/jenkins/jenkins-slave/workspace/Lucene-Solr-NightlyTests-master/checkout/solr/build/solr-core/test/J0/temp/solr.cloud.cdcr.CdcrReplicationHandlerTest_4BE768247389A4C8-001/jetty-001/cores
[junit4] 2> 891114 WARN (TEST-CdcrReplicationHandlerTest.testPartialReplicationWithTruncatedTlog-seed#[4BE768247389A4C8]) [ ] o.e.j.s.h.g.GzipHandler minGzipSize of 0 is inefficient for short content, break even is size 23
[junit4] 2> 891114 INFO (TEST-CdcrReplicationHandlerTest.testPartialReplicationWithTruncatedTlog-seed#[4BE768247389A4C8]) [ ] o.a.s.c.s.e.JettySolrRunner Start Jetty (configured port=0, binding port=0)
[junit4] 2> 891114 INFO (TEST-CdcrReplicationHandlerTest.testPartialReplicationWithTruncatedTlog-seed#[4BE768247389A4C8]) [ ] o.a.s.c.s.e.JettySolrRunner Trying to start Jetty on port 0 try number 1 ...
[junit4] 2> 891114 INFO (TEST-CdcrReplicationHandlerTest.testPartialReplicationWithTruncatedTlog-seed#[4BE768247389A4C8]) [ ] o.e.j.s.Server jetty-9.4.24.v20191120; built: 2019-11-20T21:37:49.771Z; git: 363d5f2df3a8a28de40604320230664b9c793c16; jvm 11.0.4+10-LTS
[junit4] 2> 891193 INFO (TEST-CdcrReplicationHandlerTest.testPartialReplicationWithTruncatedTlog-seed#[4BE768247389A4C8]) [ ] o.e.j.s.session DefaultSessionIdManager workerName=node0
[junit4] 2> 891193 INFO (TEST-CdcrReplicationHandlerTest.testPartialReplicationWithTruncatedTlog-seed#[4BE768247389A4C8]) [ ] o.e.j.s.session No SessionScavenger set, using defaults
[junit4] 2> 891193 INFO (TEST-CdcrReplicationHandlerTest.testPartialReplicationWithTruncatedTlog-seed#[4BE768247389A4C8]) [ ] o.e.j.s.session node0 Scavenging every 600000ms
[junit4] 2> 891205 INFO (TEST-CdcrReplicationHandlerTest.testPartialReplicationWithTruncatedTlog-seed#[4BE768247389A4C8]) [ ] o.e.j.s.h.ContextHandler Started o.e.j.s.ServletContextHandler@b1202be{/,null,AVAILABLE}
[junit4] 2> 891211 INFO (TEST-CdcrReplicationHandlerTest.testPartialReplicationWithTruncatedTlog-seed#[4BE768247389A4C8]) [ ] o.e.j.s.AbstractConnector Started ServerConnector@5126468{ssl,[ssl, alpn, http/1.1, h2]}{127.0.0.1:42246}
[junit4] 2> 891211 INFO (TEST-CdcrReplicationHandlerTest.testPartialReplicationWithTruncatedTlog-seed#[4BE768247389A4C8]) [ ] o.e.j.s.Server Started @891278ms
[junit4] 2> 891211 INFO (TEST-CdcrReplicationHandlerTest.testPartialReplicationWithTruncatedTlog-seed#[4BE768247389A4C8]) [ ] o.a.s.c.s.e.JettySolrRunner Jetty properties: {shards=shard2, hostContext=/, hostPort=42246, coreRootDirectory=/home/jenkins/jenkins-slave/workspace/Lucene-Solr-NightlyTests-master/checkout/solr/build/solr-core/test/J0/temp/solr.cloud.cdcr.CdcrReplicationHandlerTest_4BE768247389A4C8-001/jetty-002/cores}
[junit4] 2> 891211 ERROR (TEST-CdcrReplicationHandlerTest.testPartialReplicationWithTruncatedTlog-seed#[4BE768247389A4C8]) [ ] o.a.s.u.StartupLoggingUtils Missing Java Option solr.log.dir. Logging may be missing or incomplete.
[junit4] 2> 891211 INFO (TEST-CdcrReplicationHandlerTest.testPartialReplicationWithTruncatedTlog-seed#[4BE768247389A4C8]) [ ] o.a.s.s.SolrDispatchFilter Using logger factory org.apache.logging.slf4j.Log4jLoggerFactory
[junit4] 2> 891211 INFO (TEST-CdcrReplicationHandlerTest.testPartialReplicationWithTruncatedTlog-seed#[4BE768247389A4C8]) [ ] o.a.s.s.SolrDispatchFilter ___ _ Welcome to Apache Solr? version 9.0.0
[junit4] 2> 891211 INFO (TEST-CdcrReplicationHandlerTest.testPartialReplicationWithTruncatedTlog-seed#[4BE768247389A4C8]) [ ] o.a.s.s.SolrDispatchFilter / __| ___| |_ _ Starting in cloud mode on port null
[junit4] 2> 891211 INFO (TEST-CdcrReplicationHandlerTest.testPartialReplicationWithTruncatedTlog-seed#[4BE768247389A4C8]) [ ] o.a.s.s.SolrDispatchFilter \__ \/ _ \ | '_| Install dir: null
[junit4] 2> 891211 INFO (TEST-CdcrReplicationHandlerTest.testPartialReplicationWithTruncatedTlog-seed#[4BE768247389A4C8]) [ ] o.a.s.s.SolrDispatchFilter |___/\___/_|_| Start time: 2019-12-19T09:15:21.185901Z
[junit4] 2> 891229 INFO (TEST-CdcrReplicationHandlerTest.testPartialReplicationWithTruncatedTlog-seed#[4BE768247389A4C8]) [ ] o.a.s.c.c.ConnectionManager Waiting for client to connect to ZooKeeper
[junit4] 2> 891245 INFO (zkConnectionManagerCallback-1243-thread-1) [ ] o.a.s.c.c.ConnectionManager zkClient has connected
[junit4] 2> 891245 INFO (TEST-CdcrReplicationHandlerTest.testPartialReplicationWithTruncatedTlog-seed#[4BE768247389A4C8]) [ ] o.a.s.c.c.ConnectionManager Client is connected to ZooKeeper
[junit4] 2> 891347 INFO (TEST-CdcrReplicationHandlerTest.testPartialReplicationWithTruncatedTlog-seed#[4BE768247389A4C8]) [ ] o.a.s.s.SolrDispatchFilter Loading solr.xml from SolrHome (not found in ZooKeeper)
[junit4] 2> 891347 INFO (TEST-CdcrReplicationHandlerTest.testPartialReplicationWithTruncatedTlog-seed#[4BE768247389A4C8]) [ ] o.a.s.c.SolrXmlConfig Loading container configuration from /home/jenkins/jenkins-slave/workspace/Lucene-Solr-NightlyTests-master/checkout/solr/build/solr-core/test/J0/temp/solr.cloud.cdcr.CdcrReplicationHandlerTest_4BE768247389A4C8-001/jetty-002/solr.xml
[junit4] 2> 891350 INFO (TEST-CdcrReplicationHandlerTest.testPartialReplicationWithTruncatedTlog-seed#[4BE768247389A4C8]) [ ] o.a.s.c.SolrXmlConfig Configuration parameter autoReplicaFailoverWorkLoopDelay is ignored
[junit4] 2> 891350 INFO (TEST-CdcrReplicationHandlerTest.testPartialReplicationWithTruncatedTlog-seed#[4BE768247389A4C8]) [ ] o.a.s.c.SolrXmlConfig Configuration parameter autoReplicaFailoverBadNodeExpiration is ignored
[junit4] 2> 891365 INFO (TEST-CdcrReplicationHandlerTest.testPartialReplicationWithTruncatedTlog-seed#[4BE768247389A4C8]) [ ] o.a.s.c.SolrXmlConfig MBean server found: com.sun.jmx.mbeanserver.JmxMBeanServer@62ce575, but no JMX reporters were configured - adding default JMX reporter.
[junit4] 2> 891585 INFO (TEST-CdcrReplicationHandlerTest.testPartialReplicationWithTruncatedTlog-seed#[4BE768247389A4C8]) [ ] o.a.s.h.c.HttpShardHandlerFactory Host whitelist initialized: WhitelistHostChecker [whitelistHosts=null, whitelistHostCheckingEnabled=false]
[junit4] 2> 891601 WARN (TEST-CdcrReplicationHandlerTest.testPartialReplicationWithTruncatedTlog-seed#[4BE768247389A4C8]) [ ] o.e.j.u.s.S.config Trusting all certificates configured for Client@5e6602ed[provider=null,keyStore=null,trustStore=null]
[junit4] 2> 891601 WARN (TEST-CdcrReplicationHandlerTest.testPartialReplicationWithTruncatedTlog-seed#[4BE768247389A4C8]) [ ] o.e.j.u.s.S.config No Client EndPointIdentificationAlgorithm configured for Client@5e6602ed[provider=null,keyStore=null,trustStore=null]
[junit4] 2> 891624 WARN (TEST-CdcrReplicationHandlerTest.testPartialReplicationWithTruncatedTlog-seed#[4BE768247389A4C8]) [ ] o.e.j.u.s.S.config Trusting all certificates configured for Client@4d2beba3[provider=null,keyStore=null,trustStore=null]
[junit4] 2> 891624 WARN (TEST-CdcrReplicationHandlerTest.testPartialReplicationWithTruncatedTlog-seed#[4BE768247389A4C8]) [ ] o.e.j.u.s.S.config No Client EndPointIdentificationAlgorithm configured for Client@4d2beba3[provider=null,keyStore=null,trustStore=null]
[junit4] 2> 891626 INFO (TEST-CdcrReplicationHandlerTest.testPartialReplicationWithTruncatedTlog-seed#[4BE768247389A4C8]) [ ] o.a.s.c.ZkContainer Zookeeper client=127.0.0.1:34704/solr
[junit4] 2> 891637 INFO (TEST-CdcrReplicationHandlerTest.testPartialReplicationWithTruncatedTlog-seed#[4BE768247389A4C8]) [ ] o.a.s.c.c.ConnectionManager Waiting for client to connect to ZooKeeper
[junit4] 2> 891643 INFO (zkConnectionManagerCallback-1250-thread-1) [ ] o.a.s.c.c.ConnectionManager zkClient has connected
[junit4] 2> 891643 INFO (TEST-CdcrReplicationHandlerTest.testPartialReplicationWithTruncatedTlog-seed#[4BE768247389A4C8]) [ ] o.a.s.c.c.ConnectionManager Client is connected to ZooKeeper
[junit4] 2> 891761 INFO (TEST-CdcrReplicationHandlerTest.testPartialReplicationWithTruncatedTlog-seed#[4BE768247389A4C8]) [ ] o.a.s.c.c.ConnectionManager Waiting for client to connect to ZooKeeper
[junit4] 2> 891773 INFO (zkConnectionManagerCallback-1252-thread-1) [ ] o.a.s.c.c.ConnectionManager zkClient has connected
[junit4] 2> 891773 INFO (TEST-CdcrReplicationHandlerTest.testPartialReplicationWithTruncatedTlog-seed#[4BE768247389A4C8]) [ ] o.a.s.c.c.ConnectionManager Client is connected to ZooKeeper
[junit4] 2> 891777 INFO (TEST-CdcrReplicationHandlerTest.testPartialReplicationWithTruncatedTlog-seed#[4BE768247389A4C8]) [ ] o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (0) -> (1)
[junit4] 2> 891780 INFO (TEST-CdcrReplicationHandlerTest.testPartialReplicationWithTruncatedTlog-seed#[4BE768247389A4C8]) [ ] o.a.s.c.ZkController Publish node=127.0.0.1:42246_ as DOWN
[junit4] 2> 891781 INFO (TEST-CdcrReplicationHandlerTest.testPartialReplicationWithTruncatedTlog-seed#[4BE768247389A4C8]) [ ] o.a.s.c.TransientSolrCoreCacheDefault Allocating transient cache for 4 transient cores
[junit4] 2> 891781 INFO (TEST-CdcrReplicationHandlerTest.testPartialReplicationWithTruncatedTlog-seed#[4BE768247389A4C8]) [ ] o.a.s.c.ZkController Register node as live in ZooKeeper:/live_nodes/127.0.0.1:42246_
[junit4] 2> 891783 INFO (zkCallback-1236-thread-1) [ ] o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (1) -> (2)
[junit4] 2> 891784 WARN (TEST-CdcrReplicationHandlerTest.testPartialReplicationWithTruncatedTlog-seed#[4BE768247389A4C8]) [ ] o.a.s.c.CoreContainer Not all security plugins configured! authentication=disabled authorization=disabled. Solr is only as secure as you make it. Consider configuring authentication/authorization before exposing Solr to users internal or external. See https://s.apache.org/solrsecurity for more info
[junit4] 2> 891801 INFO (zkCallback-1251-thread-1) [ ] o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (1) -> (2)
[junit4] 2> 891825 INFO (TEST-CdcrReplicationHandlerTest.testPartialReplicationWithTruncatedTlog-seed#[4BE768247389A4C8]) [ ] o.a.s.h.a.MetricsHistoryHandler No .system collection, keeping metrics history in memory.
[junit4] 2> 891890 INFO (TEST-CdcrReplicationHandlerTest.testPartialReplicationWithTruncatedTlog-seed#[4BE768247389A4C8]) [ ] o.a.s.m.r.SolrJmxReporter JMX monitoring for 'solr.node' (registry 'solr.node') enabled at server: com.sun.jmx.mbeanserver.JmxMBeanServer@62ce575
[junit4] 2> 891904 INFO (TEST-CdcrReplicationHandlerTest.testPartialReplicationWithTruncatedTlog-seed#[4BE768247389A4C8]) [ ] o.a.s.m.r.SolrJmxReporter JMX monitoring for 'solr.jvm' (registry 'solr.jvm') enabled at server: com.sun.jmx.mbeanserver.JmxMBeanServer@62ce575
[junit4] 2> 891904 INFO (TEST-CdcrReplicationHandlerTest.testPartialReplicationWithTruncatedTlog-seed#[4BE768247389A4C8]) [ ] o.a.s.m.r.SolrJmxReporter JMX monitoring for 'solr.jetty' (registry 'solr.jetty') enabled at server: com.sun.jmx.mbeanserver.JmxMBeanServer@62ce575
[junit4] 2> 891907 INFO (TEST-CdcrReplicationHandlerTest.testPartialReplicationWithTruncatedTlog-seed#[4BE768247389A4C8]) [ ] o.a.s.p.PackageLoader /packages.json updated to version -1
[junit4] 2> 891909 INFO (TEST-CdcrReplicationHandlerTest.testPartialReplicationWithTruncatedTlog-seed#[4BE768247389A4C8]) [ ] o.a.s.c.CorePropertiesLocator Found 0 core definitions underneath /home/jenkins/jenkins-slave/workspace/Lucene-Solr-NightlyTests-master/checkout/solr/build/solr-core/test/J0/temp/solr.cloud.cdcr.CdcrReplicationHandlerTest_4BE768247389A4C8-001/jetty-002/cores
[junit4] 2> 892004 INFO (TEST-CdcrReplicationHandlerTest.testPartialReplicationWithTruncatedTlog-seed#[4BE768247389A4C8]) [ ] o.a.s.c.c.ConnectionManager Waiting for client to connect to ZooKeeper
[junit4] 2> 892013 INFO (zkConnectionManagerCallback-1261-thread-1) [ ] o.a.s.c.c.ConnectionManager zkClient has connected
[junit4] 2> 892013 INFO (TEST-CdcrReplicationHandlerTest.testPartialReplicationWithTruncatedTlog-seed#[4BE768247389A4C8]) [ ] o.a.s.c.c.ConnectionManager Client is connected to ZooKeeper
[junit4] 2> 892014 INFO (TEST-CdcrReplicationHandlerTest.testPartialReplicationWithTruncatedTlog-seed#[4BE768247389A4C8]) [ ] o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (0) -> (2)
[junit4] 2> 892015 INFO (TEST-CdcrReplicationHandlerTest.testPartialReplicationWithTruncatedTlog-seed#[4BE768247389A4C8]) [ ] o.a.s.c.s.i.ZkClientClusterStateProvider Cluster at 127.0.0.1:34704/solr ready
[junit4] 2> 892200 INFO (qtp694697208-6841) [n:127.0.0.1:42246_ ] o.a.s.h.a.CollectionsHandler Invoked Collection Action :create with params collection.configName=conf1&name=tmp_collection&nrtReplicas=1&action=CREATE&numShards=1&createNodeSet=&wt=javabin&version=2 and sendToOCPQueue=true
[junit4] 2> 892203 INFO (OverseerThreadFactory-1164-thread-1-processing-n:127.0.0.1:45869_) [n:127.0.0.1:45869_ ] o.a.s.c.a.c.CreateCollectionCmd Create collection tmp_collection
[junit4] 2> 892309 WARN (OverseerThreadFactory-1164-thread-1-processing-n:127.0.0.1:45869_) [n:127.0.0.1:45869_ ] o.a.s.c.a.c.CreateCollectionCmd It is unusual to create a collection (tmp_collection) without cores.
[junit4] 2> 892311 INFO (qtp694697208-6841) [n:127.0.0.1:42246_ ] o.a.s.h.a.CollectionsHandler Wait for new collection to be active for at most 45 seconds. Check all shard replicas
[junit4] 2> 892337 INFO (qtp694697208-6841) [n:127.0.0.1:42246_ ] o.a.s.s.HttpSolrCall [admin] webapp=null path=/admin/collections params={collection.configName=conf1&name=tmp_collection&nrtReplicas=1&action=CREATE&numShards=1&createNodeSet=&wt=javabin&version=2} status=0 QTime=137
[junit4] 2> 892358 INFO (qtp694697208-6842) [n:127.0.0.1:42246_ ] o.a.s.h.a.CollectionsHandler Invoked Collection Action :addreplica with params node=127.0.0.1:45869_&action=ADDREPLICA&collection=tmp_collection&shard=shard1&type=NRT&wt=javabin&version=2 and sendToOCPQueue=true
[junit4] 2> 892365 INFO (OverseerCollectionConfigSetProcessor-75929743999827973-127.0.0.1:45869_-n_0000000000) [n:127.0.0.1:45869_ ] o.a.s.c.OverseerTaskQueue Response ZK path: /overseer/collection-queue-work/qnr-0000000000 doesn't exist. Requestor may have disconnected from ZooKeeper
[junit4] 2> 892368 INFO (OverseerThreadFactory-1164-thread-2-processing-n:127.0.0.1:45869_) [n:127.0.0.1:45869_ c:tmp_collection s:shard1 ] o.a.s.c.a.c.AddReplicaCmd Node Identified 127.0.0.1:45869_ for creating new replica of shard shard1 for collection tmp_collection
[junit4] 2> 892370 INFO (OverseerThreadFactory-1164-thread-2-processing-n:127.0.0.1:45869_) [n:127.0.0.1:45869_ c:tmp_collection s:shard1 ] o.a.s.c.a.c.AddReplicaCmd Returning CreateReplica command.
[junit4] 2> 892484 INFO (qtp661354581-6807) [n:127.0.0.1:45869_ x:tmp_collection_shard1_replica_n1 ] o.a.s.h.a.CoreAdminOperation core create command qt=/admin/cores&collection.configName=conf1&name=tmp_collection_shard1_replica_n1&action=CREATE&collection=tmp_collection&shard=shard1&wt=javabin&version=2&replicaType=NRT
[junit4] 2> 892485 INFO (qtp661354581-6807) [n:127.0.0.1:45869_ x:tmp_collection_shard1_replica_n1 ] o.a.s.c.TransientSolrCoreCacheDefault Allocating transient cache for 4 transient cores
[junit4] 2> 893527 INFO (qtp661354581-6807) [n:127.0.0.1:45869_ c:tmp_collection s:shard1 x:tmp_collection_shard1_replica_n1 ] o.a.s.c.SolrConfig Using Lucene MatchVersion: 9.0.0
[junit4] 2> 893543 INFO (qtp661354581-6807) [n:127.0.0.1:45869_ c:tmp_collection s:shard1 x:tmp_collection_shard1_replica_n1 ] o.a.s.s.IndexSchema [tmp_collection_shard1_replica_n1] Schema name=test
[junit4] 2> 893719 INFO (qtp661354581-6807) [n:127.0.0.1:45869_ c:tmp_collection s:shard1 x:tmp_collection_shard1_replica_n1 ] o.a.s.s.IndexSchema Loaded schema test/1.6 with uniqueid field id
[junit4] 2> 893791 INFO (qtp661354581-6807) [n:127.0.0.1:45869_ c:tmp_collection s:shard1 x:tmp_collection_shard1_replica_n1 ] o.a.s.c.CoreContainer Creating SolrCore 'tmp_collection_shard1_replica_n1' using configuration from collection tmp_collection, trusted=true
[junit4] 2> 893792 INFO (qtp661354581-6807) [n:127.0.0.1:45869_ c:tmp_collection s:shard1 x:tmp_collection_shard1_replica_n1 ] o.a.s.m.r.SolrJmxReporter JMX monitoring for 'solr.core.tmp_collection.shard1.replica_n1' (registry 'solr.core.tmp_collection.shard1.replica_n1') enabled at server: com.sun.jmx.mbeanserver.JmxMBeanServer@62ce575
[junit4] 2> 893792 INFO (qtp661354581-6807) [n:127.0.0.1:45869_ c:tmp_collection s:shard1 x:tmp_collection_shard1_replica_n1 ] o.a.s.c.SolrCore [[tmp_collection_shard1_replica_n1] ] Opening new SolrCore at [/home/jenkins/jenkins-slave/workspace/Lucene-Solr-NightlyTests-master/checkout/solr/build/solr-core/test/J0/temp/solr.cloud.cdcr.CdcrReplicationHandlerTest_4BE768247389A4C8-001/jetty-001/cores/tmp_collection_shard1_replica_n1], dataDir=[/home/jenkins/jenkins-slave/workspace/Lucene-Solr-NightlyTests-master/checkout/solr/build/solr-core/test/J0/temp/solr.cloud.cdcr.CdcrReplicationHandlerTest_4BE768247389A4C8-001/jetty-001/cores/tmp_collection_shard1_replica_n1/data/]
[junit4] 2> 893799 INFO (qtp661354581-6807) [n:127.0.0.1:45869_ c:tmp_collection s:shard1 x:tmp_collection_shard1_replica_n1 ] o.a.s.u.RandomMergePolicy RandomMergePolicy wrapping class org.apache.lucene.index.MockRandomMergePolicy: org.apache.lucene.index.MockRandomMergePolicy@2685ba93
[junit4] 2> 894121 INFO (qtp661354581-6807) [n:127.0.0.1:45869_ c:tmp_collection s:shard1 x:tmp_collection_shard1_replica_n1 ] o.a.s.u.UpdateHandler Using UpdateLog implementation: org.apache.solr.update.CdcrUpdateLog
[junit4] 2> 894121 INFO (qtp661354581-6807) [n:127.0.0.1:45869_ c:tmp_collection s:shard1 x:tmp_collection_shard1_replica_n1 ] o.a.s.u.UpdateLog Initializing UpdateLog: dataDir= defaultSyncLevel=FLUSH numRecordsToKeep=100 maxNumLogsToKeep=10 numVersionBuckets=65536
[junit4] 2> 894122 INFO (qtp661354581-6807) [n:127.0.0.1:45869_ c:tmp_collection s:shard1 x:tmp_collection_shard1_replica_n1 ] o.a.s.u.CommitTracker Hard AutoCommit: disabled
[junit4] 2> 894123 INFO (qtp661354581-6807) [n:127.0.0.1:45869_ c:tmp_collection s:shard1 x:tmp_collection_shard1_replica_n1 ] o.a.s.u.CommitTracker Soft AutoCommit: disabled
[junit4] 2> 894124 INFO (qtp661354581-6807) [n:127.0.0.1:45869_ c:tmp_collection s:shard1 x:tmp_collection_shard1_replica_n1 ] o.a.s.u.RandomMergePolicy RandomMergePolicy wrapping class org.apache.lucene.index.LogByteSizeMergePolicy: [LogByteSizeMergePolicy: minMergeSize=1677721, mergeFactor=5, maxMergeSize=2147483648, maxMergeSizeForForcedMerge=9223372036854775807, calibrateSizeByDeletes=true, maxMergeDocs=2147483647, maxCFSSegmentSizeMB=8.796093022207999E12, noCFSRatio=1.0]
[junit4] 2> 894128 INFO (qtp661354581-6807) [n:127.0.0.1:45869_ c:tmp_collection s:shard1 x:tmp_collection_shard1_replica_n1 ] o.a.s.s.SolrIndexSearcher Opening [Searcher@655cfa70[tmp_collection_shard1_replica_n1] main]
[junit4] 2> 894130 INFO (qtp661354581-6807) [n:127.0.0.1:45869_ c:tmp_collection s:shard1 x:tmp_collection_shard1_replica_n1 ] o.a.s.r.ManagedResourceStorage Configured ZooKeeperStorageIO with znodeBase: /configs/conf1
[junit4] 2> 894130 INFO (qtp661354581-6807) [n:127.0.0.1:45869_ c:tmp_collection s:shard1 x:tmp_collection_shard1_replica_n1 ] o.a.s.r.ManagedResourceStorage Loaded null at path _rest_managed.json using ZooKeeperStorageIO:path=/configs/conf1
[junit4] 2> 894131 INFO (qtp661354581-6807) [n:127.0.0.1:45869_ c:tmp_collection s:shard1 x:tmp_collection_shard1_replica_n1 ] o.a.s.h.ReplicationHandler Commits will be reserved for 10000ms.
[junit4] 2> 894142 INFO (qtp661354581-6807) [n:127.0.0.1:45869_ c:tmp_collection s:shard1 x:tmp_collection_shard1_replica_n1 ] o.a.s.h.CdcrBufferStateManager Created znode /collections/tmp_collection/cdcr/state/buffer
[junit4] 2> 894162 INFO (qtp661354581-6807) [n:127.0.0.1:45869_ c:tmp_collection s:shard1 x:tmp_collection_shard1_replica_n1 ] o.a.s.h.CdcrProcessStateManager Created znode /collections/tmp_collection/cdcr/state/process
[junit4] 2> 894195 INFO (qtp661354581-6807) [n:127.0.0.1:45869_ c:tmp_collection s:shard1 x:tmp_collection_shard1_replica_n1 ] o.a.s.u.UpdateLog Could not find max version in index or recent updates, using new clock 1653338982757433344
[junit4] 2> 894198 INFO (searcherExecutor-1178-thread-1-processing-n:127.0.0.1:45869_ x:tmp_collection_shard1_replica_n1 c:tmp_collection s:shard1) [n:127.0.0.1:45869_ c:tmp_collection s:shard1 x:tmp_collection_shard1_replica_n1 ] o.a.s.c.SolrCore [tmp_collection_shard1_replica_n1] Registered new searcher Searcher@655cfa70[tmp_collection_shard1_replica_n1] main{ExitableDirectoryReader(UninvertingDirectoryReader())}
[junit4] 2> 894206 INFO (qtp661354581-6807) [n:127.0.0.1:45869_ c:tmp_collection s:shard1 x:tmp_collection_shard1_replica_n1 ] o.a.s.c.ZkShardTerms Successful update of terms at /collections/tmp_collection/terms/shard1 to Terms{values={core_node2=0}, version=0}
[junit4] 2> 894206 INFO (qtp661354581-6807) [n:127.0.0.1:45869_ c:tmp_collection s:shard1 x:tmp_collection_shard1_replica_n1 ] o.a.s.c.ShardLeaderElectionContextBase make sure parent is created /collections/tmp_collection/leaders/shard1
[junit4] 2> 894210 INFO (qtp661354581-6807) [n:127.0.0.1:45869_ c:tmp_collection s:shard1 x:tmp_collection_shard1_replica_n1 ] o.a.s.c.ShardLeaderElectionContext Enough replicas found to continue.
[junit4] 2> 894210 INFO (qtp661354581-6807) [n:127.0.0.1:45869_ c:tmp_collection s:shard1 x:tmp_collection_shard1_replica_n1 ] o.a.s.c.ShardLeaderElectionContext I may be the new leader - try and sync
[junit4] 2> 894210 INFO (qtp661354581-6807) [n:127.0.0.1:45869_ c:tmp_collection s:shard1 x:tmp_collection_shard1_replica_n1 ] o.a.s.c.SyncStrategy Sync replicas to https://127.0.0.1:45869/tmp_collection_shard1_replica_n1/
[junit4] 2> 894210 INFO (qtp661354581-6807) [n:127.0.0.1:45869_ c:tmp_collection s:shard1 x:tmp_collection_shard1_replica_n1 ] o.a.s.c.SyncStrategy Sync Success - now sync replicas to me
[junit4] 2> 894210 INFO (qtp661354581-6807) [n:127.0.0.1:45869_ c:tmp_collection s:shard1 x:tmp_collection_shard1_replica_n1 ] o.a.s.c.SyncStrategy https://127.0.0.1:45869/tmp_collection_shard1_replica_n1/ has no replicas
[junit4] 2> 894210 INFO (qtp661354581-6807) [n:127.0.0.1:45869_ c:tmp_collection s:shard1 x:tmp_collection_shard1_replica_n1 ] o.a.s.c.ShardLeaderElectionContextBase Creating leader registration node /collections/tmp_collection/leaders/shard1/leader after winning as /collections/tmp_collection/leader_elect/shard1/election/75929743999827973-core_node2-n_0000000000
[junit4] 2> 894211 INFO (zkCallback-1236-thread-1) [ ] o.a.s.h.CdcrLeaderStateManager Received new leader state @ tmp_collection:shard1
[junit4] 2> 894221 INFO (qtp661354581-6807) [n:127.0.0.1:45869_ c:tmp_collection s:shard1 x:tmp_collection_shard1_replica_n1 ] o.a.s.c.ShardLeaderElectionContext I am the new leader: https://127.0.0.1:45869/tmp_collection_shard1_replica_n1/ shard1
[junit4] 2> 894324 INFO (zkCallback-1236-thread-1) [ ] o.a.s.c.c.ZkStateReader A cluster state change: [WatchedEvent state:SyncConnected type:NodeDataChanged path:/collections/tmp_collection/state.json] for collection [tmp_collection] has occurred - updating... (live nodes size: [2])
[junit4] 2> 894325 INFO (qtp661354581-6807) [n:127.0.0.1:45869_ c:tmp_collection s:shard1 x:tmp_collection_shard1_replica_n1 ] o.a.s.c.ZkController I am the leader, no recovery necessary
[junit4] 2> 894327 INFO (qtp661354581-6807) [n:127.0.0.1:45869_ c:tmp_collection s:shard1 x:tmp_collection_shard1_replica_n1 ] o.a.s.s.HttpSolrCall [admin] webapp=null path=/admin/cores params={qt=/admin/cores&collection.configName=conf1&name=tmp_collection_shard1_replica_n1&action=CREATE&collection=tmp_collection&shard=shard1&wt=javabin&version=2&replicaType=NRT} status=0 QTime=1842
[junit4] 2> 894334 INFO (zkCallback-1236-thread-2) [ ] o.a.s.c.c.ZkStateReader A cluster state change: [WatchedEvent state:SyncConnected type:NodeDataChanged path:/collections/tmp_collection/state.json] for collection [tmp_collection] has occurred - updating... (live nodes size: [2])
[junit4] 2> 894335 INFO (qtp694697208-6842) [n:127.0.0.1:42246_ c:tmp_collection ] o.a.s.s.HttpSolrCall [admin] webapp=null path=/admin/collections params={node=127.0.0.1:45869_&action=ADDREPLICA&collection=tmp_collection&shard=shard1&type=NRT&wt=javabin&version=2} status=0 QTime=1977
[junit4] 2> 894338 INFO (qtp694697208-6843) [n:127.0.0.1:42246_ ] o.a.s.h.a.CollectionsHandler Invoked Collection Action :addreplica with params node=127.0.0.1:42246_&action=ADDREPLICA&collection=tmp_collection&shard=shard1&type=NRT&wt=javabin&version=2 and sendToOCPQueue=true
[junit4] 2> 894345 INFO (OverseerCollectionConfigSetProcessor-75929743999827973-127.0.0.1:45869_-n_0000000000) [n:127.0.0.1:45869_ ] o.a.s.c.OverseerTaskQueue Response ZK path: /overseer/collection-queue-work/qnr-0000000002 doesn't exist. Requestor may have disconnected from ZooKeeper
[junit4] 2> 894357 INFO (OverseerThreadFactory-1164-thread-3-processing-n:127.0.0.1:45869_) [n:127.0.0.1:45869_ c:tmp_collection s:shard1 ] o.a.s.c.a.c.AddReplicaCmd Node Identified 127.0.0.1:42246_ for creating new replica of shard shard1 for collection tmp_collection
[junit4] 2> 894359 INFO (OverseerThreadFactory-1164-thread-3-processing-n:127.0.0.1:45869_) [n:127.0.0.1:45869_ c:tmp_collection s:shard1 ] o.a.s.c.a.c.AddReplicaCmd Returning CreateReplica command.
[junit4] 2> 894429 INFO (qtp694697208-6841) [n:127.0.0.1:42246_ x:tmp_collection_shard1_replica_n3 ] o.a.s.h.a.CoreAdminOperation core create command qt=/admin/cores&collection.configName=conf1&name=tmp_collection_shard1_replica_n3&action=CREATE&collection=tmp_collection&shard=shard1&wt=javabin&version=2&replicaType=NRT
[junit4] 2> 894535 INFO (zkCallback-1236-thread-2) [ ] o.a.s.c.c.ZkStateReader A cluster state change: [WatchedEvent state:SyncConnected type:NodeDataChanged path:/collections/tmp_collection/state.json] for collection [tmp_collection] has occurred - updating... (live nodes size: [2])
[junit4] 2> 894535 INFO (zkCallback-1236-thread-1) [ ] o.a.s.c.c.ZkStateReader A cluster state change: [WatchedEvent state:SyncConnected type:NodeDataChanged path:/collections/tmp_collection/state.json] for collection [tmp_collection] has occurred - updating... (live nodes size: [2])
[junit4] 2> 894545 INFO (zkCallback-1236-thread-3) [ ] o.a.s.c.c.ZkStateReader A cluster state change: [WatchedEvent state:SyncConnected type:NodeDataChanged path:/collections/tmp_collection/state.json] for collection [tmp_collection] has occurred - updating... (live nodes size: [2])
[junit4] 2> 895456 INFO (qtp694697208-6841) [n:127.0.0.1:42246_ c:tmp_collection s:shard1 x:tmp_collection_shard1_replica_n3 ] o.a.s.c.SolrConfig Using Lucene MatchVersion: 9.0.0
[junit4] 2> 895478 INFO (qtp694697208-6841) [n:127.0.0.1:42246_ c:tmp_collection s:shard1 x:tmp_collection_shard1_replica_n3 ] o.a.s.s.IndexSchema [tmp_collection_shard1_replica_n3] Schema name=test
[junit4] 2> 895651 INFO (qtp694697208-6841) [n:127.0.0.1:42246_ c:tmp_collection s:shard1 x:tmp_collection_shard1_replica_n3 ] o.a.s.s.IndexSchema Loaded schema test/1.6 with uniqueid field id
[junit4] 2> 895678 INFO (qtp694697208-6841) [n:127.0.0.1:42246_ c:tmp_collection s:shard1 x:tmp_collection_shard1_replica_n3 ] o.a.s.c.CoreContainer Creating SolrCore 'tmp_collection_shard1_replica_n3' using configuration from collection tmp_collection, trusted=true
[junit4] 2> 895678 INFO (qtp694697208-6841) [n:127.0.0.1:42246_ c:tmp_collection s:shard1 x:tmp_collection_shard1_replica_n3 ] o.a.s.m.r.SolrJmxReporter JMX monitoring for 'solr.core.tmp_collection.shard1.replica_n3' (registry 'solr.core.tmp_collection.shard1.replica_n3') enabled at server: com.sun.jmx.mbeanserver.JmxMBeanServer@62ce575
[junit4] 2> 895679 INFO (qtp694697208-6841) [n:127.0.0.1:42246_ c:tmp_collection s:shard1 x:tmp_collection_shard1_replica_n3 ] o.a.s.c.SolrCore [[tmp_collection_shard1_replica_n3] ] Opening new SolrCore at [/home/jenkins/jenkins-slave/workspace/Lucene-Solr-NightlyTests-master/checkout/solr/build/solr-core/test/J0/temp/solr.cloud.cdcr.CdcrReplicationHandlerTest_4BE768247389A4C8-001/jetty-002/cores/tmp_collection_shard1_replica_n3], dataDir=[/home/jenkins/jenkins-slave/workspace/Lucene-Solr-NightlyTests-master/checkout/solr/build/solr-core/test/J0/temp/solr.cloud.cdcr.CdcrReplicationHandlerTest_4BE768247389A4C8-001/jetty-002/cores/tmp_collection_shard1_replica_n3/data/]
[junit4] 2> 895685 INFO (qtp694697208-6841) [n:127.0.0.1:42246_ c:tmp_collection s:shard1 x:tmp_collection_shard1_replica_n3 ] o.a.s.u.RandomMergePolicy RandomMergePolicy wrapping class org.apache.lucene.index.MockRandomMergePolicy: org.apache.lucene.index.MockRandomMergePolicy@76938860
[junit4] 2> 895829 INFO (qtp694697208-6841) [n:127.0.0.1:42246_ c:tmp_collection s:shard1 x:tmp_collection_shard1_replica_n3 ] o.a.s.u.UpdateHandler Using UpdateLog implementation: org.apache.solr.update.CdcrUpdateLog
[junit4] 2> 895829 INFO (qtp694697208-6841) [n:127.0.0.1:42246_ c:tmp_collection s:shard1 x:tmp_collection_shard1_replica_n3 ] o.a.s.u.UpdateLog Initializing UpdateLog: dataDir= defaultSyncLevel=FLUSH numRecordsToKeep=100 maxNumLogsToKeep=10 numVersionBuckets=65536
[junit4] 2> 895830 INFO (qtp694697208-6841) [n:127.0.0.1:42246_ c:tmp_collection s:shard1 x:tmp_collection_shard1_replica_n3 ] o.a.s.u.CommitTracker Hard AutoCommit: disabled
[junit4] 2> 895830 INFO (qtp694697208-6841) [n:127.0.0.1:42246_ c:tmp_collection s:shard1 x:tmp_collection_shard1_replica_n3 ] o.a.s.u.CommitTracker Soft AutoCommit: disabled
[junit4] 2> 895831 INFO (qtp694697208-6841) [n:127.0.0.1:42246_ c:tmp_collection s:shard1 x:tmp_collection_shard1_replica_n3 ] o.a.s.u.RandomMergePolicy RandomMergePolicy wrapping class org.apache.lucene.index.LogByteSizeMergePolicy: [LogByteSizeMergePolicy: minMergeSize=1677721, mergeFactor=5, maxMergeSize=2147483648, maxMergeSizeForForcedMerge=9223372036854775807, calibrateSizeByDeletes=true, maxMergeDocs=2147483647, maxCFSSegmentSizeMB=8.796093022207999E12, noCFSRatio=1.0]
[junit4] 2> 895832 INFO (qtp694697208-6841) [n:127.0.0.1:42246_ c:tmp_collection s:shard1 x:tmp_collection_shard1_replica_n3 ] o.a.s.s.SolrIndexSearcher Opening [Searcher@470b37de[tmp_collection_shard1_replica_n3] main]
[junit4] 2> 895838 INFO (qtp694697208-6841) [n:127.0.0.1:42246_ c:tmp_collection s:shard1 x:tmp_collection_shard1_replica_n3 ] o.a.s.r.ManagedResourceStorage Configured ZooKeeperStorageIO with znodeBase: /configs/conf1
[junit4] 2> 895838 INFO (qtp694697208-6841) [n:127.0.0.1:42246_ c:tmp_collection s:shard1 x:tmp_collection_shard1_replica_n3 ] o.a.s.r.ManagedResourceStorage Loaded null at path _rest_managed.json using ZooKeeperStorageIO:path=/configs/conf1
[junit4] 2> 895839 INFO (qtp694697208-6841) [n:127.0.0.1:42246_ c:tmp_collection s:shard1 x:tmp_collection_shard1_replica_n3 ] o.a.s.h.ReplicationHandler Commits will be reserved for 10000ms.
[junit4] 2> 895876 INFO (qtp694697208-6841) [n:127.0.0.1:42246_ c:tmp_collection s:shard1 x:tmp_collection_shard1_replica_n3 ] o.a.s.u.UpdateLog Could not find max version in index or recent updates, using new clock 1653338984520089600
[junit4] 2> 895880 INFO (searcherExecutor-1184-thread-1-processing-n:127.0.0.1:42246_ x:tmp_collection_shard1_replica_n3 c:tmp_collection s:shard1) [n:127.0.0.1:42246_ c:tmp_collection s:shard1 x:tmp_collection_shard1_replica_n3 ] o.a.s.c.SolrCore [tmp_collection_shard1_replica_n3] Registered new searcher Searcher@470b37de[tmp_collection_shard1_replica_n3] main{ExitableDirectoryReader(UninvertingDirectoryReader())}
[junit4] 2> 895883 INFO (qtp694697208-6841) [n:127.0.0.1:42246_ c:tmp_collection s:shard1 x:tmp_collection_shard1_replica_n3 ] o.a.s.c.ZkShardTerms Successful update of terms at /collections/tmp_collection/terms/shard1 to Terms{values={core_node2=0, core_node4=0}, version=1}
[junit4] 2> 895884 INFO (qtp694697208-6841) [n:127.0.0.1:42246_ c:tmp_collection s:shard1 x:tmp_collection_shard1_replica_n3 ] o.a.s.c.ShardLeaderElectionContextBase make sure parent is created /collections/tmp_collection/leaders/shard1
[junit4] 2> 895886 INFO (qtp694697208-6841) [n:127.0.0.1:42246_ c:tmp_collection s:shard1 x:tmp_collection_shard1_replica_n3 ] o.a.s.c.ZkController Core needs to recover:tmp_collection_shard1_replica_n3
[junit4] 2> 895903 INFO (updateExecutor-1246-thread-1-processing-n:127.0.0.1:42246_ x:tmp_collection_shard1_replica_n3 c:tmp_collection s:shard1) [n:127.0.0.1:42246_ c:tmp_collection s:shard1 r:core_node4 x:tmp_collection_shard1_replica_n3 ] o.a.s.u.DefaultSolrCoreState Running recovery
[junit4] 2> 895904 INFO (recoveryExecutor-1248-thread-1-processing-n:127.0.0.1:42246_ x:tmp_collection_shard1_replica_n3 c:tmp_collection s:shard1 r:core_node4) [n:127.0.0.1:42246_ c:tmp_collection s:shard1 r:core_node4 x:tmp_collection_shard1_replica_n3 ] o.a.s.c.RecoveryStrategy Starting recovery process. recoveringAfterStartup=true
[junit4] 2> 895904 INFO (recoveryExecutor-1248-thread-1-processing-n:127.0.0.1:42246_ x:tmp_collection_shard1_replica_n3 c:tmp_collection s:shard1 r:core_node4) [n:127.0.0.1:42246_ c:tmp_collection s:shard1 r:core_node4 x:tmp_collection_shard1_replica_n3 ] o.a.s.c.RecoveryStrategy startupVersions is empty
[junit4] 2> 895907 INFO (qtp694697208-6841) [n:127.0.0.1:42246_ c:tmp_collection s:shard1 x:tmp_collection_shard1_replica_n3 ] o.a.s.s.HttpSolrCall [admin] webapp=null path=/admin/cores params={qt=/admin/cores&collection.configName=conf1&name=tmp_collection_shard1_replica_n3&action=CREATE&collection=tmp_collection&shard=shard1&wt=javabin&version=2&replicaType=NRT} status=0 QTime=1477
[junit4] 2> 895922 INFO (qtp694697208-6843) [n:127.0.0.1:42246_ c:tmp_collection ] o.a.s.s.HttpSolrCall [admin] webapp=null path=/admin/collections params={node=127.0.0.1:42246_&action=ADDREPLICA&collection=tmp_collection&shard=shard1&type=NRT&wt=javabin&version=2} status=0 QTime=1583
[junit4] 2> 895962 INFO (qtp661354581-6808) [n:127.0.0.1:45869_ c:tmp_collection s:shard1 r:core_node2 x:tmp_collection_shard1_replica_n1 ] o.a.s.c.S.Request [tmp_collection_shard1_replica_n1] webapp= path=/admin/ping params={wt=javabin&version=2} hits=0 status=0 QTime=0
[junit4] 2> 895962 INFO (qtp661354581-6808) [n:127.0.0.1:45869_ c:tmp_collection s:shard1 r:core_node2 x:tmp_collection_shard1_replica_n1 ] o.a.s.c.S.Request [tmp_collection_shard1_replica_n1] webapp= path=/admin/ping params={wt=javabin&version=2} status=0 QTime=0
[junit4] 2> 895963 INFO (recoveryExecutor-1248-thread-1-processing-n:127.0.0.1:42246_ x:tmp_collection_shard1_replica_n3 c:tmp_collection s:shard1 r:core_node4) [n:127.0.0.1:42246_ c:tmp_collection s:shard1 r:core_node4 x:tmp_collection_shard1_replica_n3 ] o.a.s.c.RecoveryStrategy Begin buffering updates. core=[tmp_collection_shard1_replica_n3]
[junit4] 2> 895964 INFO (recoveryExecutor-1248-thread-1-processing-n:127.0.0.1:42246_ x:tmp_collection_shard1_replica_n3 c:tmp_collection s:shard1 r:core_node4) [n:127.0.0.1:42246_ c:tmp_collection s:shard1 r:core_node4 x:tmp_collection_shard1_replica_n3 ] o.a.s.u.UpdateLog Starting to buffer updates. FSUpdateLog{state=ACTIVE, tlog=null}
[junit4] 2> 895964 INFO (recoveryExecutor-1248-thread-1-processing-n:127.0.0.1:42246_ x:tmp_collection_shard1_replica_n3 c:tmp_collection s:shard1 r:core_node4) [n:127.0.0.1:42246_ c:tmp_collection s:shard1 r:core_node4 x:tmp_collection_shard1_replica_n3 ] o.a.s.c.RecoveryStrategy Publishing state of core [tmp_collection_shard1_replica_n3] as recovering, leader is [https://127.0.0.1:45869/tmp_collection_shard1_replica_n1/] and I am [https://127.0.0.1:42246/tmp_collection_shard1_replica_n3/]
[junit4] 2> 895976 INFO (qtp661354581-6807) [n:127.0.0.1:45869_ c:tmp_collection s:shard1 r:core_node2 x:tmp_collection_shard1_replica_n1 ] o.a.s.c.S.Request [tmp_collection_shard1_replica_n1] webapp= path=/cdcr params={action=LASTPROCESSEDVERSION&wt=javabin&version=2} status=0 QTime=0
[junit4] 2> 896026 INFO (TEST-CdcrReplicationHandlerTest.testPartialReplicationWithTruncatedTlog-seed#[4BE768247389A4C8]) [ ] o.a.s.c.AbstractDistribZkTestBase Wait for recoveries to finish - collection: tmp_collection failOnTimeout:true timeout (sec):330
[junit4] 2> 896029 INFO (recoveryExecutor-1248-thread-1-processing-n:127.0.0.1:42246_ x:tmp_collection_shard1_replica_n3 c:tmp_collection s:shard1 r:core_node4) [n:127.0.0.1:42246_ c:tmp_collection s:shard1 r:core_node4 x:tmp_collection_shard1_replica_n3 ] o.a.s.c.RecoveryStrategy Sending prep recovery command to [https://127.0.0.1:45869]; [WaitForState: action=PREPRECOVERY&core=tmp_collection_shard1_replica_n1&nodeName=127.0.0.1:42246_&coreNodeName=core_node4&state=recovering&checkLive=true&onlyIfLeader=true&onlyIfLeaderActive=true]
[junit4] 1> replica:core_node2 rstate:active live:true
[junit4] 1> replica:core_node4 rstate:down live:true
[junit4] 1> replica:core_node2 rstate:active live:true
[junit4] 1> replica:core_node4 rstate:down live:true
[junit4] 1> replica:core_node2 rstate:active live:true
[junit4] 1> replica:core_node4 rstate:down live:true
[junit4] 2> 896034 INFO (qtp661354581-6808) [n:127.0.0.1:45869_ x:tmp_collection_shard1_replica_n1 ] o.a.s.h.a.PrepRecoveryOp Going to wait for coreNodeName: core_node4, state: recovering, checkLive: true, onlyIfLeader: true, onlyIfLeaderActive: true
[junit4] 2> 896034 INFO (qtp661354581-6808) [n:127.0.0.1:45869_ x:tmp_collection_shard1_replica_n1 ] o.a.s.h.a.PrepRecoveryOp In WaitForState(recovering): collection=tmp_collection, shard=shard1, thisCore=tmp_collection_shard1_replica_n1, leaderDoesNotNeedRecovery=false, isLeader? true, live=true, checkLive=true, currentState=down, localState=active, nodeName=127.0.0.1:42246_, coreNodeName=core_node4, onlyIfActiveCheckResult=false, nodeProps: core_node4:{
[junit4] 2> "core":"tmp_collection_shard1_replica_n3",
[junit4] 2> "base_url":"https://127.0.0.1:42246",
[junit4] 2> "node_name":"127.0.0.1:42246_",
[junit4] 2> "state":"down",
[junit4] 2> "type":"NRT"}
[junit4] 2> 896034 INFO (qtp661354581-6808) [n:127.0.0.1:45869_ x:tmp_collection_shard1_replica_n1 ] o.a.s.h.a.PrepRecoveryOp In WaitForState(recovering): collection=tmp_collection, shard=shard1, thisCore=tmp_collection_shard1_replica_n1, leaderDoesNotNeedRecovery=false, isLeader? true, live=true, checkLive=true, currentState=down, localState=active, nodeName=127.0.0.1:42246_, coreNodeName=core_node4, onlyIfActiveCheckResult=false, nodeProps: core_node4:{
[junit4] 2> "core":"tmp_collection_shard1_replica_n3",
[junit4] 2> "base_url":"https://127.0.0.1:42246",
[junit4] 2> "node_name":"127.0.0.1:42246_",
[junit4] 2> "state":"down",
[junit4] 2> "type":"NRT"}
[junit4] 2> 896034 INFO (qtp661354581-6808) [n:127.0.0.1:45869_ x:tmp_collection_shard1_replica_n1 ] o.a.s.h.a.PrepRecoveryOp In WaitForState(recovering): collection=tmp_collection, shard=shard1, thisCore=tmp_collection_shard1_replica_n1, leaderDoesNotNeedRecovery=false, isLeader? true, live=true, checkLive=true, currentState=down, localState=active, nodeName=127.0.0.1:42246_, coreNodeName=core_node4, onlyIfActiveCheckResult=false, nodeProps: core_node4:{
[junit4] 2> "core":"tmp_collection_shard1_replica_n3",
[junit4] 2> "base_url":"https://127.0.0.1:42246",
[junit4] 2> "node_name":"127.0.0.1:42246_",
[junit4] 2> "state":"down",
[junit4] 2> "type":"NRT"}
[junit4] 2> 896127 INFO (zkCallback-1251-thread-1) [ ] o.a.s.c.c.ZkStateReader A cluster state change: [WatchedEvent state:SyncConnected type:NodeDataChanged path:/collections/tmp_collection/state.json] for collection [tmp_collection] has occurred - updating... (live nodes size: [2])
[junit4] 2> 896127 INFO (zkCallback-1236-thread-3) [ ] o.a.s.c.c.ZkStateReader A cluster state change: [WatchedEvent state:SyncConnected type:NodeDataChanged path:/collections/tmp_collection/state.json] for collection [tmp_collection] has occurred - updating... (live nodes size: [2])
[junit4] 2> 896127 INFO (zkCallback-1236-thread-2) [ ] o.a.s.c.c.ZkStateReader A cluster state change: [WatchedEvent state:SyncConnected type:NodeDataChanged path:/collections/tmp_collection/state.json] for collection [tmp_collection] has occurred - updating... (live nodes size: [2])
[junit4] 2> 896127 INFO (zkCallback-1236-thread-1) [ ] o.a.s.c.c.ZkStateReader A cluster state change: [WatchedEvent state:SyncConnected type:NodeDataChanged path:/collections/tmp_collection/state.json] for collection [tmp_collection] has occurred - updating... (live nodes size: [2])
[junit4] 1> replica:core_node2 rstate:active live:true
[junit4] 2> 896129 INFO (watches-1238-thread-1) [ ] o.a.s.h.a.PrepRecoveryOp In WaitForState(recovering): collection=tmp_collection, shard=shard1, thisCore=tmp_collection_shard1_replica_n1, leaderDoesNotNeedRecovery=false, isLeader? true, live=true, checkLive=true, currentState=recovering, localState=active, nodeName=127.0.0.1:42246_, coreNodeName=core_node4, onlyIfActiveCheckResult=false, nodeProps: core_node4:{
[junit4] 2> "core":"tmp_collection_shard1_replica_n3",
[junit4] 2> "base_url":"https://127.0.0.1:42246",
[junit4] 2> "node_name":"127.0.0.1:42246_",
[junit4] 2> "state":"recovering",
[junit4] 2> "type":"NRT"}
[junit4] 1> replica:core_node4 rstate:recovering live:true
[junit4] 2> 896129 INFO (qtp661354581-6808) [n:127.0.0.1:45869_ x:tmp_collection_shard1_replica_n1 ] o.a.s.s.HttpSolrCall [admin] webapp=null path=/admin/cores params={nodeName=127.0.0.1:42246_&onlyIfLeaderActive=true&core=tmp_collection_shard1_replica_n1&coreNodeName=core_node4&action=PREPRECOVERY&checkLive=true&state=recovering&onlyIfLeader=true&wt=javabin&version=2} status=0 QTime=95
[junit4] 2> 896130 INFO (zkCallback-1251-thread-1) [ ] o.a.s.c.c.ZkStateReader A cluster state change: [WatchedEvent state:SyncConnected type:NodeDataChanged path:/collections/tmp_collection/state.json] for collection [tmp_collection] has occurred - updating... (live nodes size: [2])
[junit4] 2> 896130 INFO (zkCallback-1251-thread-2) [ ] o.a.s.c.c.ZkStateReader A cluster state change: [WatchedEvent state:SyncConnected type:NodeDataChanged path:/collections/tmp_collection/state.json] for collection [tmp_collection] has occurred - updating... (live nodes size: [2])
[junit4] 1> replica:core_node2 rstate:active live:true
[junit4] 1> replica:core_node4 rstate:recovering live:true
[junit4] 1> replica:core_node2 rstate:active live:true
[junit4] 1> replica:core_node4 rstate:recovering live:true
[junit4] 2> 896349 INFO (OverseerCollectionConfigSetProcessor-75929743999827973-127.0.0.1:45869_-n_0000000000) [n:127.0.0.1:45869_ ] o.a.s.c.OverseerTaskQueue Response ZK path: /overseer/collection-queue-work/qnr-0000000004 doesn't exist. Requestor may have disconnected from ZooKeeper
[junit4] 2> 896630 INFO (recoveryExecutor-1248-thread-1-processing-n:127.0.0.1:42246_ x:tmp_collection_shard1_replica_n3 c:tmp_collection s:shard1 r:core_node4) [n:127.0.0.1:42246_ c:tmp_collection s:shard1 r:core_node4 x:tmp_collection_shard1_replica_n3 ] o.a.s.c.RecoveryStrategy Attempting to PeerSync from [https://127.0.0.1:45869/tmp_collection_shard1_replica_n1/] - recoveringAfterStartup=[true]
[junit4] 2> 896630 WARN (recoveryExecutor-1248-thread-1-processing-n:127.0.0.1:42246_ x:tmp_collection_shard1_replica_n3 c:tmp_collection s:shard1 r:core_node4) [n:127.0.0.1:42246_ c:tmp_collection s:shard1 r:core_node4 x:tmp_collection_shard1_replica_n3 ] o.a.s.u.PeerSyncWithLeader no frame of reference to tell if we've missed updates
[junit4] 2> 896630 INFO (recoveryExecutor-1248-thread-1-processing-n:127.0.0.1:42246_ x:tmp_collection_shard1_replica_n3 c:tmp_collection s:shard1 r:core_node4) [n:127.0.0.1:42246_ c:tmp_collection s:shard1 r:core_node4 x:tmp_collection_shard1_replica_n3 ] o.a.s.c.RecoveryStrategy PeerSync Recovery was not successful - trying replication.
[junit4] 2> 896630 INFO (recoveryExecutor-1248-thread-1-processing-n:127.0.0.1:42246_ x:tmp_collection_shard1_replica_n3 c:tmp_collection s:shard1 r:core_node4) [n:127.0.0.1:42246_ c:tmp_collection s:shard1 r:core_node4 x:tmp_collection_shard1_replica_n3 ] o.a.s.c.RecoveryStrategy Starting Replication Recovery.
[junit4] 2> 896630 INFO (recoveryExecutor-1248-thread-1-processing-n:127.0.0.1:42246_ x:tmp_collection_shard1_replica_n3 c:tmp_collection s:shard1 r:core_node4) [n:127.0.0.1:42246_ c:tmp_collection s:shard1 r:core_node4 x:tmp_collection_shard1_replica_n3 ] o.a.s.c.RecoveryStrategy Attempting to replicate from [https://127.0.0.1:45869/tmp_collection_shard1_replica_n1/].
[junit4] 2> 896639 INFO (qtp661354581-6807) [n:127.0.0.1:45869_ c:tmp_collection s:shard1 r:core_node2 x:tmp_collection_shard1_replica_n1 ] o.a.s.u.DirectUpdateHandler2 start commit{_version_=1653338985320153088,optimize=false,openSearcher=false,waitSearcher=true,expungeDeletes=false,softCommit=false,prepareCommit=false}
[junit4] 2> 896639 INFO (qtp661354581-6807) [n:127.0.0.1:45869_ c:tmp_collection s:shard1 r:core_node2 x:tmp_collection_shard1_replica_n1 ] o.a.s.u.DirectUpdateHandler2 No uncommitted changes. Skipping IW.commit.
[junit4] 2> 896639 INFO (qtp661354581-6807) [n:127.0.0.1:45869_ c:tmp_collection s:shard1 r:core_node2 x:tmp_collection_shard1_replica_n1 ] o.a.s.u.DirectUpdateHandler2 end_commit_flush
[junit4] 2> 896686 INFO (qtp694697208-6843) [n:127.0.0.1:42246_ c:tmp_collection s:shard1 r:core_node4 x:tmp_collection_shard1_replica_n3 ] o.a.s.u.p.DistributedUpdateProcessor Ignoring commit while not ACTIVE - state: BUFFERING replay: false
[junit4] 2> 896686 INFO (qtp694697208-6843) [n:127.0.0.1:42246_ c:tmp_collection s:shard1 r:core_node4 x:tmp_collection_shard1_replica_n3 ] o.a.s.c.S.Request [tmp_collection_shard1_replica_n3] webapp= path=/update params={update.distrib=FROMLEADER&update.chain=cdcr-processor-chain&waitSearcher=true&openSearcher=false&commit=true&softCommit=false&distrib.from=https://127.0.0.1:45869/tmp_collection_shard1_replica_n1/&commit_end_point=replicas&wt=javabin&version=2&expungeDeletes=false} status=0 QTime=0
[junit4] 2> 896688 INFO (qtp661354581-6807) [n:127.0.0.1:45869_ c:tmp_collection s:shard1 r:core_node2 x:tmp_collection_shard1_replica_n1 ] o.a.s.c.S.Request [tmp_collection_shard1_replica_n1] webapp= path=/update params={waitSearcher=true&openSearcher=false&commit=true&softCommit=false&wt=javabin&version=2} status=0 QTime=56
[junit4] 2> 896690 INFO (qtp661354581-6809) [n:127.0.0.1:45869_ c:tmp_collection s:shard1 r:core_node2 x:tmp_collection_shard1_replica_n1 ] o.a.s.c.S.Request [tmp_collection_shard1_replica_n1] webapp= path=/replication params={qt=/replication&wt=javabin&version=2&command=indexversion} status=0 QTime=0
[junit4] 2> 896690 INFO (recoveryExecutor-1248-thread-1-processing-n:127.0.0.1:42246_ x:tmp_collection_shard1_replica_n3 c:tmp_collection s:shard1 r:core_node4) [n:127.0.0.1:42246_ c:tmp_collection s:shard1 r:core_node4 x:tmp_collection_shard1_replica_n3 ] o.a.s.h.IndexFetcher Master's generation: 1
[junit4] 2> 896690 INFO (recoveryExecutor-1248-thread-1-processing-n:127.0.0.1:42246_ x:tmp_collection_shard1_replica_n3 c:tmp_collection s:shard1 r:core_node4) [n:127.0.0.1:42246_ c:tmp_collection s:shard1 r:core_node4 x:tmp_collection_shard1_replica_n3 ] o.a.s.h.IndexFetcher Master's version: 0
[junit4] 2> 896690 INFO (recoveryExecutor-1248-thread-1-processing-n:127.0.0.1:42246_ x:tmp_collection_shard1_replica_n3 c:tmp_collection s:shard1 r:core_node4) [n:127.0.0.1:42246_ c:tmp_collection s:shard1 r:core_node4 x:tmp_collection_shard1_replica_n3 ] o.a.s.h.IndexFetcher Slave's generation: 1
[junit4] 2> 896690 INFO (recoveryExecutor-1248-thread-1-processing-n:127.0.0.1:42246_ x:tmp_collection_shard1_replica_n3 c:tmp_collection s:shard1 r:core_node4) [n:127.0.0.1:42246_ c:tmp_collection s:shard1 r:core_node4 x:tmp_collection_shard1_replica_n3 ] o.a.s.h.IndexFetcher Slave's version: 0
[junit4] 2> 896690 INFO (recoveryExecutor-1248-thread-1-processing-n:127.0.0.1:42246_ x:tmp_collection_shard1_replica_n3 c:tmp_collection s:shard1 r:core_node4) [n:127.0.0.1:42246_ c:tmp_collection s:shard1 r:core_node4 x:tmp_collection_shard1_replica_n3 ] o.a.s.h.IndexFetcher New index in Master. Deleting mine...
[junit4] 2> 896705 INFO (recoveryExecutor-1248-thread-1-processing-n:127.0.0.1:42246_ x:tmp_collection_shard1_replica_n3 c:tmp_collection s:shard1 r:core_node4) [n:127.0.0.1:42246_ c:tmp_collection s:shard1 r:core_node4 x:tmp_collection_shard1_replica_n3 ] o.a.s.s.SolrIndexSearcher Opening [Searcher@2012943b[tmp_collection_shard1_replica_n3] main]
[junit4] 2> 896707 INFO (searcherExecutor-1184-thread-1-processing-n:127.0.0.1:42246_ x:tmp_collection_shard1_replica_n3 c:tmp_collection s:shard1 r:core_node4) [n:127.0.0.1:42246_ c:tmp_collection s:shard1 r:core_node4 x:tmp_collection_shard1_replica_n3 ] o.a.s.c.SolrCore [tmp_collection_shard1_replica_n3] Registered new searcher Searcher@2012943b[tmp_collection_shard1_replica_n3] main{ExitableDirectoryReader(UninvertingDirectoryReader())}
[junit4] 2> 896713 INFO (recoveryExecutor-1248-thread-1-processing-n:127.0.0.1:42246_ x:tmp_collection_shard1_replica_n3 c:tmp_collection s:shard1 r:core_node4) [n:127.0.0.1:42246_ c:tmp_collection s:shard1 r:core_node4 x:tmp_collection_shard1_replica_n3 ] o.a.s.c.RecoveryStrategy No replay needed.
[junit4] 2> 896713 INFO (recoveryExecutor-1248-thread-1-processing-n:127.0.0.1:42246_ x:tmp_collection_shard1_replica_n3 c:tmp_collection s:shard1 r:core_node4) [n:127.0.0.1:42246_ c:tmp_collection s:shard1 r:core_node4 x:tmp_collection_shard1_replica_n3 ] o.a.s.c.RecoveryStrategy Replication Recovery was successful.
[junit4] 2> 896713 INFO (recoveryExecutor-1248-thread-1-processing-n:127.0.0.1:42246_ x:tmp_collection_shard1_replica_n3 c:tmp_collection s:shard1 r:core_node4) [n:127.0.0.1:42246_ c:tmp_collection s:shard1 r:core_node4 x:tmp_collection_shard1_replica_n3 ] o.a.s.c.RecoveryStrategy Registering as Active after recovery.
[junit4] 2> 896715 INFO (recoveryExecutor-1248-thread-1-processing-n:127.0.0.1:42246_ x:tmp_collection_shard1_replica_n3 c:tmp_collection s:shard1 r:core_node4) [n:127.0.0.1:42246_ c:tmp_collection s:shard1 r:core_node4 x:tmp_collection_shard1_replica_n3 ] o.a.s.c.RecoveryStrategy Updating version bucket highest from index after successful recovery.
[junit4] 2> 896715 INFO (recoveryExecutor-1248-thread-1-processing-n:127.0.0.1:42246_ x:tmp_collection_shard1_replica_n3 c:tmp_collection s:shard1 r:core_node4) [n:127.0.0.1:42246_ c:tmp_collection s:shard1 r:core_node4 x:tmp_collection_shard1_replica_n3 ] o.a.s.u.UpdateLog Could not find max version in index or recent updates, using new clock 1653338985399844864
[junit4] 2> 896716 INFO (recoveryExecutor-1248-thread-1-processing-n:127.0.0.1:42246_ x:tmp_collection_shard1_replica_n3 c:tmp_collection s:shard1 r:core_node4) [n:127.0.0.1:42246_ c:tmp_collection s:shard1 r:core_node4 x:tmp_collection_shard1_replica_n3 ] o.a.s.c.RecoveryStrategy Finished recovery process, successful=[true]
[junit4] 2> 896816 INFO (zkCallback-1251-thread-2) [ ] o.a.s.c.c.ZkStateReader A cluster state change: [WatchedEvent state:SyncConnected type:NodeDataChanged path:/collections/tmp_collection/state.json] for collection [tmp_collection] has occurred - updating... (live nodes size: [2])
[junit4] 2> 896816 INFO (zkCallback-1251-thread-1) [ ] o.a.s.c.c.ZkStateReader A cluster state change: [WatchedEvent state:SyncConnected type:NodeDataChanged path:/collections/tmp_collection/state.json] for collection [tmp_collection] has occurred - updating... (live nodes size: [2])
[junit4] 2> 896816 INFO (zkCallback-1236-thread-4) [ ] o.a.s.c.c.ZkStateReader A cluster state change: [WatchedEvent state:SyncConnected type:NodeDataChanged path:/collections/tmp_collection/state.json] for collection [tmp_collection] has occurred - updating... (live nodes size: [2])
[junit4] 2> 896817 INFO (zkCallback-1236-thread-2) [ ] o.a.s.c.c.ZkStateReader A cluster state change: [WatchedEvent state:SyncConnected type:NodeDataChanged path:/collections/tmp_collection/state.json] for collection [tmp_collection] has occurred - updating... (live nodes size: [2])
[junit4] 2> 896817 INFO (zkCallback-1236-thread-1) [ ] o.a.s.c.c.ZkStateReader A cluster state change: [WatchedEvent state:SyncConnected type:NodeDataChanged path:/collections/tmp_collection/state.json] for collection [tmp_collection] has occurred - updating... (live nodes size: [2])
[junit4] 1> replica:core_node2 rstate:active live:true
[junit4] 1> replica:core_node4 rstate:active live:true
[junit4] 1> no one is recoverying
[junit4] 2> 896817 INFO (TEST-CdcrReplicationHandlerTest.testPartialReplicationWithTruncatedTlog-seed#[4BE768247389A4C8]) [ ] o.a.s.c.AbstractDistribZkTestBase Recoveries finished - collection: tmp_collection
[junit4] 2> 896829 INFO (zkCallback-1251-thread-3) [ ] o.a.s.c.c.ZkStateReader A cluster state change: [WatchedEvent state:SyncConnected type:NodeDataChanged path:/collections/tmp_collection/state.json] for collection [tmp_collection] has occurred - updating... (live nodes size: [2])
[junit4] 2> 896840 INFO (TEST-CdcrReplicationHandlerTest.testPartialReplicationWithTruncatedTlog-seed#[4BE768247389A4C8]) [ ] o.a.s.c.c.ConnectionManager Waiting for client to connect to ZooKeeper
[junit4] 2> 896841 INFO (zkConnectionManagerCallback-1269-thread-1) [ ] o.a.s.c.c.ConnectionManager zkClient has connected
[junit4] 2> 896842 INFO (TEST-CdcrReplicationHandlerTest.testPartialReplicationWithTruncatedTlog-seed#[4BE768247389A4C8]) [ ] o.a.s.c.c.ConnectionManager Client is connected to ZooKeeper
[junit4] 2> 896844 INFO (TEST-CdcrReplicationHandlerTest.testPartialReplicationWithTruncatedTlog-seed#[4BE768247389A4C8]) [ ] o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (0) -> (2)
[junit4] 2> 896845 INFO (TEST-CdcrReplicationHandlerTest.testPartialReplicationWithTruncatedTlog-seed#[4BE768247389A4C8]) [ ] o.a.s.c.s.i.ZkClientClusterStateProvider Cluster at 127.0.0.1:34704/solr ready
[junit4] 2> 896917 INFO (qtp694697208-6839) [n:127.0.0.1:42246_ ] o.a.s.h.a.CollectionsHandler Invoked Collection Action :delete with params name=tmp_collection&action=DELETE&wt=javabin&version=2 and sendToOCPQueue=true
[junit4] 2> 896946 INFO (OverseerThreadFactory-1164-thread-4-processing-n:127.0.0.1:45869_) [n:127.0.0.1:45869_ ] o.a.s.c.a.c.OverseerCollectionMessageHandler Executing Collection Cmd=action=UNLOAD&deleteInstanceDir=true&deleteDataDir=true&deleteMetricsHistory=true, asyncId=null
[junit4] 2> 896980 INFO (qtp661354581-6808) [n:127.0.0.1:45869_ x:tmp_collection_shard1_replica_n1 ] o.a.s.m.SolrMetricManager Closing metric reporters for registry=solr.core.tmp_collection.shard1.replica_n1, tag=null
[junit4] 2> 896980 INFO (qtp661354581-6808) [n:127.0.0.1:45869_ x:tmp_collection_shard1_replica_n1 ] o.a.s.m.r.SolrJmxReporter Closing reporter [org.apache.solr.metrics.reporters.SolrJmxReporter@6d5a037a: rootName = null, domain = solr.core.tmp_collection.shard1.replica_n1, service url = null, agent id = null] for registry solr.core.tmp_collection.shard1.replica_n1 / com.codahale.metrics.MetricRegistry@655dc57c
[junit4] 2> 897041 INFO (qtp694697208-6842) [n:127.0.0.1:42246_ x:tmp_collection_shard1_replica_n3 ] o.a.s.m.SolrMetricManager Closing metric reporters for registry=solr.core.tmp_collection.shard1.replica_n3, tag=null
[junit4] 2> 897042 INFO (qtp694697208-6842) [n:127.0.0.1:42246_ x:tmp_collection_shard1_replica_n3 ] o.a.s.m.r.SolrJmxReporter Closing reporter [org.apache.solr.metrics.reporters.SolrJmxReporter@3719a898: rootName = null, domain = solr.core.tmp_collection.shard1.replica_n3, service url = null, agent id = null] for registry solr.core.tmp_collection.shard1.replica_n3 / com.codahale.metrics.MetricRegistry@54984a3d
[junit4] 2> 897042 WARN (cdcr-update-log-synchronizer-1189-thread-1) [ ] o.a.s.h.CdcrUpdateLogSynchronizer Caught unexpected exception
[junit4] 2> => org.apache.solr.client
[...truncated too long message...]
ines.txt -Dtests.locale=es-CR -Dtests.timezone=Europe/San_Marino -Dtests.asserts=true -Dtests.file.encoding=ISO-8859-1
[junit4] ERROR 151s J1 | RollingRestartTest.test <<<
[junit4] > Throwable #1: org.apache.solr.client.solrj.SolrServerException: Timeout occurred while waiting response from server at: https://127.0.0.1:39922/_/cv
[junit4] > at __randomizedtesting.SeedInfo.seed([4BE768247389A4C8:C3B357FEDD75C930]:0)
[junit4] > at org.apache.solr.client.solrj.impl.HttpSolrClient.executeMethod(HttpSolrClient.java:676)
[junit4] > at org.apache.solr.client.solrj.impl.HttpSolrClient.request(HttpSolrClient.java:265)
[junit4] > at org.apache.solr.client.solrj.impl.HttpSolrClient.request(HttpSolrClient.java:248)
[junit4] > at org.apache.solr.client.solrj.impl.LBSolrClient.doRequest(LBSolrClient.java:368)
[junit4] > at org.apache.solr.client.solrj.impl.LBSolrClient.request(LBSolrClient.java:296)
[junit4] > at org.apache.solr.client.solrj.impl.BaseCloudSolrClient.sendRequest(BaseCloudSolrClient.java:1143)
[junit4] > at org.apache.solr.client.solrj.impl.BaseCloudSolrClient.requestWithRetryOnStaleState(BaseCloudSolrClient.java:906)
[junit4] > at org.apache.solr.client.solrj.impl.BaseCloudSolrClient.request(BaseCloudSolrClient.java:838)
[junit4] > at org.apache.solr.client.solrj.SolrRequest.process(SolrRequest.java:207)
[junit4] > at org.apache.solr.client.solrj.SolrRequest.process(SolrRequest.java:224)
[junit4] > at org.apache.solr.cloud.RollingRestartTest.restartWithRolesTest(RollingRestartTest.java:74)
[junit4] > at org.apache.solr.cloud.RollingRestartTest.test(RollingRestartTest.java:53)
[junit4] > at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
[junit4] > at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)
[junit4] > at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
[junit4] > at java.base/java.lang.reflect.Method.invoke(Method.java:566)
[junit4] > at org.apache.solr.BaseDistributedSearchTestCase$ShardsRepeatRule$ShardsFixedStatement.callStatement(BaseDistributedSearchTestCase.java:1082)
[junit4] > at org.apache.solr.BaseDistributedSearchTestCase$ShardsRepeatRule$ShardsStatement.evaluate(BaseDistributedSearchTestCase.java:1054)
[junit4] > at java.base/java.lang.Thread.run(Thread.java:834)
[junit4] > Caused by: java.net.SocketTimeoutException: Read timed out
[junit4] > at java.base/java.net.SocketInputStream.socketRead0(Native Method)
[junit4] > at java.base/java.net.SocketInputStream.socketRead(SocketInputStream.java:115)
[junit4] > at java.base/java.net.SocketInputStream.read(SocketInputStream.java:168)
[junit4] > at java.base/java.net.SocketInputStream.read(SocketInputStream.java:140)
[junit4] > at java.base/sun.security.ssl.SSLSocketInputRecord.read(SSLSocketInputRecord.java:448)
[junit4] > at java.base/sun.security.ssl.SSLSocketInputRecord.bytesInCompletePacket(SSLSocketInputRecord.java:68)
[junit4] > at java.base/sun.security.ssl.SSLSocketImpl.readApplicationRecord(SSLSocketImpl.java:1104)
[junit4] > at java.base/sun.security.ssl.SSLSocketImpl$AppInputStream.read(SSLSocketImpl.java:823)
[junit4] > at org.apache.http.impl.io.SessionInputBufferImpl.streamRead(SessionInputBufferImpl.java:137)
[junit4] > at org.apache.http.impl.io.SessionInputBufferImpl.fillBuffer(SessionInputBufferImpl.java:153)
[junit4] > at org.apache.http.impl.io.SessionInputBufferImpl.readLine(SessionInputBufferImpl.java:280)
[junit4] > at org.apache.http.impl.conn.DefaultHttpResponseParser.parseHead(DefaultHttpResponseParser.java:138)
[junit4] > at org.apache.http.impl.conn.DefaultHttpResponseParser.parseHead(DefaultHttpResponseParser.java:56)
[junit4] > at org.apache.http.impl.io.AbstractMessageParser.parse(AbstractMessageParser.java:259)
[junit4] > at org.apache.http.impl.DefaultBHttpClientConnection.receiveResponseHeader(DefaultBHttpClientConnection.java:163)
[junit4] > at org.apache.http.impl.conn.CPoolProxy.receiveResponseHeader(CPoolProxy.java:157)
[junit4] > at org.apache.http.protocol.HttpRequestExecutor.doReceiveResponse(HttpRequestExecutor.java:273)
[junit4] > at org.apache.http.protocol.HttpRequestExecutor.execute(HttpRequestExecutor.java:125)
[junit4] > at org.apache.http.impl.execchain.MainClientExec.execute(MainClientExec.java:272)
[junit4] > at org.apache.http.impl.execchain.ProtocolExec.execute(ProtocolExec.java:186)
[junit4] > at org.apache.http.impl.execchain.RetryExec.execute(RetryExec.java:89)
[junit4] > at org.apache.http.impl.execchain.RedirectExec.execute(RedirectExec.java:110)
[junit4] > at org.apache.http.impl.client.InternalHttpClient.doExecute(InternalHttpClient.java:185)
[junit4] > at org.apache.http.impl.client.CloseableHttpClient.execute(CloseableHttpClient.java:83)
[junit4] > at org.apache.http.impl.client.CloseableHttpClient.execute(CloseableHttpClient.java:56)
[junit4] > at org.apache.solr.client.solrj.impl.HttpSolrClient.executeMethod(HttpSolrClient.java:564)
[junit4] > ... 51 more
[junit4] 2> NOTE: leaving temporary files on disk at: /home/jenkins/jenkins-slave/workspace/Lucene-Solr-NightlyTests-master/checkout/solr/build/solr-core/test/J1/temp/solr.cloud.RollingRestartTest_4BE768247389A4C8-001
[junit4] 2> Dec 19, 2019 10:39:15 AM com.carrotsearch.randomizedtesting.ThreadLeakControl checkThreadLeaks
[junit4] 2> WARNING: Will linger awaiting termination of 1 leaked thread(s).
[junit4] 2> NOTE: test params are: codec=Asserting(Lucene84): {}, docValues:{}, maxPointsInLeafNode=113, maxMBSortInHeap=6.831978556645135, sim=Asserting(org.apache.lucene.search.similarities.AssertingSimilarity@60177352), locale=es-CR, timezone=Europe/San_Marino
[junit4] 2> NOTE: Linux 4.4.0-112-generic amd64/Oracle Corporation 11.0.4 (64-bit)/cpus=4,threads=1,free=220498984,total=512753664
[junit4] 2> NOTE: All tests run in this JVM: [TestDistributedTracing, LeaderElectionContextKeyTest, ChaosMonkeyNothingIsSafeTest, TestFieldTypeResource, TestCloudRecovery2, SuggestComponentContextFilterQueryTest, TestUtilizeNode, TestFunctionQuery, SolrMetricsIntegrationTest, TestBlobHandler, ChaosMonkeySafeLeaderWithPullReplicasTest, CategoryRoutedAliasUpdateProcessorTest, DisMaxRequestHandlerTest, TestDFISimilarityFactory, TestLocalFSCloudBackupRestore, TestFieldCollectionResource, TestNumericTerms32, ExitableDirectoryReaderTest, CdcrRequestHandlerTest, IndexSchemaTest, TestDynamicLoadingUrl, HttpTriggerListenerTest, TestSnapshotCloudManager, CacheHeaderTest, TestPhraseSuggestions, SolrCLIZkUtilsTest, TestSchemalessBufferedUpdates, TestComplexPhraseLeadingWildcard, TestExpandComponent, TestSimComputePlanAction, TestShardHandlerFactory, ShardRoutingTest, TestFastOutputStream, TestPolicyCloud, TestShortCircuitedRequests, TestCustomSort, DocValuesMissingTest, RegexBytesRefFilterTest, TestManagedSynonymGraphFilterFactory, SolrInfoBeanTest, SchemaVersionSpecificBehaviorTest, FastVectorHighlighterTest, PathHierarchyTokenizerFactoryTest, TestInPlaceUpdateWithRouteField, HighlighterTest, TaggerTest, AtomicUpdateProcessorFactoryTest, TestHashPartitioner, ResponseBuilderTest, BadComponentTest, ShardsWhitelistTest, CheckHdfsIndexTest, TestManagedResourceStorage, DistributedVersionInfoTest, MetricsHandlerTest, TestReplicationHandlerBackup, TestHighlightDedupGrouping, LeaderVoteWaitTimeoutTest, TestSortByMinMaxFunction, TestLogWatcher, OverseerStatusTest, TolerantUpdateProcessorTest, ConcurrentDeleteAndCreateCollectionTest, TestCodecSupport, TestDistribIDF, TestUtils, TestSweetSpotSimilarityFactory, CdcrBootstrapTest, BasicFunctionalityTest, FullHLLTest, PackageManagerCLITest, TermVectorComponentTest, NodeMutatorTest, DistributedTermsComponentTest, TestStressCloudBlindAtomicUpdates, RootFieldTest, TestSubQueryTransformerCrossCore, BigEndianAscendingWordSerializerTest, 
TestSolrXml, TestAtomicUpdateErrorCases, AsyncCallRequestStatusResponseTest, TestRandomFlRTGCloud, SynonymTokenizerTest, TestRTimerTree, TestRandomFaceting, AutoscalingHistoryHandlerTest, OverseerCollectionConfigSetProcessorTest, TestInPlaceUpdatesStandalone, TestCloudPivotFacet, AnalyticsQueryTest, ConfigureRecoveryStrategyTest, CurrencyFieldTypeTest, JavaBinAtomicUpdateMultivalueTest, UniqFieldsUpdateProcessorFactoryTest, BasicDistributedZk2Test, HdfsChaosMonkeyNothingIsSafeTest, TestSolrCloudWithDelegationTokens, PreAnalyzedFieldManagedSchemaCloudTest, TestSlowCompositeReaderWrapper, SolrIndexConfigTest, SignificantTermsQParserPluginTest, StatelessScriptUpdateProcessorFactoryTest, TestWriterPerf, SystemInfoHandlerTest, HttpPartitionOnCommitTest, TestFacetMethods, SolrLogAuditLoggerPluginTest, TestExactSharedStatsCacheCloud, MultiSolrCloudTestCaseTest, LargeFieldTest, ClassificationUpdateProcessorTest, TestLegacyNumericRangeQueryBuilder, TestUseDocValuesAsStored2, TestLRUStatsCache, TestTlogReplica, BasicAuthIntegrationTest, TestDistributedSearch, TestCloudDeleteByQuery, ReplaceNodeNoTargetTest, RecoveryZkTest, RulesTest, LeaderFailoverAfterPartitionTest, TestLeaderElectionZkExpiry, TestSolrJ, UpdateParamsTest, TriggerIntegrationTest, TestCloudJSONFacetSKG, TestPullReplica, QueryParsingTest, IndexSizeTriggerTest, TestCoreContainer, DistributedMLTComponentTest, AddBlockUpdateTest, TestSimPolicyCloud, TestManagedSchemaAPI, TestStressReorder, TestSurroundQueryParser, SolrCoreTest, TestSmileRequest, ConjunctionSolrSpellCheckerTest, TestAnalyzedSuggestions, NoCacheHeaderTest, TestCloudManagedSchema, RequiredFieldsTest, MoveReplicaHDFSTest, ReplicationFactorTest, RollingRestartTest]
[junit4] Completed [480/891 (2!)] on J1 in 151.83s, 1 test, 1 error <<< FAILURES!
[...truncated 54269 lines...]
[JENKINS] Lucene-Solr-NightlyTests-master - Build # 2050 - Still
Unstable
Posted by Apache Jenkins Server <je...@builds.apache.org>.
Build: https://builds.apache.org/job/Lucene-Solr-NightlyTests-master/2050/
3 tests failed.
FAILED: org.apache.solr.cloud.api.collections.ShardSplitTest.testSplitWithChaosMonkey
Error Message:
Address already in use
Stack Trace:
java.net.BindException: Address already in use
at __randomizedtesting.SeedInfo.seed([975BF56478DCBE23:1C7C26B539DA15A7]:0)
at java.base/sun.nio.ch.Net.bind0(Native Method)
at java.base/sun.nio.ch.Net.bind(Net.java:461)
at java.base/sun.nio.ch.Net.bind(Net.java:453)
at java.base/sun.nio.ch.ServerSocketChannelImpl.bind(ServerSocketChannelImpl.java:227)
at java.base/sun.nio.ch.ServerSocketAdaptor.bind(ServerSocketAdaptor.java:80)
at org.eclipse.jetty.server.ServerConnector.openAcceptChannel(ServerConnector.java:342)
at org.eclipse.jetty.server.ServerConnector.open(ServerConnector.java:307)
at org.eclipse.jetty.server.AbstractNetworkConnector.doStart(AbstractNetworkConnector.java:80)
at org.eclipse.jetty.server.ServerConnector.doStart(ServerConnector.java:231)
at org.eclipse.jetty.util.component.AbstractLifeCycle.start(AbstractLifeCycle.java:72)
at org.eclipse.jetty.server.Server.doStart(Server.java:385)
at org.eclipse.jetty.util.component.AbstractLifeCycle.start(AbstractLifeCycle.java:72)
at org.apache.solr.client.solrj.embedded.JettySolrRunner.retryOnPortBindFailure(JettySolrRunner.java:565)
at org.apache.solr.client.solrj.embedded.JettySolrRunner.start(JettySolrRunner.java:504)
at org.apache.solr.client.solrj.embedded.JettySolrRunner.start(JettySolrRunner.java:472)
at org.apache.solr.cloud.api.collections.ShardSplitTest.testSplitWithChaosMonkey(ShardSplitTest.java:499)
at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)
at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
at java.base/java.lang.reflect.Method.invoke(Method.java:566)
at com.carrotsearch.randomizedtesting.RandomizedRunner.invoke(RandomizedRunner.java:1754)
at com.carrotsearch.randomizedtesting.RandomizedRunner$8.evaluate(RandomizedRunner.java:942)
at com.carrotsearch.randomizedtesting.RandomizedRunner$9.evaluate(RandomizedRunner.java:978)
at com.carrotsearch.randomizedtesting.RandomizedRunner$10.evaluate(RandomizedRunner.java:992)
at org.apache.solr.BaseDistributedSearchTestCase$ShardsRepeatRule$ShardsFixedStatement.callStatement(BaseDistributedSearchTestCase.java:1082)
at org.apache.solr.BaseDistributedSearchTestCase$ShardsRepeatRule$ShardsStatement.evaluate(BaseDistributedSearchTestCase.java:1054)
at com.carrotsearch.randomizedtesting.rules.SystemPropertiesRestoreRule$1.evaluate(SystemPropertiesRestoreRule.java:57)
at org.apache.lucene.util.TestRuleSetupTeardownChained$1.evaluate(TestRuleSetupTeardownChained.java:49)
at org.apache.lucene.util.AbstractBeforeAfterRule$1.evaluate(AbstractBeforeAfterRule.java:45)
at org.apache.lucene.util.TestRuleThreadAndTestName$1.evaluate(TestRuleThreadAndTestName.java:48)
at org.apache.lucene.util.TestRuleIgnoreAfterMaxFailures$1.evaluate(TestRuleIgnoreAfterMaxFailures.java:64)
at org.apache.lucene.util.TestRuleMarkFailure$1.evaluate(TestRuleMarkFailure.java:47)
at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
at com.carrotsearch.randomizedtesting.ThreadLeakControl$StatementRunner.run(ThreadLeakControl.java:368)
at com.carrotsearch.randomizedtesting.ThreadLeakControl.forkTimeoutingTask(ThreadLeakControl.java:817)
at com.carrotsearch.randomizedtesting.ThreadLeakControl$3.evaluate(ThreadLeakControl.java:468)
at com.carrotsearch.randomizedtesting.RandomizedRunner.runSingleTest(RandomizedRunner.java:951)
at com.carrotsearch.randomizedtesting.RandomizedRunner$5.evaluate(RandomizedRunner.java:836)
at com.carrotsearch.randomizedtesting.RandomizedRunner$6.evaluate(RandomizedRunner.java:887)
at com.carrotsearch.randomizedtesting.RandomizedRunner$7.evaluate(RandomizedRunner.java:898)
at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
at com.carrotsearch.randomizedtesting.rules.SystemPropertiesRestoreRule$1.evaluate(SystemPropertiesRestoreRule.java:57)
at org.apache.lucene.util.AbstractBeforeAfterRule$1.evaluate(AbstractBeforeAfterRule.java:45)
at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
at org.apache.lucene.util.TestRuleStoreClassName$1.evaluate(TestRuleStoreClassName.java:41)
at com.carrotsearch.randomizedtesting.rules.NoShadowingOrOverridesOnMethodsRule$1.evaluate(NoShadowingOrOverridesOnMethodsRule.java:40)
at com.carrotsearch.randomizedtesting.rules.NoShadowingOrOverridesOnMethodsRule$1.evaluate(NoShadowingOrOverridesOnMethodsRule.java:40)
at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
at org.apache.lucene.util.TestRuleAssertionsRequired$1.evaluate(TestRuleAssertionsRequired.java:53)
at org.apache.lucene.util.TestRuleMarkFailure$1.evaluate(TestRuleMarkFailure.java:47)
at org.apache.lucene.util.TestRuleIgnoreAfterMaxFailures$1.evaluate(TestRuleIgnoreAfterMaxFailures.java:64)
at org.apache.lucene.util.TestRuleIgnoreTestSuites$1.evaluate(TestRuleIgnoreTestSuites.java:54)
at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
at com.carrotsearch.randomizedtesting.ThreadLeakControl$StatementRunner.run(ThreadLeakControl.java:368)
at java.base/java.lang.Thread.run(Thread.java:834)
FAILED: junit.framework.TestSuite.org.apache.solr.cloud.api.collections.ShardSplitTest
Error Message:
10 threads leaked from SUITE scope at org.apache.solr.cloud.api.collections.ShardSplitTest: 1) Thread[id=105133, name=qtp1503050327-105133, state=TIMED_WAITING, group=TGRP-ShardSplitTest] at java.base@11.0.4/jdk.internal.misc.Unsafe.park(Native Method) at java.base@11.0.4/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:234) at java.base@11.0.4/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:2123) at app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) at app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:875) at app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:925) at java.base@11.0.4/java.lang.Thread.run(Thread.java:834) 2) Thread[id=105134, name=qtp1503050327-105134, state=TIMED_WAITING, group=TGRP-ShardSplitTest] at java.base@11.0.4/jdk.internal.misc.Unsafe.park(Native Method) at java.base@11.0.4/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:234) at java.base@11.0.4/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:2123) at app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) at app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:875) at app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:925) at java.base@11.0.4/java.lang.Thread.run(Thread.java:834) 3) Thread[id=105127, name=qtp1503050327-105127, state=RUNNABLE, group=TGRP-ShardSplitTest] at java.base@11.0.4/sun.nio.ch.EPoll.wait(Native Method) at java.base@11.0.4/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:120) at java.base@11.0.4/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:124) at java.base@11.0.4/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) at 
app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:472) at app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:409) at app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:360) at app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:184) at app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:171) at app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:135) at app//org.eclipse.jetty.io.ManagedSelector$$Lambda$385/0x0000000100529040.run(Unknown Source) at app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:806) at app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:938) at java.base@11.0.4/java.lang.Thread.run(Thread.java:834) 4) Thread[id=105135, name=Session-HouseKeeper-2e463cf8-1, state=TIMED_WAITING, group=TGRP-ShardSplitTest] at java.base@11.0.4/jdk.internal.misc.Unsafe.park(Native Method) at java.base@11.0.4/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:234) at java.base@11.0.4/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:2123) at java.base@11.0.4/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) at java.base@11.0.4/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) at java.base@11.0.4/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1054) at java.base@11.0.4/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1114) at java.base@11.0.4/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:628) at java.base@11.0.4/java.lang.Thread.run(Thread.java:834) 5) Thread[id=105128, name=qtp1503050327-105128, state=RUNNABLE, 
group=TGRP-ShardSplitTest] at java.base@11.0.4/sun.nio.ch.EPoll.wait(Native Method) at java.base@11.0.4/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:120) at java.base@11.0.4/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:124) at java.base@11.0.4/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) at app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:472) at app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:409) at app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:360) at app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:184) at app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:171) at app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:135) at app//org.eclipse.jetty.io.ManagedSelector$$Lambda$385/0x0000000100529040.run(Unknown Source) at app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:806) at app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:938) at java.base@11.0.4/java.lang.Thread.run(Thread.java:834) 6) Thread[id=105129, name=qtp1503050327-105129-acceptor-0@4f7a13b8-ServerConnector@5da935bd{HTTP/1.1,[http/1.1, h2c]}{127.0.0.1:44397}, state=RUNNABLE, group=TGRP-ShardSplitTest] at java.base@11.0.4/sun.nio.ch.ServerSocketChannelImpl.accept0(Native Method) at java.base@11.0.4/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:533) at java.base@11.0.4/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:285) at app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:385) at app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:701) at app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:806) at 
app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:938) at java.base@11.0.4/java.lang.Thread.run(Thread.java:834) 7) Thread[id=105204, name=Connector-Scheduler-5da935bd-1, state=WAITING, group=TGRP-ShardSplitTest] at java.base@11.0.4/jdk.internal.misc.Unsafe.park(Native Method) at java.base@11.0.4/java.util.concurrent.locks.LockSupport.park(LockSupport.java:194) at java.base@11.0.4/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:2081) at java.base@11.0.4/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) at java.base@11.0.4/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) at java.base@11.0.4/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1054) at java.base@11.0.4/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1114) at java.base@11.0.4/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:628) at java.base@11.0.4/java.lang.Thread.run(Thread.java:834) 8) Thread[id=105131, name=qtp1503050327-105131, state=TIMED_WAITING, group=TGRP-ShardSplitTest] at java.base@11.0.4/jdk.internal.misc.Unsafe.park(Native Method) at java.base@11.0.4/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:234) at java.base@11.0.4/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:2123) at app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) at app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:875) at app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:925) at java.base@11.0.4/java.lang.Thread.run(Thread.java:834) 9) Thread[id=105130, name=qtp1503050327-105130, state=TIMED_WAITING, group=TGRP-ShardSplitTest] at 
java.base@11.0.4/jdk.internal.misc.Unsafe.park(Native Method) at java.base@11.0.4/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:234) at java.base@11.0.4/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:2123) at app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) at app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:875) at app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:925) at java.base@11.0.4/java.lang.Thread.run(Thread.java:834) 10) Thread[id=105132, name=qtp1503050327-105132, state=TIMED_WAITING, group=TGRP-ShardSplitTest] at java.base@11.0.4/jdk.internal.misc.Unsafe.park(Native Method) at java.base@11.0.4/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:234) at java.base@11.0.4/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:2123) at app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) at app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:875) at app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:925) at java.base@11.0.4/java.lang.Thread.run(Thread.java:834)
Stack Trace:
com.carrotsearch.randomizedtesting.ThreadLeakError: 10 threads leaked from SUITE scope at org.apache.solr.cloud.api.collections.ShardSplitTest:
1) Thread[id=105133, name=qtp1503050327-105133, state=TIMED_WAITING, group=TGRP-ShardSplitTest]
at java.base@11.0.4/jdk.internal.misc.Unsafe.park(Native Method)
at java.base@11.0.4/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:234)
at java.base@11.0.4/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:2123)
at app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382)
at app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:875)
at app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:925)
at java.base@11.0.4/java.lang.Thread.run(Thread.java:834)
2) Thread[id=105134, name=qtp1503050327-105134, state=TIMED_WAITING, group=TGRP-ShardSplitTest]
at java.base@11.0.4/jdk.internal.misc.Unsafe.park(Native Method)
at java.base@11.0.4/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:234)
at java.base@11.0.4/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:2123)
at app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382)
at app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:875)
at app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:925)
at java.base@11.0.4/java.lang.Thread.run(Thread.java:834)
3) Thread[id=105127, name=qtp1503050327-105127, state=RUNNABLE, group=TGRP-ShardSplitTest]
at java.base@11.0.4/sun.nio.ch.EPoll.wait(Native Method)
at java.base@11.0.4/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:120)
at java.base@11.0.4/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:124)
at java.base@11.0.4/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141)
at app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:472)
at app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:409)
at app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:360)
at app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:184)
at app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:171)
at app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:135)
at app//org.eclipse.jetty.io.ManagedSelector$$Lambda$385/0x0000000100529040.run(Unknown Source)
at app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:806)
at app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:938)
at java.base@11.0.4/java.lang.Thread.run(Thread.java:834)
4) Thread[id=105135, name=Session-HouseKeeper-2e463cf8-1, state=TIMED_WAITING, group=TGRP-ShardSplitTest]
at java.base@11.0.4/jdk.internal.misc.Unsafe.park(Native Method)
at java.base@11.0.4/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:234)
at java.base@11.0.4/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:2123)
at java.base@11.0.4/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182)
at java.base@11.0.4/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
at java.base@11.0.4/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1054)
at java.base@11.0.4/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1114)
at java.base@11.0.4/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:628)
at java.base@11.0.4/java.lang.Thread.run(Thread.java:834)
5) Thread[id=105128, name=qtp1503050327-105128, state=RUNNABLE, group=TGRP-ShardSplitTest]
at java.base@11.0.4/sun.nio.ch.EPoll.wait(Native Method)
at java.base@11.0.4/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:120)
at java.base@11.0.4/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:124)
at java.base@11.0.4/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141)
at app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:472)
at app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:409)
at app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:360)
at app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:184)
at app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:171)
at app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:135)
at app//org.eclipse.jetty.io.ManagedSelector$$Lambda$385/0x0000000100529040.run(Unknown Source)
at app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:806)
at app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:938)
at java.base@11.0.4/java.lang.Thread.run(Thread.java:834)
6) Thread[id=105129, name=qtp1503050327-105129-acceptor-0@4f7a13b8-ServerConnector@5da935bd{HTTP/1.1,[http/1.1, h2c]}{127.0.0.1:44397}, state=RUNNABLE, group=TGRP-ShardSplitTest]
at java.base@11.0.4/sun.nio.ch.ServerSocketChannelImpl.accept0(Native Method)
at java.base@11.0.4/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:533)
at java.base@11.0.4/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:285)
at app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:385)
at app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:701)
at app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:806)
at app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:938)
at java.base@11.0.4/java.lang.Thread.run(Thread.java:834)
7) Thread[id=105204, name=Connector-Scheduler-5da935bd-1, state=WAITING, group=TGRP-ShardSplitTest]
at java.base@11.0.4/jdk.internal.misc.Unsafe.park(Native Method)
at java.base@11.0.4/java.util.concurrent.locks.LockSupport.park(LockSupport.java:194)
at java.base@11.0.4/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:2081)
at java.base@11.0.4/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170)
at java.base@11.0.4/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
at java.base@11.0.4/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1054)
at java.base@11.0.4/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1114)
at java.base@11.0.4/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:628)
at java.base@11.0.4/java.lang.Thread.run(Thread.java:834)
8) Thread[id=105131, name=qtp1503050327-105131, state=TIMED_WAITING, group=TGRP-ShardSplitTest]
at java.base@11.0.4/jdk.internal.misc.Unsafe.park(Native Method)
at java.base@11.0.4/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:234)
at java.base@11.0.4/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:2123)
at app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382)
at app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:875)
at app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:925)
at java.base@11.0.4/java.lang.Thread.run(Thread.java:834)
9) Thread[id=105130, name=qtp1503050327-105130, state=TIMED_WAITING, group=TGRP-ShardSplitTest]
at java.base@11.0.4/jdk.internal.misc.Unsafe.park(Native Method)
at java.base@11.0.4/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:234)
at java.base@11.0.4/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:2123)
at app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382)
at app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:875)
at app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:925)
at java.base@11.0.4/java.lang.Thread.run(Thread.java:834)
10) Thread[id=105132, name=qtp1503050327-105132, state=TIMED_WAITING, group=TGRP-ShardSplitTest]
at java.base@11.0.4/jdk.internal.misc.Unsafe.park(Native Method)
at java.base@11.0.4/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:234)
at java.base@11.0.4/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:2123)
at app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382)
at app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:875)
at app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:925)
at java.base@11.0.4/java.lang.Thread.run(Thread.java:834)
at __randomizedtesting.SeedInfo.seed([975BF56478DCBE23]:0)
FAILED: junit.framework.TestSuite.org.apache.solr.cloud.api.collections.ShardSplitTest
Error Message:
There are still zombie threads that couldn't be terminated: 1) Thread[id=105133, name=qtp1503050327-105133, state=TIMED_WAITING, group=TGRP-ShardSplitTest] at java.base@11.0.4/jdk.internal.misc.Unsafe.park(Native Method) at java.base@11.0.4/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:234) at java.base@11.0.4/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:2123) at app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) at app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:875) at app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:925) at java.base@11.0.4/java.lang.Thread.run(Thread.java:834) 2) Thread[id=105204, name=Connector-Scheduler-5da935bd-1, state=WAITING, group=TGRP-ShardSplitTest] at java.base@11.0.4/jdk.internal.misc.Unsafe.park(Native Method) at java.base@11.0.4/java.util.concurrent.locks.LockSupport.park(LockSupport.java:194) at java.base@11.0.4/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:2081) at java.base@11.0.4/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) at java.base@11.0.4/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) at java.base@11.0.4/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1054) at java.base@11.0.4/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1114) at java.base@11.0.4/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:628) at java.base@11.0.4/java.lang.Thread.run(Thread.java:834) 3) Thread[id=105131, name=qtp1503050327-105131, state=TIMED_WAITING, group=TGRP-ShardSplitTest] at java.base@11.0.4/jdk.internal.misc.Unsafe.park(Native Method) at 
java.base@11.0.4/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:234) at java.base@11.0.4/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:2123) at app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) at app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:875) at app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:925) at java.base@11.0.4/java.lang.Thread.run(Thread.java:834) 4) Thread[id=105134, name=qtp1503050327-105134, state=TIMED_WAITING, group=TGRP-ShardSplitTest] at java.base@11.0.4/jdk.internal.misc.Unsafe.park(Native Method) at java.base@11.0.4/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:234) at java.base@11.0.4/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:2123) at app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) at app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:875) at app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:925) at java.base@11.0.4/java.lang.Thread.run(Thread.java:834) 5) Thread[id=105127, name=qtp1503050327-105127, state=RUNNABLE, group=TGRP-ShardSplitTest] at java.base@11.0.4/sun.nio.ch.EPoll.wait(Native Method) at java.base@11.0.4/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:120) at java.base@11.0.4/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:124) at java.base@11.0.4/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) at app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:472) at app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:409) at app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:360) at 
app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:184) at app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:171) at app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:135) at app//org.eclipse.jetty.io.ManagedSelector$$Lambda$385/0x0000000100529040.run(Unknown Source) at app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:806) at app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:938) at java.base@11.0.4/java.lang.Thread.run(Thread.java:834) 6) Thread[id=105135, name=Session-HouseKeeper-2e463cf8-1, state=TIMED_WAITING, group=TGRP-ShardSplitTest] at java.base@11.0.4/jdk.internal.misc.Unsafe.park(Native Method) at java.base@11.0.4/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:234) at java.base@11.0.4/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:2123) at java.base@11.0.4/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) at java.base@11.0.4/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) at java.base@11.0.4/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1054) at java.base@11.0.4/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1114) at java.base@11.0.4/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:628) at java.base@11.0.4/java.lang.Thread.run(Thread.java:834) 7) Thread[id=105130, name=qtp1503050327-105130, state=TIMED_WAITING, group=TGRP-ShardSplitTest] at java.base@11.0.4/jdk.internal.misc.Unsafe.park(Native Method) at java.base@11.0.4/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:234) at 
java.base@11.0.4/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:2123) at app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) at app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:875) at app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:925) at java.base@11.0.4/java.lang.Thread.run(Thread.java:834) 8) Thread[id=105132, name=qtp1503050327-105132, state=TIMED_WAITING, group=TGRP-ShardSplitTest] at java.base@11.0.4/jdk.internal.misc.Unsafe.park(Native Method) at java.base@11.0.4/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:234) at java.base@11.0.4/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:2123) at app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) at app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:875) at app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:925) at java.base@11.0.4/java.lang.Thread.run(Thread.java:834) 9) Thread[id=105128, name=qtp1503050327-105128, state=RUNNABLE, group=TGRP-ShardSplitTest] at java.base@11.0.4/sun.nio.ch.EPoll.wait(Native Method) at java.base@11.0.4/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:120) at java.base@11.0.4/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:124) at java.base@11.0.4/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) at app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:472) at app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:409) at app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:360) at app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:184) at 
app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:171) at app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:135) at app//org.eclipse.jetty.io.ManagedSelector$$Lambda$385/0x0000000100529040.run(Unknown Source) at app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:806) at app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:938) at java.base@11.0.4/java.lang.Thread.run(Thread.java:834) 10) Thread[id=105129, name=qtp1503050327-105129, state=TIMED_WAITING, group=TGRP-ShardSplitTest] at java.base@11.0.4/jdk.internal.misc.Unsafe.park(Native Method) at java.base@11.0.4/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:234) at java.base@11.0.4/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:2123) at app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) at app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:875) at app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:925) at java.base@11.0.4/java.lang.Thread.run(Thread.java:834)
Stack Trace:
com.carrotsearch.randomizedtesting.ThreadLeakError: There are still zombie threads that couldn't be terminated:
1) Thread[id=105133, name=qtp1503050327-105133, state=TIMED_WAITING, group=TGRP-ShardSplitTest]
at java.base@11.0.4/jdk.internal.misc.Unsafe.park(Native Method)
at java.base@11.0.4/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:234)
at java.base@11.0.4/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:2123)
at app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382)
at app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:875)
at app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:925)
at java.base@11.0.4/java.lang.Thread.run(Thread.java:834)
2) Thread[id=105204, name=Connector-Scheduler-5da935bd-1, state=WAITING, group=TGRP-ShardSplitTest]
at java.base@11.0.4/jdk.internal.misc.Unsafe.park(Native Method)
at java.base@11.0.4/java.util.concurrent.locks.LockSupport.park(LockSupport.java:194)
at java.base@11.0.4/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:2081)
at java.base@11.0.4/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170)
at java.base@11.0.4/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
at java.base@11.0.4/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1054)
at java.base@11.0.4/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1114)
at java.base@11.0.4/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:628)
at java.base@11.0.4/java.lang.Thread.run(Thread.java:834)
3) Thread[id=105131, name=qtp1503050327-105131, state=TIMED_WAITING, group=TGRP-ShardSplitTest]
at java.base@11.0.4/jdk.internal.misc.Unsafe.park(Native Method)
at java.base@11.0.4/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:234)
at java.base@11.0.4/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:2123)
at app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382)
at app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:875)
at app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:925)
at java.base@11.0.4/java.lang.Thread.run(Thread.java:834)
4) Thread[id=105134, name=qtp1503050327-105134, state=TIMED_WAITING, group=TGRP-ShardSplitTest]
at java.base@11.0.4/jdk.internal.misc.Unsafe.park(Native Method)
at java.base@11.0.4/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:234)
at java.base@11.0.4/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:2123)
at app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382)
at app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:875)
at app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:925)
at java.base@11.0.4/java.lang.Thread.run(Thread.java:834)
5) Thread[id=105127, name=qtp1503050327-105127, state=RUNNABLE, group=TGRP-ShardSplitTest]
at java.base@11.0.4/sun.nio.ch.EPoll.wait(Native Method)
at java.base@11.0.4/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:120)
at java.base@11.0.4/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:124)
at java.base@11.0.4/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141)
at app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:472)
at app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:409)
at app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:360)
at app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:184)
at app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:171)
at app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:135)
at app//org.eclipse.jetty.io.ManagedSelector$$Lambda$385/0x0000000100529040.run(Unknown Source)
at app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:806)
at app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:938)
at java.base@11.0.4/java.lang.Thread.run(Thread.java:834)
6) Thread[id=105135, name=Session-HouseKeeper-2e463cf8-1, state=TIMED_WAITING, group=TGRP-ShardSplitTest]
at java.base@11.0.4/jdk.internal.misc.Unsafe.park(Native Method)
at java.base@11.0.4/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:234)
at java.base@11.0.4/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:2123)
at java.base@11.0.4/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182)
at java.base@11.0.4/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
at java.base@11.0.4/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1054)
at java.base@11.0.4/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1114)
at java.base@11.0.4/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:628)
at java.base@11.0.4/java.lang.Thread.run(Thread.java:834)
7) Thread[id=105130, name=qtp1503050327-105130, state=TIMED_WAITING, group=TGRP-ShardSplitTest]
at java.base@11.0.4/jdk.internal.misc.Unsafe.park(Native Method)
at java.base@11.0.4/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:234)
at java.base@11.0.4/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:2123)
at app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382)
at app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:875)
at app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:925)
at java.base@11.0.4/java.lang.Thread.run(Thread.java:834)
8) Thread[id=105132, name=qtp1503050327-105132, state=TIMED_WAITING, group=TGRP-ShardSplitTest]
at java.base@11.0.4/jdk.internal.misc.Unsafe.park(Native Method)
at java.base@11.0.4/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:234)
at java.base@11.0.4/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:2123)
at app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382)
at app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:875)
at app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:925)
at java.base@11.0.4/java.lang.Thread.run(Thread.java:834)
9) Thread[id=105128, name=qtp1503050327-105128, state=RUNNABLE, group=TGRP-ShardSplitTest]
at java.base@11.0.4/sun.nio.ch.EPoll.wait(Native Method)
at java.base@11.0.4/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:120)
at java.base@11.0.4/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:124)
at java.base@11.0.4/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141)
at app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:472)
at app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:409)
at app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:360)
at app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:184)
at app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:171)
at app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:135)
at app//org.eclipse.jetty.io.ManagedSelector$$Lambda$385/0x0000000100529040.run(Unknown Source)
at app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:806)
at app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:938)
at java.base@11.0.4/java.lang.Thread.run(Thread.java:834)
10) Thread[id=105129, name=qtp1503050327-105129, state=TIMED_WAITING, group=TGRP-ShardSplitTest]
at java.base@11.0.4/jdk.internal.misc.Unsafe.park(Native Method)
at java.base@11.0.4/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:234)
at java.base@11.0.4/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:2123)
at app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382)
at app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:875)
at app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:925)
at java.base@11.0.4/java.lang.Thread.run(Thread.java:834)
at __randomizedtesting.SeedInfo.seed([975BF56478DCBE23]:0)
Build Log:
[...truncated 15685 lines...]
[junit4] Suite: org.apache.solr.cloud.api.collections.ShardSplitTest
[junit4] 2> 6196725 INFO (SUITE-ShardSplitTest-seed#[975BF56478DCBE23]-worker) [ ] o.a.s.SolrTestCaseJ4 SecureRandom sanity checks: test.solr.allowed.securerandom=null & java.security.egd=file:/dev/./urandom
[junit4] 2> 6196727 INFO (SUITE-ShardSplitTest-seed#[975BF56478DCBE23]-worker) [ ] o.a.s.SolrTestCaseJ4 Created dataDir: /home/jenkins/jenkins-slave/workspace/Lucene-Solr-NightlyTests-master/checkout/solr/build/solr-core/test/J1/temp/solr.cloud.api.collections.ShardSplitTest_975BF56478DCBE23-001/data-dir-226-001
[junit4] 2> 6196727 WARN (SUITE-ShardSplitTest-seed#[975BF56478DCBE23]-worker) [ ] o.a.s.SolrTestCaseJ4 startTrackingSearchers: numOpens=81 numCloses=81
[junit4] 2> 6196727 INFO (SUITE-ShardSplitTest-seed#[975BF56478DCBE23]-worker) [ ] o.a.s.SolrTestCaseJ4 Using PointFields (NUMERIC_POINTS_SYSPROP=true) w/NUMERIC_DOCVALUES_SYSPROP=true
[junit4] 2> 6196728 INFO (SUITE-ShardSplitTest-seed#[975BF56478DCBE23]-worker) [ ] o.a.s.SolrTestCaseJ4 Randomized ssl (false) and clientAuth (false) via: @org.apache.solr.SolrTestCaseJ4$SuppressSSL(bugUrl="https://issues.apache.org/jira/browse/SOLR-5776")
[junit4] 2> 6196740 INFO (SUITE-ShardSplitTest-seed#[975BF56478DCBE23]-worker) [ ] o.a.s.BaseDistributedSearchTestCase Setting hostContext system property: /
[junit4] 2> 6196748 INFO (TEST-ShardSplitTest.testSplitStaticIndexReplicationLink-seed#[975BF56478DCBE23]) [ ] o.a.s.c.ZkTestServer STARTING ZK TEST SERVER
[junit4] 2> 6196760 INFO (ZkTestServer Run Thread) [ ] o.a.s.c.ZkTestServer client port:0.0.0.0/0.0.0.0:0
[junit4] 2> 6196761 INFO (ZkTestServer Run Thread) [ ] o.a.s.c.ZkTestServer Starting server
[junit4] 2> 6196850 INFO (TEST-ShardSplitTest.testSplitStaticIndexReplicationLink-seed#[975BF56478DCBE23]) [ ] o.a.s.c.ZkTestServer start zk server on port:45723
[junit4] 2> 6196850 INFO (TEST-ShardSplitTest.testSplitStaticIndexReplicationLink-seed#[975BF56478DCBE23]) [ ] o.a.s.c.ZkTestServer waitForServerUp: 127.0.0.1:45723
[junit4] 2> 6196850 INFO (TEST-ShardSplitTest.testSplitStaticIndexReplicationLink-seed#[975BF56478DCBE23]) [ ] o.a.s.c.ZkTestServer parse host and port list: 127.0.0.1:45723
[junit4] 2> 6196850 INFO (TEST-ShardSplitTest.testSplitStaticIndexReplicationLink-seed#[975BF56478DCBE23]) [ ] o.a.s.c.ZkTestServer connecting to 127.0.0.1 45723
[junit4] 2> 6196852 INFO (TEST-ShardSplitTest.testSplitStaticIndexReplicationLink-seed#[975BF56478DCBE23]) [ ] o.a.s.c.c.ConnectionManager Waiting for client to connect to ZooKeeper
[junit4] 2> 6196862 INFO (zkConnectionManagerCallback-15383-thread-1) [ ] o.a.s.c.c.ConnectionManager zkClient has connected
[junit4] 2> 6196862 INFO (TEST-ShardSplitTest.testSplitStaticIndexReplicationLink-seed#[975BF56478DCBE23]) [ ] o.a.s.c.c.ConnectionManager Client is connected to ZooKeeper
[junit4] 2> 6196864 INFO (TEST-ShardSplitTest.testSplitStaticIndexReplicationLink-seed#[975BF56478DCBE23]) [ ] o.a.s.c.c.ConnectionManager Waiting for client to connect to ZooKeeper
[junit4] 2> 6196865 INFO (zkConnectionManagerCallback-15385-thread-1) [ ] o.a.s.c.c.ConnectionManager zkClient has connected
[junit4] 2> 6196865 INFO (TEST-ShardSplitTest.testSplitStaticIndexReplicationLink-seed#[975BF56478DCBE23]) [ ] o.a.s.c.c.ConnectionManager Client is connected to ZooKeeper
[junit4] 2> 6196866 INFO (TEST-ShardSplitTest.testSplitStaticIndexReplicationLink-seed#[975BF56478DCBE23]) [ ] o.a.s.c.ZkTestServer put /home/jenkins/jenkins-slave/workspace/Lucene-Solr-NightlyTests-master/checkout/solr/core/src/test-files/solr/collection1/conf/solrconfig-tlog.xml to /configs/conf1/solrconfig.xml
[junit4] 2> 6196869 INFO (TEST-ShardSplitTest.testSplitStaticIndexReplicationLink-seed#[975BF56478DCBE23]) [ ] o.a.s.c.ZkTestServer put /home/jenkins/jenkins-slave/workspace/Lucene-Solr-NightlyTests-master/checkout/solr/core/src/test-files/solr/collection1/conf/schema15.xml to /configs/conf1/schema.xml
[junit4] 2> 6196883 INFO (TEST-ShardSplitTest.testSplitStaticIndexReplicationLink-seed#[975BF56478DCBE23]) [ ] o.a.s.c.ZkTestServer put /home/jenkins/jenkins-slave/workspace/Lucene-Solr-NightlyTests-master/checkout/solr/core/src/test-files/solr/collection1/conf/solrconfig.snippet.randomindexconfig.xml to /configs/conf1/solrconfig.snippet.randomindexconfig.xml
[junit4] 2> 6196885 INFO (TEST-ShardSplitTest.testSplitStaticIndexReplicationLink-seed#[975BF56478DCBE23]) [ ] o.a.s.c.ZkTestServer put /home/jenkins/jenkins-slave/workspace/Lucene-Solr-NightlyTests-master/checkout/solr/core/src/test-files/solr/collection1/conf/stopwords.txt to /configs/conf1/stopwords.txt
[junit4] 2> 6196886 INFO (TEST-ShardSplitTest.testSplitStaticIndexReplicationLink-seed#[975BF56478DCBE23]) [ ] o.a.s.c.ZkTestServer put /home/jenkins/jenkins-slave/workspace/Lucene-Solr-NightlyTests-master/checkout/solr/core/src/test-files/solr/collection1/conf/protwords.txt to /configs/conf1/protwords.txt
[junit4] 2> 6196896 INFO (TEST-ShardSplitTest.testSplitStaticIndexReplicationLink-seed#[975BF56478DCBE23]) [ ] o.a.s.c.ZkTestServer put /home/jenkins/jenkins-slave/workspace/Lucene-Solr-NightlyTests-master/checkout/solr/core/src/test-files/solr/collection1/conf/currency.xml to /configs/conf1/currency.xml
[junit4] 2> 6196898 INFO (TEST-ShardSplitTest.testSplitStaticIndexReplicationLink-seed#[975BF56478DCBE23]) [ ] o.a.s.c.ZkTestServer put /home/jenkins/jenkins-slave/workspace/Lucene-Solr-NightlyTests-master/checkout/solr/core/src/test-files/solr/collection1/conf/enumsConfig.xml to /configs/conf1/enumsConfig.xml
[junit4] 2> 6196907 INFO (TEST-ShardSplitTest.testSplitStaticIndexReplicationLink-seed#[975BF56478DCBE23]) [ ] o.a.s.c.ZkTestServer put /home/jenkins/jenkins-slave/workspace/Lucene-Solr-NightlyTests-master/checkout/solr/core/src/test-files/solr/collection1/conf/open-exchange-rates.json to /configs/conf1/open-exchange-rates.json
[junit4] 2> 6196909 INFO (TEST-ShardSplitTest.testSplitStaticIndexReplicationLink-seed#[975BF56478DCBE23]) [ ] o.a.s.c.ZkTestServer put /home/jenkins/jenkins-slave/workspace/Lucene-Solr-NightlyTests-master/checkout/solr/core/src/test-files/solr/collection1/conf/mapping-ISOLatin1Accent.txt to /configs/conf1/mapping-ISOLatin1Accent.txt
[junit4] 2> 6196910 INFO (TEST-ShardSplitTest.testSplitStaticIndexReplicationLink-seed#[975BF56478DCBE23]) [ ] o.a.s.c.ZkTestServer put /home/jenkins/jenkins-slave/workspace/Lucene-Solr-NightlyTests-master/checkout/solr/core/src/test-files/solr/collection1/conf/old_synonyms.txt to /configs/conf1/old_synonyms.txt
[junit4] 2> 6196920 INFO (TEST-ShardSplitTest.testSplitStaticIndexReplicationLink-seed#[975BF56478DCBE23]) [ ] o.a.s.c.ZkTestServer put /home/jenkins/jenkins-slave/workspace/Lucene-Solr-NightlyTests-master/checkout/solr/core/src/test-files/solr/collection1/conf/synonyms.txt to /configs/conf1/synonyms.txt
[junit4] 2> 6196921 INFO (TEST-ShardSplitTest.testSplitStaticIndexReplicationLink-seed#[975BF56478DCBE23]) [ ] o.a.s.c.AbstractFullDistribZkTestBase Will use NRT replicas unless explicitly asked otherwise
[junit4] 2> 6198235 WARN (TEST-ShardSplitTest.testSplitStaticIndexReplicationLink-seed#[975BF56478DCBE23]) [ ] o.e.j.s.h.g.GzipHandler minGzipSize of 0 is inefficient for short content, break even is size 23
[junit4] 2> 6198235 INFO (TEST-ShardSplitTest.testSplitStaticIndexReplicationLink-seed#[975BF56478DCBE23]) [ ] o.a.s.c.s.e.JettySolrRunner Start Jetty (configured port=0, binding port=0)
[junit4] 2> 6198235 INFO (TEST-ShardSplitTest.testSplitStaticIndexReplicationLink-seed#[975BF56478DCBE23]) [ ] o.a.s.c.s.e.JettySolrRunner Trying to start Jetty on port 0 try number 1 ...
[junit4] 2> 6198235 INFO (TEST-ShardSplitTest.testSplitStaticIndexReplicationLink-seed#[975BF56478DCBE23]) [ ] o.e.j.s.Server jetty-9.4.24.v20191120; built: 2019-11-20T21:37:49.771Z; git: 363d5f2df3a8a28de40604320230664b9c793c16; jvm 11.0.4+10-LTS
[junit4] 2> 6198236 INFO (TEST-ShardSplitTest.testSplitStaticIndexReplicationLink-seed#[975BF56478DCBE23]) [ ] o.e.j.s.session DefaultSessionIdManager workerName=node0
[junit4] 2> 6198236 INFO (TEST-ShardSplitTest.testSplitStaticIndexReplicationLink-seed#[975BF56478DCBE23]) [ ] o.e.j.s.session No SessionScavenger set, using defaults
[junit4] 2> 6198236 INFO (TEST-ShardSplitTest.testSplitStaticIndexReplicationLink-seed#[975BF56478DCBE23]) [ ] o.e.j.s.session node0 Scavenging every 660000ms
[junit4] 2> 6198237 INFO (TEST-ShardSplitTest.testSplitStaticIndexReplicationLink-seed#[975BF56478DCBE23]) [ ] o.e.j.s.h.ContextHandler Started o.e.j.s.ServletContextHandler@47f1790c{/,null,AVAILABLE}
[junit4] 2> 6198250 INFO (TEST-ShardSplitTest.testSplitStaticIndexReplicationLink-seed#[975BF56478DCBE23]) [ ] o.e.j.s.AbstractConnector Started ServerConnector@7ee7ee54{HTTP/1.1,[http/1.1, h2c]}{127.0.0.1:39083}
[junit4] 2> 6198250 INFO (TEST-ShardSplitTest.testSplitStaticIndexReplicationLink-seed#[975BF56478DCBE23]) [ ] o.e.j.s.Server Started @6198305ms
[junit4] 2> 6198250 INFO (TEST-ShardSplitTest.testSplitStaticIndexReplicationLink-seed#[975BF56478DCBE23]) [ ] o.a.s.c.s.e.JettySolrRunner Jetty properties: {hostContext=/, solr.data.dir=/home/jenkins/jenkins-slave/workspace/Lucene-Solr-NightlyTests-master/checkout/solr/build/solr-core/test/J1/temp/solr.cloud.api.collections.ShardSplitTest_975BF56478DCBE23-001/tempDir-001/control/data, hostPort=39083, coreRootDirectory=/home/jenkins/jenkins-slave/workspace/Lucene-Solr-NightlyTests-master/checkout/solr/build/solr-core/test/J1/../../../../../../../../../../../home/jenkins/jenkins-slave/workspace/Lucene-Solr-NightlyTests-master/checkout/solr/build/solr-core/test/J1/temp/solr.cloud.api.collections.ShardSplitTest_975BF56478DCBE23-001/control-001/cores, replicaType=NRT}
[junit4] 2> 6198250 ERROR (TEST-ShardSplitTest.testSplitStaticIndexReplicationLink-seed#[975BF56478DCBE23]) [ ] o.a.s.u.StartupLoggingUtils Missing Java Option solr.log.dir. Logging may be missing or incomplete.
[junit4] 2> 6198250 INFO (TEST-ShardSplitTest.testSplitStaticIndexReplicationLink-seed#[975BF56478DCBE23]) [ ] o.a.s.s.SolrDispatchFilter Using logger factory org.apache.logging.slf4j.Log4jLoggerFactory
[junit4] 2> 6198250 INFO (TEST-ShardSplitTest.testSplitStaticIndexReplicationLink-seed#[975BF56478DCBE23]) [ ] o.a.s.s.SolrDispatchFilter ___ _ Welcome to Apache Solr™ version 9.0.0
[junit4] 2> 6198250 INFO (TEST-ShardSplitTest.testSplitStaticIndexReplicationLink-seed#[975BF56478DCBE23]) [ ] o.a.s.s.SolrDispatchFilter / __| ___| |_ _ Starting in cloud mode on port null
[junit4] 2> 6198250 INFO (TEST-ShardSplitTest.testSplitStaticIndexReplicationLink-seed#[975BF56478DCBE23]) [ ] o.a.s.s.SolrDispatchFilter \__ \/ _ \ | '_| Install dir: null
[junit4] 2> 6198250 INFO (TEST-ShardSplitTest.testSplitStaticIndexReplicationLink-seed#[975BF56478DCBE23]) [ ] o.a.s.s.SolrDispatchFilter |___/\___/_|_| Start time: 2019-12-20T23:28:38.906567Z
[junit4] 2> 6198251 INFO (TEST-ShardSplitTest.testSplitStaticIndexReplicationLink-seed#[975BF56478DCBE23]) [ ] o.a.s.c.c.ConnectionManager Waiting for client to connect to ZooKeeper
[junit4] 2> 6198252 INFO (zkConnectionManagerCallback-15387-thread-1) [ ] o.a.s.c.c.ConnectionManager zkClient has connected
[junit4] 2> 6198252 INFO (TEST-ShardSplitTest.testSplitStaticIndexReplicationLink-seed#[975BF56478DCBE23]) [ ] o.a.s.c.c.ConnectionManager Client is connected to ZooKeeper
[junit4] 2> 6198354 INFO (TEST-ShardSplitTest.testSplitStaticIndexReplicationLink-seed#[975BF56478DCBE23]) [ ] o.a.s.s.SolrDispatchFilter Loading solr.xml from SolrHome (not found in ZooKeeper)
[junit4] 2> 6198354 INFO (TEST-ShardSplitTest.testSplitStaticIndexReplicationLink-seed#[975BF56478DCBE23]) [ ] o.a.s.c.SolrXmlConfig Loading container configuration from /home/jenkins/jenkins-slave/workspace/Lucene-Solr-NightlyTests-master/checkout/solr/build/solr-core/test/J1/temp/solr.cloud.api.collections.ShardSplitTest_975BF56478DCBE23-001/control-001/solr.xml
[junit4] 2> 6198356 INFO (TEST-ShardSplitTest.testSplitStaticIndexReplicationLink-seed#[975BF56478DCBE23]) [ ] o.a.s.c.SolrXmlConfig Configuration parameter autoReplicaFailoverWorkLoopDelay is ignored
[junit4] 2> 6198356 INFO (TEST-ShardSplitTest.testSplitStaticIndexReplicationLink-seed#[975BF56478DCBE23]) [ ] o.a.s.c.SolrXmlConfig Configuration parameter autoReplicaFailoverBadNodeExpiration is ignored
[junit4] 2> 6198358 INFO (TEST-ShardSplitTest.testSplitStaticIndexReplicationLink-seed#[975BF56478DCBE23]) [ ] o.a.s.c.SolrXmlConfig MBean server found: com.sun.jmx.mbeanserver.JmxMBeanServer@1af134, but no JMX reporters were configured - adding default JMX reporter.
[junit4] 2> 6198628 INFO (TEST-ShardSplitTest.testSplitStaticIndexReplicationLink-seed#[975BF56478DCBE23]) [ ] o.a.s.h.c.HttpShardHandlerFactory Host whitelist initialized: WhitelistHostChecker [whitelistHosts=null, whitelistHostCheckingEnabled=false]
[junit4] 2> 6198629 WARN (TEST-ShardSplitTest.testSplitStaticIndexReplicationLink-seed#[975BF56478DCBE23]) [ ] o.e.j.u.s.S.config Trusting all certificates configured for Client@30152872[provider=null,keyStore=null,trustStore=null]
[junit4] 2> 6198629 WARN (TEST-ShardSplitTest.testSplitStaticIndexReplicationLink-seed#[975BF56478DCBE23]) [ ] o.e.j.u.s.S.config No Client EndPointIdentificationAlgorithm configured for Client@30152872[provider=null,keyStore=null,trustStore=null]
[junit4] 2> 6198649 WARN (TEST-ShardSplitTest.testSplitStaticIndexReplicationLink-seed#[975BF56478DCBE23]) [ ] o.e.j.u.s.S.config Trusting all certificates configured for Client@6a901391[provider=null,keyStore=null,trustStore=null]
[junit4] 2> 6198649 WARN (TEST-ShardSplitTest.testSplitStaticIndexReplicationLink-seed#[975BF56478DCBE23]) [ ] o.e.j.u.s.S.config No Client EndPointIdentificationAlgorithm configured for Client@6a901391[provider=null,keyStore=null,trustStore=null]
[junit4] 2> 6198650 INFO (TEST-ShardSplitTest.testSplitStaticIndexReplicationLink-seed#[975BF56478DCBE23]) [ ] o.a.s.c.ZkContainer Zookeeper client=127.0.0.1:45723/solr
[junit4] 2> 6198651 INFO (TEST-ShardSplitTest.testSplitStaticIndexReplicationLink-seed#[975BF56478DCBE23]) [ ] o.a.s.c.c.ConnectionManager Waiting for client to connect to ZooKeeper
[junit4] 2> 6198665 INFO (zkConnectionManagerCallback-15394-thread-1) [ ] o.a.s.c.c.ConnectionManager zkClient has connected
[junit4] 2> 6198665 INFO (TEST-ShardSplitTest.testSplitStaticIndexReplicationLink-seed#[975BF56478DCBE23]) [ ] o.a.s.c.c.ConnectionManager Client is connected to ZooKeeper
[junit4] 2> 6198767 INFO (TEST-ShardSplitTest.testSplitStaticIndexReplicationLink-seed#[975BF56478DCBE23]) [n:127.0.0.1:39083_ ] o.a.s.c.c.ConnectionManager Waiting for client to connect to ZooKeeper
[junit4] 2> 6198768 INFO (zkConnectionManagerCallback-15396-thread-1) [ ] o.a.s.c.c.ConnectionManager zkClient has connected
[junit4] 2> 6198768 INFO (TEST-ShardSplitTest.testSplitStaticIndexReplicationLink-seed#[975BF56478DCBE23]) [n:127.0.0.1:39083_ ] o.a.s.c.c.ConnectionManager Client is connected to ZooKeeper
[junit4] 2> 6198942 INFO (TEST-ShardSplitTest.testSplitStaticIndexReplicationLink-seed#[975BF56478DCBE23]) [n:127.0.0.1:39083_ ] o.a.s.c.OverseerElectionContext I am going to be the leader 127.0.0.1:39083_
[junit4] 2> 6198942 INFO (TEST-ShardSplitTest.testSplitStaticIndexReplicationLink-seed#[975BF56478DCBE23]) [n:127.0.0.1:39083_ ] o.a.s.c.Overseer Overseer (id=75938761680224260-127.0.0.1:39083_-n_0000000000) starting
[junit4] 2> 6198959 DEBUG (OverseerCollectionConfigSetProcessor-75938761680224260-127.0.0.1:39083_-n_0000000000) [n:127.0.0.1:39083_ ] o.a.s.c.OverseerTaskProcessor Process current queue of overseer operations
[junit4] 2> 6198960 INFO (OverseerStateUpdate-75938761680224260-127.0.0.1:39083_-n_0000000000) [n:127.0.0.1:39083_ ] o.a.s.c.Overseer Starting to work on the main queue : 127.0.0.1:39083_
[junit4] 2> 6198960 INFO (TEST-ShardSplitTest.testSplitStaticIndexReplicationLink-seed#[975BF56478DCBE23]) [n:127.0.0.1:39083_ ] o.a.s.c.ZkController Register node as live in ZooKeeper:/live_nodes/127.0.0.1:39083_
[junit4] 2> 6198961 INFO (OverseerStateUpdate-75938761680224260-127.0.0.1:39083_-n_0000000000) [n:127.0.0.1:39083_ ] o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (0) -> (1)
[junit4] 2> 6198962 DEBUG (OverseerCollectionConfigSetProcessor-75938761680224260-127.0.0.1:39083_-n_0000000000) [n:127.0.0.1:39083_ ] o.a.s.c.OverseerTaskProcessor Cleaning up work-queue. #Running tasks: 0 #Completed tasks: 0
[junit4] 2> 6198962 DEBUG (OverseerCollectionConfigSetProcessor-75938761680224260-127.0.0.1:39083_-n_0000000000) [n:127.0.0.1:39083_ ] o.a.s.c.OverseerTaskProcessor RunningTasks: []
[junit4] 2> 6198962 DEBUG (OverseerCollectionConfigSetProcessor-75938761680224260-127.0.0.1:39083_-n_0000000000) [n:127.0.0.1:39083_ ] o.a.s.c.OverseerTaskProcessor BlockedTasks: []
[junit4] 2> 6198962 DEBUG (OverseerCollectionConfigSetProcessor-75938761680224260-127.0.0.1:39083_-n_0000000000) [n:127.0.0.1:39083_ ] o.a.s.c.OverseerTaskProcessor CompletedTasks: []
[junit4] 2> 6198962 INFO (OverseerCollectionConfigSetProcessor-75938761680224260-127.0.0.1:39083_-n_0000000000) [n:127.0.0.1:39083_ ] o.a.s.c.OverseerTaskProcessor RunningZKTasks: []
[junit4] 2> 6198967 WARN (TEST-ShardSplitTest.testSplitStaticIndexReplicationLink-seed#[975BF56478DCBE23]) [n:127.0.0.1:39083_ ] o.a.s.c.CoreContainer Not all security plugins configured! authentication=disabled authorization=disabled. Solr is only as secure as you make it. Consider configuring authentication/authorization before exposing Solr to users internal or external. See https://s.apache.org/solrsecurity for more info
[junit4] 2> 6199021 INFO (TEST-ShardSplitTest.testSplitStaticIndexReplicationLink-seed#[975BF56478DCBE23]) [n:127.0.0.1:39083_ ] o.a.s.h.a.MetricsHistoryHandler No .system collection, keeping metrics history in memory.
[junit4] 2> 6199043 INFO (TEST-ShardSplitTest.testSplitStaticIndexReplicationLink-seed#[975BF56478DCBE23]) [n:127.0.0.1:39083_ ] o.a.s.m.r.SolrJmxReporter JMX monitoring for 'solr.node' (registry 'solr.node') enabled at server: com.sun.jmx.mbeanserver.JmxMBeanServer@1af134
[junit4] 2> 6199053 INFO (TEST-ShardSplitTest.testSplitStaticIndexReplicationLink-seed#[975BF56478DCBE23]) [n:127.0.0.1:39083_ ] o.a.s.m.r.SolrJmxReporter JMX monitoring for 'solr.jvm' (registry 'solr.jvm') enabled at server: com.sun.jmx.mbeanserver.JmxMBeanServer@1af134
[junit4] 2> 6199053 INFO (TEST-ShardSplitTest.testSplitStaticIndexReplicationLink-seed#[975BF56478DCBE23]) [n:127.0.0.1:39083_ ] o.a.s.m.r.SolrJmxReporter JMX monitoring for 'solr.jetty' (registry 'solr.jetty') enabled at server: com.sun.jmx.mbeanserver.JmxMBeanServer@1af134
[junit4] 2> 6199055 INFO (TEST-ShardSplitTest.testSplitStaticIndexReplicationLink-seed#[975BF56478DCBE23]) [n:127.0.0.1:39083_ ] o.a.s.p.PackageLoader /packages.json updated to version -1
[junit4] 2> 6199056 INFO (TEST-ShardSplitTest.testSplitStaticIndexReplicationLink-seed#[975BF56478DCBE23]) [n:127.0.0.1:39083_ ] o.a.s.c.CorePropertiesLocator Found 0 core definitions underneath /home/jenkins/jenkins-slave/workspace/Lucene-Solr-NightlyTests-master/checkout/solr/build/solr-core/test/J1/../../../../../../../../../../../home/jenkins/jenkins-slave/workspace/Lucene-Solr-NightlyTests-master/checkout/solr/build/solr-core/test/J1/temp/solr.cloud.api.collections.ShardSplitTest_975BF56478DCBE23-001/control-001/cores
[junit4] 2> 6199090 INFO (TEST-ShardSplitTest.testSplitStaticIndexReplicationLink-seed#[975BF56478DCBE23]) [ ] o.a.s.c.c.ConnectionManager Waiting for client to connect to ZooKeeper
[junit4] 2> 6199091 INFO (zkConnectionManagerCallback-15405-thread-1) [ ] o.a.s.c.c.ConnectionManager zkClient has connected
[junit4] 2> 6199091 INFO (TEST-ShardSplitTest.testSplitStaticIndexReplicationLink-seed#[975BF56478DCBE23]) [ ] o.a.s.c.c.ConnectionManager Client is connected to ZooKeeper
[junit4] 2> 6199092 INFO (TEST-ShardSplitTest.testSplitStaticIndexReplicationLink-seed#[975BF56478DCBE23]) [ ] o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (0) -> (1)
[junit4] 2> 6199093 INFO (TEST-ShardSplitTest.testSplitStaticIndexReplicationLink-seed#[975BF56478DCBE23]) [ ] o.a.s.c.s.i.ZkClientClusterStateProvider Cluster at 127.0.0.1:45723/solr ready
[junit4] 2> 6199095 INFO (qtp1861252594-102919) [n:127.0.0.1:39083_ ] o.a.s.h.a.CollectionsHandler Invoked Collection Action :create with params collection.configName=conf1&name=control_collection&nrtReplicas=1&action=CREATE&numShards=1&createNodeSet=127.0.0.1:39083_&wt=javabin&version=2 and sendToOCPQueue=true
[junit4] 2> 6199097 DEBUG (OverseerCollectionConfigSetProcessor-75938761680224260-127.0.0.1:39083_-n_0000000000) [n:127.0.0.1:39083_ ] o.a.s.c.OverseerTaskProcessor Got 1 tasks from work-queue : [[org.apache.solr.cloud.OverseerTaskQueue$QueueEvent@2e684ecf]]
[junit4] 2> 6199097 DEBUG (OverseerCollectionConfigSetProcessor-75938761680224260-127.0.0.1:39083_-n_0000000000) [n:127.0.0.1:39083_ ] o.a.s.c.OverseerTaskProcessor Marked task [/overseer/collection-queue-work/qn-0000000000] as running
[junit4] 2> 6199097 DEBUG (OverseerCollectionConfigSetProcessor-75938761680224260-127.0.0.1:39083_-n_0000000000) [n:127.0.0.1:39083_ ] o.a.s.c.OverseerTaskProcessor Overseer Collection Message Handler: Get the message id:/overseer/collection-queue-work/qn-0000000000 message:{
[junit4] 2> "name":"control_collection",
[junit4] 2> "fromApi":"true",
[junit4] 2> "collection.configName":"conf1",
[junit4] 2> "numShards":"1",
[junit4] 2> "createNodeSet":"127.0.0.1:39083_",
[junit4] 2> "nrtReplicas":"1",
[junit4] 2> "stateFormat":"2",
[junit4] 2> "replicationFactor":"1",
[junit4] 2> "operation":"create"}
[junit4] 2> 6199097 DEBUG (OverseerThreadFactory-13359-thread-1-processing-n:127.0.0.1:39083_) [n:127.0.0.1:39083_ ] o.a.s.c.OverseerTaskProcessor Runner processing /overseer/collection-queue-work/qn-0000000000
[junit4] 2> 6199097 DEBUG (OverseerThreadFactory-13359-thread-1-processing-n:127.0.0.1:39083_) [n:127.0.0.1:39083_ ] o.a.s.c.a.c.OverseerCollectionMessageHandler OverseerCollectionMessageHandler.processMessage : create , {
[junit4] 2> "name":"control_collection",
[junit4] 2> "fromApi":"true",
[junit4] 2> "collection.configName":"conf1",
[junit4] 2> "numShards":"1",
[junit4] 2> "createNodeSet":"127.0.0.1:39083_",
[junit4] 2> "nrtReplicas":"1",
[junit4] 2> "stateFormat":"2",
[junit4] 2> "replicationFactor":"1",
[junit4] 2> "operation":"create"}
[junit4] 2> 6199097 DEBUG (OverseerCollectionConfigSetProcessor-75938761680224260-127.0.0.1:39083_-n_0000000000) [n:127.0.0.1:39083_ ] o.a.s.c.OverseerTaskProcessor Cleaning up work-queue. #Running tasks: 1 #Completed tasks: 0
[junit4] 2> 6199097 DEBUG (OverseerCollectionConfigSetProcessor-75938761680224260-127.0.0.1:39083_-n_0000000000) [n:127.0.0.1:39083_ ] o.a.s.c.OverseerTaskProcessor RunningTasks: [/overseer/collection-queue-work/qn-0000000000]
[junit4] 2> 6199097 DEBUG (OverseerCollectionConfigSetProcessor-75938761680224260-127.0.0.1:39083_-n_0000000000) [n:127.0.0.1:39083_ ] o.a.s.c.OverseerTaskProcessor BlockedTasks: []
[junit4] 2> 6199097 DEBUG (OverseerCollectionConfigSetProcessor-75938761680224260-127.0.0.1:39083_-n_0000000000) [n:127.0.0.1:39083_ ] o.a.s.c.OverseerTaskProcessor CompletedTasks: []
[junit4] 2> 6199097 INFO (OverseerCollectionConfigSetProcessor-75938761680224260-127.0.0.1:39083_-n_0000000000) [n:127.0.0.1:39083_ ] o.a.s.c.OverseerTaskProcessor RunningZKTasks: [/overseer/collection-queue-work/qn-0000000000]
[junit4] 2> 6199098 INFO (OverseerThreadFactory-13359-thread-1-processing-n:127.0.0.1:39083_) [n:127.0.0.1:39083_ ] o.a.s.c.a.c.CreateCollectionCmd Create collection control_collection
[junit4] 2> 6199098 DEBUG (OverseerThreadFactory-13359-thread-1-processing-n:127.0.0.1:39083_) [n:127.0.0.1:39083_ ] o.a.s.c.a.c.OverseerCollectionMessageHandler creating collections conf node /collections/control_collection
[junit4] 2> 6199099 DEBUG (OverseerThreadFactory-13359-thread-1-processing-n:127.0.0.1:39083_) [n:127.0.0.1:39083_ ] o.a.s.c.a.c.CreateCollectionCmd Check for collection zkNode:control_collection
[junit4] 2> 6199099 DEBUG (OverseerThreadFactory-13359-thread-1-processing-n:127.0.0.1:39083_) [n:127.0.0.1:39083_ ] o.a.s.c.a.c.CreateCollectionCmd Collection zkNode exists
[junit4] 2> 6199122 DEBUG (OverseerStateUpdate-75938761680224260-127.0.0.1:39083_-n_0000000000) [n:127.0.0.1:39083_ ] o.a.s.c.Overseer processMessage: queueSize: 1, message = {
[junit4] 2> "name":"control_collection",
[junit4] 2> "fromApi":"true",
[junit4] 2> "collection.configName":"conf1",
[junit4] 2> "numShards":"1",
[junit4] 2> "createNodeSet":"127.0.0.1:39083_",
[junit4] 2> "nrtReplicas":"1",
[junit4] 2> "stateFormat":"2",
[junit4] 2> "replicationFactor":"1",
[junit4] 2> "operation":"create"} current state version: 0
[junit4] 2> 6199122 DEBUG (OverseerStateUpdate-75938761680224260-127.0.0.1:39083_-n_0000000000) [n:127.0.0.1:39083_ ] o.a.s.c.o.ClusterStateMutator building a new cName: control_collection
[junit4] 2> 6199123 DEBUG (OverseerStateUpdate-75938761680224260-127.0.0.1:39083_-n_0000000000) [n:127.0.0.1:39083_ ] o.a.s.c.o.ZkStateWriter going to create_collection /collections/control_collection/state.json
[junit4] 2> 6199222 DEBUG (OverseerThreadFactory-13359-thread-1-processing-n:127.0.0.1:39083_) [n:127.0.0.1:39083_ ] o.a.s.c.a.c.CreateCollectionCmd Creating SolrCores for new collection control_collection, shardNames [shard1] , message : {
[junit4] 2> "name":"control_collection",
[junit4] 2> "fromApi":"true",
[junit4] 2> "collection.configName":"conf1",
[junit4] 2> "numShards":"1",
[junit4] 2> "createNodeSet":"127.0.0.1:39083_",
[junit4] 2> "nrtReplicas":"1",
[junit4] 2> "stateFormat":"2",
[junit4] 2> "replicationFactor":"1",
[junit4] 2> "operation":"create"}
[junit4] 2> 6199224 DEBUG (OverseerThreadFactory-13359-thread-1-processing-n:127.0.0.1:39083_) [n:127.0.0.1:39083_ ] o.a.s.c.a.c.CreateCollectionCmd Creating core control_collection_shard1_replica_n1 as part of shard shard1 of collection control_collection on 127.0.0.1:39083_
[junit4] 2> 6199238 INFO (qtp1861252594-102921) [n:127.0.0.1:39083_ x:control_collection_shard1_replica_n1 ] o.a.s.h.a.CoreAdminOperation core create command qt=/admin/cores&collection.configName=conf1&newCollection=true&name=control_collection_shard1_replica_n1&action=CREATE&numShards=1&collection=control_collection&shard=shard1&wt=javabin&version=2&replicaType=NRT
[junit4] 2> 6199238 INFO (qtp1861252594-102921) [n:127.0.0.1:39083_ x:control_collection_shard1_replica_n1 ] o.a.s.c.TransientSolrCoreCacheDefault Allocating transient cache for 4 transient cores
[junit4] 2> 6199241 DEBUG (OverseerStateUpdate-75938761680224260-127.0.0.1:39083_-n_0000000000) [n:127.0.0.1:39083_ ] o.a.s.c.Overseer processMessage: queueSize: 1, message = {
[junit4] 2> "core":"control_collection_shard1_replica_n1",
[junit4] 2> "roles":null,
[junit4] 2> "base_url":"http://127.0.0.1:39083",
[junit4] 2> "node_name":"127.0.0.1:39083_",
[junit4] 2> "numShards":"1",
[junit4] 2> "state":"down",
[junit4] 2> "shard":"shard1",
[junit4] 2> "collection":"control_collection",
[junit4] 2> "type":"NRT",
[junit4] 2> "operation":"state"} current state version: 0
[junit4] 2> 6199241 DEBUG (OverseerStateUpdate-75938761680224260-127.0.0.1:39083_-n_0000000000) [n:127.0.0.1:39083_ ] o.a.s.c.o.ReplicaMutator Update state numShards=1 message={
[junit4] 2> "core":"control_collection_shard1_replica_n1",
[junit4] 2> "roles":null,
[junit4] 2> "base_url":"http://127.0.0.1:39083",
[junit4] 2> "node_name":"127.0.0.1:39083_",
[junit4] 2> "numShards":"1",
[junit4] 2> "state":"down",
[junit4] 2> "shard":"shard1",
[junit4] 2> "collection":"control_collection",
[junit4] 2> "type":"NRT",
[junit4] 2> "operation":"state"}
[junit4] 2> 6199242 DEBUG (OverseerStateUpdate-75938761680224260-127.0.0.1:39083_-n_0000000000) [n:127.0.0.1:39083_ ] o.a.s.c.o.ReplicaMutator Will update state for replica: core_node2:{
[junit4] 2> "core":"control_collection_shard1_replica_n1",
[junit4] 2> "base_url":"http://127.0.0.1:39083",
[junit4] 2> "node_name":"127.0.0.1:39083_",
[junit4] 2> "state":"down",
[junit4] 2> "type":"NRT"}
[junit4] 2> 6199242 DEBUG (OverseerStateUpdate-75938761680224260-127.0.0.1:39083_-n_0000000000) [n:127.0.0.1:39083_ ] o.a.s.c.o.ReplicaMutator Collection is now: DocCollection(control_collection//collections/control_collection/state.json/0)={
[junit4] 2> "pullReplicas":"0",
[junit4] 2> "replicationFactor":"1",
[junit4] 2> "router":{"name":"compositeId"},
[junit4] 2> "maxShardsPerNode":"1",
[junit4] 2> "autoAddReplicas":"false",
[junit4] 2> "nrtReplicas":"1",
[junit4] 2> "tlogReplicas":"0",
[junit4] 2> "shards":{"shard1":{
[junit4] 2> "range":"80000000-7fffffff",
[junit4] 2> "state":"active",
[junit4] 2> "replicas":{"core_node2":{
[junit4] 2> "core":"control_collection_shard1_replica_n1",
[junit4] 2> "base_url":"http://127.0.0.1:39083",
[junit4] 2> "node_name":"127.0.0.1:39083_",
[junit4] 2> "state":"down",
[junit4] 2> "type":"NRT"}}}}}
[junit4] 2> 6199342 DEBUG (OverseerStateUpdate-75938761680224260-127.0.0.1:39083_-n_0000000000) [n:127.0.0.1:39083_ ] o.a.s.c.o.ZkStateWriter going to update_collection /collections/control_collection/state.json version: 0
[junit4] 2> 6200262 INFO (qtp1861252594-102921) [n:127.0.0.1:39083_ c:control_collection s:shard1 x:control_collection_shard1_replica_n1 ] o.a.s.c.SolrConfig Using Lucene MatchVersion: 9.0.0
[junit4] 2> 6200301 INFO (qtp1861252594-102921) [n:127.0.0.1:39083_ c:control_collection s:shard1 x:control_collection_shard1_replica_n1 ] o.a.s.s.IndexSchema [control_collection_shard1_replica_n1] Schema name=test
[junit4] 2> 6200360 INFO (qtp1861252594-102921) [n:127.0.0.1:39083_ c:control_collection s:shard1 x:control_collection_shard1_replica_n1 ] o.a.s.s.IndexSchema Loaded schema test/1.6 with uniqueid field id
[junit4] 2> 6200403 INFO (qtp1861252594-102921) [n:127.0.0.1:39083_ c:control_collection s:shard1 x:control_collection_shard1_replica_n1 ] o.a.s.c.CoreContainer Creating SolrCore 'control_collection_shard1_replica_n1' using configuration from collection control_collection, trusted=true
[junit4] 2> 6200404 INFO (qtp1861252594-102921) [n:127.0.0.1:39083_ c:control_collection s:shard1 x:control_collection_shard1_replica_n1 ] o.a.s.m.r.SolrJmxReporter JMX monitoring for 'solr.core.control_collection.shard1.replica_n1' (registry 'solr.core.control_collection.shard1.replica_n1') enabled at server: com.sun.jmx.mbeanserver.JmxMBeanServer@1af134
[junit4] 2> 6200404 INFO (qtp1861252594-102921) [n:127.0.0.1:39083_ c:control_collection s:shard1 x:control_collection_shard1_replica_n1 ] o.a.s.c.SolrCore [[control_collection_shard1_replica_n1] ] Opening new SolrCore at [/home/jenkins/jenkins-slave/workspace/Lucene-Solr-NightlyTests-master/checkout/solr/build/solr-core/test/J1/temp/solr.cloud.api.collections.ShardSplitTest_975BF56478DCBE23-001/control-001/cores/control_collection_shard1_replica_n1], dataDir=[/home/jenkins/jenkins-slave/workspace/Lucene-Solr-NightlyTests-master/checkout/solr/build/solr-core/test/J1/../../../../../../../../../../../home/jenkins/jenkins-slave/workspace/Lucene-Solr-NightlyTests-master/checkout/solr/build/solr-core/test/J1/temp/solr.cloud.api.collections.ShardSplitTest_975BF56478DCBE23-001/control-001/cores/control_collection_shard1_replica_n1/data/]
[junit4] 2> 6200420 INFO (qtp1861252594-102921) [n:127.0.0.1:39083_ c:control_collection s:shard1 x:control_collection_shard1_replica_n1 ] o.a.s.u.RandomMergePolicy RandomMergePolicy wrapping class org.apache.lucene.index.LogDocMergePolicy: [LogDocMergePolicy: minMergeSize=1000, mergeFactor=41, maxMergeSize=9223372036854775807, maxMergeSizeForForcedMerge=9223372036854775807, calibrateSizeByDeletes=true, maxMergeDocs=2147483647, maxCFSSegmentSizeMB=8.796093022207999E12, noCFSRatio=0.5740786203152833]
[junit4] 2> 6200436 WARN (qtp1861252594-102921) [n:127.0.0.1:39083_ c:control_collection s:shard1 x:control_collection_shard1_replica_n1 ] o.a.s.c.RequestHandlers INVALID paramSet a in requestHandler {type = requestHandler,name = /dump,class = DumpRequestHandler,attributes = {initParams=a, name=/dump, class=DumpRequestHandler},args = {defaults={a=A, b=B}}}
[junit4] 2> 6200508 INFO (qtp1861252594-102921) [n:127.0.0.1:39083_ c:control_collection s:shard1 x:control_collection_shard1_replica_n1 ] o.a.s.u.UpdateHandler Using UpdateLog implementation: org.apache.solr.update.UpdateLog
[junit4] 2> 6200508 INFO (qtp1861252594-102921) [n:127.0.0.1:39083_ c:control_collection s:shard1 x:control_collection_shard1_replica_n1 ] o.a.s.u.UpdateLog Initializing UpdateLog: dataDir= defaultSyncLevel=FLUSH numRecordsToKeep=100 maxNumLogsToKeep=10 numVersionBuckets=65536
[junit4] 2> 6200511 INFO (qtp1861252594-102921) [n:127.0.0.1:39083_ c:control_collection s:shard1 x:control_collection_shard1_replica_n1 ] o.a.s.u.CommitTracker Hard AutoCommit: disabled
[junit4] 2> 6200511 INFO (qtp1861252594-102921) [n:127.0.0.1:39083_ c:control_collection s:shard1 x:control_collection_shard1_replica_n1 ] o.a.s.u.CommitTracker Soft AutoCommit: disabled
[junit4] 2> 6200512 INFO (qtp1861252594-102921) [n:127.0.0.1:39083_ c:control_collection s:shard1 x:control_collection_shard1_replica_n1 ] o.a.s.u.RandomMergePolicy RandomMergePolicy wrapping class org.apache.lucene.index.AlcoholicMergePolicy: [AlcoholicMergePolicy: minMergeSize=0, mergeFactor=10, maxMergeSize=81134429, maxMergeSizeForForcedMerge=9223372036854775807, calibrateSizeByDeletes=true, maxMergeDocs=2147483647, maxCFSSegmentSizeMB=8.796093022207999E12, noCFSRatio=0.1]
[junit4] 2> 6200513 INFO (qtp1861252594-102921) [n:127.0.0.1:39083_ c:control_collection s:shard1 x:control_collection_shard1_replica_n1 ] o.a.s.s.SolrIndexSearcher Opening [Searcher@5bc4b760[control_collection_shard1_replica_n1] main]
[junit4] 2> 6200515 INFO (qtp1861252594-102921) [n:127.0.0.1:39083_ c:control_collection s:shard1 x:control_collection_shard1_replica_n1 ] o.a.s.r.ManagedResourceStorage Configured ZooKeeperStorageIO with znodeBase: /configs/conf1
[junit4] 2> 6200515 INFO (qtp1861252594-102921) [n:127.0.0.1:39083_ c:control_collection s:shard1 x:control_collection_shard1_replica_n1 ] o.a.s.r.ManagedResourceStorage Loaded null at path _rest_managed.json using ZooKeeperStorageIO:path=/configs/conf1
[junit4] 2> 6200515 INFO (qtp1861252594-102921) [n:127.0.0.1:39083_ c:control_collection s:shard1 x:control_collection_shard1_replica_n1 ] o.a.s.h.ReplicationHandler Commits will be reserved for 10000ms.
[junit4] 2> 6200516 INFO (qtp1861252594-102921) [n:127.0.0.1:39083_ c:control_collection s:shard1 x:control_collection_shard1_replica_n1 ] o.a.s.u.UpdateLog Could not find max version in index or recent updates, using new clock 1653483263672451072
[junit4] 2> 6200518 INFO (searcherExecutor-13364-thread-1-processing-n:127.0.0.1:39083_ x:control_collection_shard1_replica_n1 c:control_collection s:shard1) [n:127.0.0.1:39083_ c:control_collection s:shard1 x:control_collection_shard1_replica_n1 ] o.a.s.c.SolrCore [control_collection_shard1_replica_n1] Registered new searcher Searcher@5bc4b760[control_collection_shard1_replica_n1] main{ExitableDirectoryReader(UninvertingDirectoryReader())}
[junit4] 2> 6200534 INFO (qtp1861252594-102921) [n:127.0.0.1:39083_ c:control_collection s:shard1 x:control_collection_shard1_replica_n1 ] o.a.s.c.ZkShardTerms Successful update of terms at /collections/control_collection/terms/shard1 to Terms{values={core_node2=0}, version=0}
[junit4] 2> 6200534 INFO (qtp1861252594-102921) [n:127.0.0.1:39083_ c:control_collection s:shard1 x:control_collection_shard1_replica_n1 ] o.a.s.c.ShardLeaderElectionContextBase make sure parent is created /collections/control_collection/leaders/shard1
[junit4] 2> 6200550 INFO (qtp1861252594-102921) [n:127.0.0.1:39083_ c:control_collection s:shard1 x:control_collection_shard1_replica_n1 ] o.a.s.c.ShardLeaderElectionContext Enough replicas found to continue.
[junit4] 2> 6200550 INFO (qtp1861252594-102921) [n:127.0.0.1:39083_ c:control_collection s:shard1 x:control_collection_shard1_replica_n1 ] o.a.s.c.ShardLeaderElectionContext I may be the new leader - try and sync
[junit4] 2> 6200550 INFO (qtp1861252594-102921) [n:127.0.0.1:39083_ c:control_collection s:shard1 x:control_collection_shard1_replica_n1 ] o.a.s.c.SyncStrategy Sync replicas to http://127.0.0.1:39083/control_collection_shard1_replica_n1/
[junit4] 2> 6200550 INFO (qtp1861252594-102921) [n:127.0.0.1:39083_ c:control_collection s:shard1 x:control_collection_shard1_replica_n1 ] o.a.s.c.SyncStrategy Sync Success - now sync replicas to me
[junit4] 2> 6200550 INFO (qtp1861252594-102921) [n:127.0.0.1:39083_ c:control_collection s:shard1 x:control_collection_shard1_replica_n1 ] o.a.s.c.SyncStrategy http://127.0.0.1:39083/control_collection_shard1_replica_n1/ has no replicas
[junit4] 2> 6200550 INFO (qtp1861252594-102921) [n:127.0.0.1:39083_ c:control_collection s:shard1 x:control_collection_shard1_replica_n1 ] o.a.s.c.ShardLeaderElectionContextBase Creating leader registration node /collections/control_collection/leaders/shard1/leader after winning as /collections/control_collection/leader_elect/shard1/election/75938761680224260-core_node2-n_0000000000
[junit4] 2> 6200552 INFO (qtp1861252594-102921) [n:127.0.0.1:39083_ c:control_collection s:shard1 x:control_collection_shard1_replica_n1 ] o.a.s.c.ShardLeaderElectionContext I am the new leader: http://127.0.0.1:39083/control_collection_shard1_replica_n1/ shard1
[junit4] 2> 6200552 DEBUG (OverseerStateUpdate-75938761680224260-127.0.0.1:39083_-n_0000000000) [n:127.0.0.1:39083_ ] o.a.s.c.Overseer processMessage: queueSize: 1, message = {
[junit4] 2> "operation":"leader",
[junit4] 2> "shard":"shard1",
[junit4] 2> "collection":"control_collection",
[junit4] 2> "base_url":"http://127.0.0.1:39083",
[junit4] 2> "core":"control_collection_shard1_replica_n1",
[junit4] 2> "state":"active"} current state version: 0
[junit4] 2> 6200653 DEBUG (OverseerStateUpdate-75938761680224260-127.0.0.1:39083_-n_0000000000) [n:127.0.0.1:39083_ ] o.a.s.c.o.ZkStateWriter going to update_collection /collections/control_collection/state.json version: 1
[junit4] 2> 6200653 INFO (zkCallback-15395-thread-1) [ ] o.a.s.c.c.ZkStateReader A cluster state change: [WatchedEvent state:SyncConnected type:NodeDataChanged path:/collections/control_collection/state.json] for collection [control_collection] has occurred - updating... (live nodes size: [1])
[junit4] 2> 6200653 INFO (zkCallback-15395-thread-2) [ ] o.a.s.c.c.ZkStateReader A cluster state change: [WatchedEvent state:SyncConnected type:NodeDataChanged path:/collections/control_collection/state.json] for collection [control_collection] has occurred - updating... (live nodes size: [1])
[junit4] 2> 6200654 INFO (qtp1861252594-102921) [n:127.0.0.1:39083_ c:control_collection s:shard1 x:control_collection_shard1_replica_n1 ] o.a.s.c.ZkController I am the leader, no recovery necessary
[junit4] 2> 6200655 DEBUG (OverseerStateUpdate-75938761680224260-127.0.0.1:39083_-n_0000000000) [n:127.0.0.1:39083_ ] o.a.s.c.Overseer processMessage: queueSize: 1, message = {
[junit4] 2> "core":"control_collection_shard1_replica_n1",
[junit4] 2> "core_node_name":"core_node2",
[junit4] 2> "roles":null,
[junit4] 2> "base_url":"http://127.0.0.1:39083",
[junit4] 2> "node_name":"127.0.0.1:39083_",
[junit4] 2> "numShards":"1",
[junit4] 2> "state":"active",
[junit4] 2> "shard":"shard1",
[junit4] 2> "collection":"control_collection",
[junit4] 2> "type":"NRT",
[junit4] 2> "operation":"state"} current state version: 0
[junit4] 2> 6200655 DEBUG (OverseerStateUpdate-75938761680224260-127.0.0.1:39083_-n_0000000000) [n:127.0.0.1:39083_ ] o.a.s.c.o.ReplicaMutator Update state numShards=1 message={
[junit4] 2> "core":"control_collection_shard1_replica_n1",
[junit4] 2> "core_node_name":"core_node2",
[junit4] 2> "roles":null,
[junit4] 2> "base_url":"http://127.0.0.1:39083",
[junit4] 2> "node_name":"127.0.0.1:39083_",
[junit4] 2> "numShards":"1",
[junit4] 2> "state":"active",
[junit4] 2> "shard":"shard1",
[junit4] 2> "collection":"control_collection",
[junit4] 2> "type":"NRT",
[junit4] 2> "operation":"state"}
[junit4] 2> 6200655 DEBUG (OverseerStateUpdate-75938761680224260-127.0.0.1:39083_-n_0000000000) [n:127.0.0.1:39083_ ] o.a.s.c.o.ReplicaMutator Will update state for replica: core_node2:{
[junit4] 2> "core":"control_collection_shard1_replica_n1",
[junit4] 2> "base_url":"http://127.0.0.1:39083",
[junit4] 2> "node_name":"127.0.0.1:39083_",
[junit4] 2> "state":"active",
[junit4] 2> "type":"NRT",
[junit4] 2> "leader":"true"}
[junit4] 2> 6200655 DEBUG (OverseerStateUpdate-75938761680224260-127.0.0.1:39083_-n_0000000000) [n:127.0.0.1:39083_ ] o.a.s.c.o.ReplicaMutator Collection is now: DocCollection(control_collection//collections/control_collection/state.json/2)={
[junit4] 2> "pullReplicas":"0",
[junit4] 2> "replicationFactor":"1",
[junit4] 2> "router":{"name":"compositeId"},
[junit4] 2> "maxShardsPerNode":"1",
[junit4] 2> "autoAddReplicas":"false",
[junit4] 2> "nrtReplicas":"1",
[junit4] 2> "tlogReplicas":"0",
[junit4] 2> "shards":{"shard1":{
[junit4] 2> "range":"80000000-7fffffff",
[junit4] 2> "state":"active",
[junit4] 2> "replicas":{"core_node2":{
[junit4] 2> "core":"control_collection_shard1_replica_n1",
[junit4] 2> "base_url":"http://127.0.0.1:39083",
[junit4] 2> "node_name":"127.0.0.1:39083_",
[junit4] 2> "state":"active",
[junit4] 2> "type":"NRT",
[junit4] 2> "leader":"true"}}}}}
[junit4] 2> 6200656 INFO (qtp1861252594-102921) [n:127.0.0.1:39083_ c:control_collection s:shard1 x:control_collection_shard1_replica_n1 ] o.a.s.s.HttpSolrCall [admin] webapp=null path=/admin/cores params={qt=/admin/cores&collection.configName=conf1&newCollection=true&name=control_collection_shard1_replica_n1&action=CREATE&numShards=1&collection=control_collection&shard=shard1&wt=javabin&version=2&replicaType=NRT} status=0 QTime=1430
[junit4] 2> 6200656 DEBUG (OverseerThreadFactory-13359-thread-1-processing-n:127.0.0.1:39083_) [n:127.0.0.1:39083_ ] o.a.s.c.a.c.CreateCollectionCmd Fin
[...truncated too long message...]
va.base@11.0.4/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
[junit4] > at java.base@11.0.4/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1054)
[junit4] > at java.base@11.0.4/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1114)
[junit4] > at java.base@11.0.4/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:628)
[junit4] > at java.base@11.0.4/java.lang.Thread.run(Thread.java:834)
[junit4] > 3) Thread[id=105131, name=qtp1503050327-105131, state=TIMED_WAITING, group=TGRP-ShardSplitTest]
[junit4] > at java.base@11.0.4/jdk.internal.misc.Unsafe.park(Native Method)
[junit4] > at java.base@11.0.4/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:234)
[junit4] > at java.base@11.0.4/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:2123)
[junit4] > at app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382)
[junit4] > at app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:875)
[junit4] > at app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:925)
[junit4] > at java.base@11.0.4/java.lang.Thread.run(Thread.java:834)
[junit4] > 4) Thread[id=105134, name=qtp1503050327-105134, state=TIMED_WAITING, group=TGRP-ShardSplitTest]
[junit4] > at java.base@11.0.4/jdk.internal.misc.Unsafe.park(Native Method)
[junit4] > at java.base@11.0.4/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:234)
[junit4] > at java.base@11.0.4/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:2123)
[junit4] > at app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382)
[junit4] > at app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:875)
[junit4] > at app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:925)
[junit4] > at java.base@11.0.4/java.lang.Thread.run(Thread.java:834)
[junit4] > 5) Thread[id=105127, name=qtp1503050327-105127, state=RUNNABLE, group=TGRP-ShardSplitTest]
[junit4] > at java.base@11.0.4/sun.nio.ch.EPoll.wait(Native Method)
[junit4] > at java.base@11.0.4/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:120)
[junit4] > at java.base@11.0.4/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:124)
[junit4] > at java.base@11.0.4/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141)
[junit4] > at app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:472)
[junit4] > at app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:409)
[junit4] > at app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:360)
[junit4] > at app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:184)
[junit4] > at app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:171)
[junit4] > at app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:135)
[junit4] > at app//org.eclipse.jetty.io.ManagedSelector$$Lambda$385/0x0000000100529040.run(Unknown Source)
[junit4] > at app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:806)
[junit4] > at app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:938)
[junit4] > at java.base@11.0.4/java.lang.Thread.run(Thread.java:834)
[junit4] > 6) Thread[id=105135, name=Session-HouseKeeper-2e463cf8-1, state=TIMED_WAITING, group=TGRP-ShardSplitTest]
[junit4] > at java.base@11.0.4/jdk.internal.misc.Unsafe.park(Native Method)
[junit4] > at java.base@11.0.4/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:234)
[junit4] > at java.base@11.0.4/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:2123)
[junit4] > at java.base@11.0.4/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182)
[junit4] > at java.base@11.0.4/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
[junit4] > at java.base@11.0.4/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1054)
[junit4] > at java.base@11.0.4/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1114)
[junit4] > at java.base@11.0.4/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:628)
[junit4] > at java.base@11.0.4/java.lang.Thread.run(Thread.java:834)
[junit4] > 7) Thread[id=105130, name=qtp1503050327-105130, state=TIMED_WAITING, group=TGRP-ShardSplitTest]
[junit4] > at java.base@11.0.4/jdk.internal.misc.Unsafe.park(Native Method)
[junit4] > at java.base@11.0.4/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:234)
[junit4] > at java.base@11.0.4/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:2123)
[junit4] > at app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382)
[junit4] > at app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:875)
[junit4] > at app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:925)
[junit4] > at java.base@11.0.4/java.lang.Thread.run(Thread.java:834)
[junit4] > 8) Thread[id=105132, name=qtp1503050327-105132, state=TIMED_WAITING, group=TGRP-ShardSplitTest]
[junit4] > at java.base@11.0.4/jdk.internal.misc.Unsafe.park(Native Method)
[junit4] > at java.base@11.0.4/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:234)
[junit4] > at java.base@11.0.4/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:2123)
[junit4] > at app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382)
[junit4] > at app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:875)
[junit4] > at app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:925)
[junit4] > at java.base@11.0.4/java.lang.Thread.run(Thread.java:834)
[junit4] > 9) Thread[id=105128, name=qtp1503050327-105128, state=RUNNABLE, group=TGRP-ShardSplitTest]
[junit4] > at java.base@11.0.4/sun.nio.ch.EPoll.wait(Native Method)
[junit4] > at java.base@11.0.4/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:120)
[junit4] > at java.base@11.0.4/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:124)
[junit4] > at java.base@11.0.4/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141)
[junit4] > at app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:472)
[junit4] > at app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:409)
[junit4] > at app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:360)
[junit4] > at app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:184)
[junit4] > at app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:171)
[junit4] > at app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:135)
[junit4] > at app//org.eclipse.jetty.io.ManagedSelector$$Lambda$385/0x0000000100529040.run(Unknown Source)
[junit4] > at app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:806)
[junit4] > at app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:938)
[junit4] > at java.base@11.0.4/java.lang.Thread.run(Thread.java:834)
[junit4] > 10) Thread[id=105129, name=qtp1503050327-105129, state=TIMED_WAITING, group=TGRP-ShardSplitTest]
[junit4] > at java.base@11.0.4/jdk.internal.misc.Unsafe.park(Native Method)
[junit4] > at java.base@11.0.4/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:234)
[junit4] > at java.base@11.0.4/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:2123)
[junit4] > at app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382)
[junit4] > at app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:875)
[junit4] > at app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:925)
[junit4] > at java.base@11.0.4/java.lang.Thread.run(Thread.java:834)
[junit4] > at __randomizedtesting.SeedInfo.seed([975BF56478DCBE23]:0)
[junit4] Completed [574/892 (1!)] on J1 in 631.59s, 11 tests, 3 errors <<< FAILURES!
[...truncated 53878 lines...]