You are viewing a plain text version of this content. The canonical link for it is here.
Posted to builds@lucene.apache.org by Apache Jenkins Server <je...@builds.apache.org> on 2020/07/23 18:29:44 UTC
[JENKINS] Lucene-Solr-NightlyTests-8.6 - Build # 23 - Still Unstable
Build: https://builds.apache.org/job/Lucene-Solr-NightlyTests-8.6/23/
3 tests failed.
FAILED: org.apache.solr.cloud.api.collections.ShardSplitTest.testSplitWithChaosMonkey
Error Message:
Address already in use
Stack Trace:
java.net.BindException: Address already in use
at __randomizedtesting.SeedInfo.seed([E7328758635BF3AA:6C155489225D582E]:0)
at sun.nio.ch.Net.bind0(Native Method)
at sun.nio.ch.Net.bind(Net.java:433)
at sun.nio.ch.Net.bind(Net.java:425)
at sun.nio.ch.ServerSocketChannelImpl.bind(ServerSocketChannelImpl.java:220)
at sun.nio.ch.ServerSocketAdaptor.bind(ServerSocketAdaptor.java:85)
at org.eclipse.jetty.server.ServerConnector.openAcceptChannel(ServerConnector.java:342)
at org.eclipse.jetty.server.ServerConnector.open(ServerConnector.java:307)
at org.eclipse.jetty.server.AbstractNetworkConnector.doStart(AbstractNetworkConnector.java:80)
at org.eclipse.jetty.server.ServerConnector.doStart(ServerConnector.java:231)
at org.eclipse.jetty.util.component.AbstractLifeCycle.start(AbstractLifeCycle.java:72)
at org.eclipse.jetty.server.Server.doStart(Server.java:385)
at org.eclipse.jetty.util.component.AbstractLifeCycle.start(AbstractLifeCycle.java:72)
at org.apache.solr.client.solrj.embedded.JettySolrRunner.retryOnPortBindFailure(JettySolrRunner.java:566)
at org.apache.solr.client.solrj.embedded.JettySolrRunner.start(JettySolrRunner.java:504)
at org.apache.solr.client.solrj.embedded.JettySolrRunner.start(JettySolrRunner.java:472)
at org.apache.solr.cloud.api.collections.ShardSplitTest.testSplitWithChaosMonkey(ShardSplitTest.java:505)
at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)
at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
at java.lang.reflect.Method.invoke(Method.java:498)
at com.carrotsearch.randomizedtesting.RandomizedRunner.invoke(RandomizedRunner.java:1750)
at com.carrotsearch.randomizedtesting.RandomizedRunner$8.evaluate(RandomizedRunner.java:938)
at com.carrotsearch.randomizedtesting.RandomizedRunner$9.evaluate(RandomizedRunner.java:974)
at com.carrotsearch.randomizedtesting.RandomizedRunner$10.evaluate(RandomizedRunner.java:988)
at org.apache.solr.BaseDistributedSearchTestCase$ShardsRepeatRule$ShardsFixedStatement.callStatement(BaseDistributedSearchTestCase.java:1090)
at org.apache.solr.BaseDistributedSearchTestCase$ShardsRepeatRule$ShardsStatement.evaluate(BaseDistributedSearchTestCase.java:1061)
at com.carrotsearch.randomizedtesting.rules.SystemPropertiesRestoreRule$1.evaluate(SystemPropertiesRestoreRule.java:57)
at org.apache.lucene.util.TestRuleSetupTeardownChained$1.evaluate(TestRuleSetupTeardownChained.java:49)
at org.apache.lucene.util.AbstractBeforeAfterRule$1.evaluate(AbstractBeforeAfterRule.java:45)
at org.apache.lucene.util.TestRuleThreadAndTestName$1.evaluate(TestRuleThreadAndTestName.java:48)
at org.apache.lucene.util.TestRuleIgnoreAfterMaxFailures$1.evaluate(TestRuleIgnoreAfterMaxFailures.java:64)
at org.apache.lucene.util.TestRuleMarkFailure$1.evaluate(TestRuleMarkFailure.java:47)
at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
at com.carrotsearch.randomizedtesting.ThreadLeakControl$StatementRunner.run(ThreadLeakControl.java:368)
at com.carrotsearch.randomizedtesting.ThreadLeakControl.forkTimeoutingTask(ThreadLeakControl.java:817)
at com.carrotsearch.randomizedtesting.ThreadLeakControl$3.evaluate(ThreadLeakControl.java:468)
at com.carrotsearch.randomizedtesting.RandomizedRunner.runSingleTest(RandomizedRunner.java:947)
at com.carrotsearch.randomizedtesting.RandomizedRunner$5.evaluate(RandomizedRunner.java:832)
at com.carrotsearch.randomizedtesting.RandomizedRunner$6.evaluate(RandomizedRunner.java:883)
at com.carrotsearch.randomizedtesting.RandomizedRunner$7.evaluate(RandomizedRunner.java:894)
at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
at com.carrotsearch.randomizedtesting.rules.SystemPropertiesRestoreRule$1.evaluate(SystemPropertiesRestoreRule.java:57)
at org.apache.lucene.util.AbstractBeforeAfterRule$1.evaluate(AbstractBeforeAfterRule.java:45)
at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
at org.apache.lucene.util.TestRuleStoreClassName$1.evaluate(TestRuleStoreClassName.java:41)
at com.carrotsearch.randomizedtesting.rules.NoShadowingOrOverridesOnMethodsRule$1.evaluate(NoShadowingOrOverridesOnMethodsRule.java:40)
at com.carrotsearch.randomizedtesting.rules.NoShadowingOrOverridesOnMethodsRule$1.evaluate(NoShadowingOrOverridesOnMethodsRule.java:40)
at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
at org.apache.lucene.util.TestRuleAssertionsRequired$1.evaluate(TestRuleAssertionsRequired.java:53)
at org.apache.lucene.util.TestRuleMarkFailure$1.evaluate(TestRuleMarkFailure.java:47)
at org.apache.lucene.util.TestRuleIgnoreAfterMaxFailures$1.evaluate(TestRuleIgnoreAfterMaxFailures.java:64)
at org.apache.lucene.util.TestRuleIgnoreTestSuites$1.evaluate(TestRuleIgnoreTestSuites.java:54)
at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
at com.carrotsearch.randomizedtesting.ThreadLeakControl$StatementRunner.run(ThreadLeakControl.java:368)
at java.lang.Thread.run(Thread.java:748)
FAILED: junit.framework.TestSuite.org.apache.solr.cloud.api.collections.ShardSplitTest
Error Message:
10 threads leaked from SUITE scope at org.apache.solr.cloud.api.collections.ShardSplitTest: 1) Thread[id=79195, name=Connector-Scheduler-8d0920-1, state=TIMED_WAITING, group=TGRP-ShardSplitTest] at sun.misc.Unsafe.park(Native Method) at java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:215) at java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:2078) at java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1093) at java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:809) at java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1074) at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1134) at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624) at java.lang.Thread.run(Thread.java:748) 2) Thread[id=79111, name=qtp78814574-79111, state=TIMED_WAITING, group=TGRP-ShardSplitTest] at sun.misc.Unsafe.park(Native Method) at java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:215) at java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:2078) at org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) at org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:875) at org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:925) at java.lang.Thread.run(Thread.java:748) 3) Thread[id=79107, name=qtp78814574-79107-acceptor-0@7ec6adcb-ServerConnector@8d0920{HTTP/1.1, (http/1.1, h2c)}{127.0.0.1:44431}, state=RUNNABLE, group=TGRP-ShardSplitTest] at sun.nio.ch.ServerSocketChannelImpl.accept0(Native Method) at sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:419) at sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:247) at 
org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:385) at org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:702) at org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:806) at org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:938) at java.lang.Thread.run(Thread.java:748) 4) Thread[id=79112, name=qtp78814574-79112, state=TIMED_WAITING, group=TGRP-ShardSplitTest] at sun.misc.Unsafe.park(Native Method) at java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:215) at java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:2078) at org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) at org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:875) at org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:925) at java.lang.Thread.run(Thread.java:748) 5) Thread[id=79108, name=qtp78814574-79108, state=TIMED_WAITING, group=TGRP-ShardSplitTest] at sun.misc.Unsafe.park(Native Method) at java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:215) at java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:2078) at org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) at org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:875) at org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:925) at java.lang.Thread.run(Thread.java:748) 6) Thread[id=79106, name=qtp78814574-79106, state=RUNNABLE, group=TGRP-ShardSplitTest] at sun.nio.ch.EPollArrayWrapper.epollWait(Native Method) at sun.nio.ch.EPollArrayWrapper.poll(EPollArrayWrapper.java:269) at sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:93) at sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:86) at 
sun.nio.ch.SelectorImpl.select(SelectorImpl.java:97) at sun.nio.ch.SelectorImpl.select(SelectorImpl.java:101) at org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:472) at org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:409) at org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:360) at org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:184) at org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:171) at org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:135) at org.eclipse.jetty.io.ManagedSelector$$Lambda$66/508422401.run(Unknown Source) at org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:806) at org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:938) at java.lang.Thread.run(Thread.java:748) 7) Thread[id=79109, name=qtp78814574-79109, state=TIMED_WAITING, group=TGRP-ShardSplitTest] at sun.misc.Unsafe.park(Native Method) at java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:215) at java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:2078) at org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) at org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:875) at org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:925) at java.lang.Thread.run(Thread.java:748) 8) Thread[id=79105, name=qtp78814574-79105, state=RUNNABLE, group=TGRP-ShardSplitTest] at sun.nio.ch.EPollArrayWrapper.epollWait(Native Method) at sun.nio.ch.EPollArrayWrapper.poll(EPollArrayWrapper.java:269) at sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:93) at sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:86) at sun.nio.ch.SelectorImpl.select(SelectorImpl.java:97) at 
sun.nio.ch.SelectorImpl.select(SelectorImpl.java:101) at org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:472) at org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:409) at org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:360) at org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:184) at org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:171) at org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:135) at org.eclipse.jetty.io.ManagedSelector$$Lambda$66/508422401.run(Unknown Source) at org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:806) at org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:938) at java.lang.Thread.run(Thread.java:748) 9) Thread[id=79113, name=Session-HouseKeeper-6c445f77-1, state=TIMED_WAITING, group=TGRP-ShardSplitTest] at sun.misc.Unsafe.park(Native Method) at java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:215) at java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:2078) at java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1093) at java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:809) at java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1074) at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1134) at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624) at java.lang.Thread.run(Thread.java:748) 10) Thread[id=79110, name=qtp78814574-79110, state=TIMED_WAITING, group=TGRP-ShardSplitTest] at sun.misc.Unsafe.park(Native Method) at java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:215) at 
java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:2078) at org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) at org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:875) at org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:925) at java.lang.Thread.run(Thread.java:748)
Stack Trace:
com.carrotsearch.randomizedtesting.ThreadLeakError: 10 threads leaked from SUITE scope at org.apache.solr.cloud.api.collections.ShardSplitTest:
1) Thread[id=79195, name=Connector-Scheduler-8d0920-1, state=TIMED_WAITING, group=TGRP-ShardSplitTest]
at sun.misc.Unsafe.park(Native Method)
at java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:215)
at java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:2078)
at java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1093)
at java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:809)
at java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1074)
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1134)
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
at java.lang.Thread.run(Thread.java:748)
2) Thread[id=79111, name=qtp78814574-79111, state=TIMED_WAITING, group=TGRP-ShardSplitTest]
at sun.misc.Unsafe.park(Native Method)
at java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:215)
at java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:2078)
at org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382)
at org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:875)
at org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:925)
at java.lang.Thread.run(Thread.java:748)
3) Thread[id=79107, name=qtp78814574-79107-acceptor-0@7ec6adcb-ServerConnector@8d0920{HTTP/1.1, (http/1.1, h2c)}{127.0.0.1:44431}, state=RUNNABLE, group=TGRP-ShardSplitTest]
at sun.nio.ch.ServerSocketChannelImpl.accept0(Native Method)
at sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:419)
at sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:247)
at org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:385)
at org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:702)
at org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:806)
at org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:938)
at java.lang.Thread.run(Thread.java:748)
4) Thread[id=79112, name=qtp78814574-79112, state=TIMED_WAITING, group=TGRP-ShardSplitTest]
at sun.misc.Unsafe.park(Native Method)
at java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:215)
at java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:2078)
at org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382)
at org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:875)
at org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:925)
at java.lang.Thread.run(Thread.java:748)
5) Thread[id=79108, name=qtp78814574-79108, state=TIMED_WAITING, group=TGRP-ShardSplitTest]
at sun.misc.Unsafe.park(Native Method)
at java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:215)
at java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:2078)
at org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382)
at org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:875)
at org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:925)
at java.lang.Thread.run(Thread.java:748)
6) Thread[id=79106, name=qtp78814574-79106, state=RUNNABLE, group=TGRP-ShardSplitTest]
at sun.nio.ch.EPollArrayWrapper.epollWait(Native Method)
at sun.nio.ch.EPollArrayWrapper.poll(EPollArrayWrapper.java:269)
at sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:93)
at sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:86)
at sun.nio.ch.SelectorImpl.select(SelectorImpl.java:97)
at sun.nio.ch.SelectorImpl.select(SelectorImpl.java:101)
at org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:472)
at org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:409)
at org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:360)
at org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:184)
at org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:171)
at org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:135)
at org.eclipse.jetty.io.ManagedSelector$$Lambda$66/508422401.run(Unknown Source)
at org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:806)
at org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:938)
at java.lang.Thread.run(Thread.java:748)
7) Thread[id=79109, name=qtp78814574-79109, state=TIMED_WAITING, group=TGRP-ShardSplitTest]
at sun.misc.Unsafe.park(Native Method)
at java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:215)
at java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:2078)
at org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382)
at org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:875)
at org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:925)
at java.lang.Thread.run(Thread.java:748)
8) Thread[id=79105, name=qtp78814574-79105, state=RUNNABLE, group=TGRP-ShardSplitTest]
at sun.nio.ch.EPollArrayWrapper.epollWait(Native Method)
at sun.nio.ch.EPollArrayWrapper.poll(EPollArrayWrapper.java:269)
at sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:93)
at sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:86)
at sun.nio.ch.SelectorImpl.select(SelectorImpl.java:97)
at sun.nio.ch.SelectorImpl.select(SelectorImpl.java:101)
at org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:472)
at org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:409)
at org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:360)
at org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:184)
at org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:171)
at org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:135)
at org.eclipse.jetty.io.ManagedSelector$$Lambda$66/508422401.run(Unknown Source)
at org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:806)
at org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:938)
at java.lang.Thread.run(Thread.java:748)
9) Thread[id=79113, name=Session-HouseKeeper-6c445f77-1, state=TIMED_WAITING, group=TGRP-ShardSplitTest]
at sun.misc.Unsafe.park(Native Method)
at java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:215)
at java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:2078)
at java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1093)
at java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:809)
at java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1074)
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1134)
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
at java.lang.Thread.run(Thread.java:748)
10) Thread[id=79110, name=qtp78814574-79110, state=TIMED_WAITING, group=TGRP-ShardSplitTest]
at sun.misc.Unsafe.park(Native Method)
at java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:215)
at java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:2078)
at org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382)
at org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:875)
at org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:925)
at java.lang.Thread.run(Thread.java:748)
at __randomizedtesting.SeedInfo.seed([E7328758635BF3AA]:0)
FAILED: junit.framework.TestSuite.org.apache.solr.cloud.api.collections.ShardSplitTest
Error Message:
There are still zombie threads that couldn't be terminated: 1) Thread[id=79195, name=Connector-Scheduler-8d0920-1, state=WAITING, group=TGRP-ShardSplitTest] at sun.misc.Unsafe.park(Native Method) at java.util.concurrent.locks.LockSupport.park(LockSupport.java:175) at java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:2039) at java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1081) at java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:809) at java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1074) at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1134) at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624) at java.lang.Thread.run(Thread.java:748) 2) Thread[id=79111, name=qtp78814574-79111, state=TIMED_WAITING, group=TGRP-ShardSplitTest] at sun.misc.Unsafe.park(Native Method) at java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:215) at java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:2078) at org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) at org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:875) at org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:925) at java.lang.Thread.run(Thread.java:748) 3) Thread[id=79108, name=qtp78814574-79108, state=TIMED_WAITING, group=TGRP-ShardSplitTest] at sun.misc.Unsafe.park(Native Method) at java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:215) at java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:2078) at org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) at 
org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:875) at org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:925) at java.lang.Thread.run(Thread.java:748) 4) Thread[id=79106, name=qtp78814574-79106, state=RUNNABLE, group=TGRP-ShardSplitTest] at sun.nio.ch.EPollArrayWrapper.epollWait(Native Method) at sun.nio.ch.EPollArrayWrapper.poll(EPollArrayWrapper.java:269) at sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:93) at sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:86) at sun.nio.ch.SelectorImpl.select(SelectorImpl.java:97) at sun.nio.ch.SelectorImpl.select(SelectorImpl.java:101) at org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:472) at org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:409) at org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:360) at org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:184) at org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:171) at org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:135) at org.eclipse.jetty.io.ManagedSelector$$Lambda$66/508422401.run(Unknown Source) at org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:806) at org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:938) at java.lang.Thread.run(Thread.java:748) 5) Thread[id=79109, name=qtp78814574-79109, state=TIMED_WAITING, group=TGRP-ShardSplitTest] at sun.misc.Unsafe.park(Native Method) at java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:215) at java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:2078) at org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) at 
org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:875) at org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:925) at java.lang.Thread.run(Thread.java:748) 6) Thread[id=79107, name=qtp78814574-79107, state=TIMED_WAITING, group=TGRP-ShardSplitTest] at sun.misc.Unsafe.park(Native Method) at java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:215) at java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:2078) at org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) at org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:875) at org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:925) at java.lang.Thread.run(Thread.java:748) 7) Thread[id=79112, name=qtp78814574-79112, state=TIMED_WAITING, group=TGRP-ShardSplitTest] at sun.misc.Unsafe.park(Native Method) at java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:215) at java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:2078) at org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) at org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:875) at org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:925) at java.lang.Thread.run(Thread.java:748) 8) Thread[id=79105, name=qtp78814574-79105, state=RUNNABLE, group=TGRP-ShardSplitTest] at sun.nio.ch.EPollArrayWrapper.epollWait(Native Method) at sun.nio.ch.EPollArrayWrapper.poll(EPollArrayWrapper.java:269) at sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:93) at sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:86) at sun.nio.ch.SelectorImpl.select(SelectorImpl.java:97) at sun.nio.ch.SelectorImpl.select(SelectorImpl.java:101) at 
org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:472) at org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:409) at org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:360) at org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:184) at org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:171) at org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:135) at org.eclipse.jetty.io.ManagedSelector$$Lambda$66/508422401.run(Unknown Source) at org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:806) at org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:938) at java.lang.Thread.run(Thread.java:748) 9) Thread[id=79113, name=Session-HouseKeeper-6c445f77-1, state=TIMED_WAITING, group=TGRP-ShardSplitTest] at sun.misc.Unsafe.park(Native Method) at java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:215) at java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:2078) at java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1093) at java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:809) at java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1074) at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1134) at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624) at java.lang.Thread.run(Thread.java:748) 10) Thread[id=79110, name=qtp78814574-79110, state=TIMED_WAITING, group=TGRP-ShardSplitTest] at sun.misc.Unsafe.park(Native Method) at java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:215) at 
java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:2078) at org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) at org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:875) at org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:925) at java.lang.Thread.run(Thread.java:748)
Stack Trace:
com.carrotsearch.randomizedtesting.ThreadLeakError: There are still zombie threads that couldn't be terminated:
1) Thread[id=79195, name=Connector-Scheduler-8d0920-1, state=WAITING, group=TGRP-ShardSplitTest]
at sun.misc.Unsafe.park(Native Method)
at java.util.concurrent.locks.LockSupport.park(LockSupport.java:175)
at java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:2039)
at java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1081)
at java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:809)
at java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1074)
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1134)
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
at java.lang.Thread.run(Thread.java:748)
2) Thread[id=79111, name=qtp78814574-79111, state=TIMED_WAITING, group=TGRP-ShardSplitTest]
at sun.misc.Unsafe.park(Native Method)
at java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:215)
at java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:2078)
at org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382)
at org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:875)
at org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:925)
at java.lang.Thread.run(Thread.java:748)
3) Thread[id=79108, name=qtp78814574-79108, state=TIMED_WAITING, group=TGRP-ShardSplitTest]
at sun.misc.Unsafe.park(Native Method)
at java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:215)
at java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:2078)
at org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382)
at org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:875)
at org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:925)
at java.lang.Thread.run(Thread.java:748)
4) Thread[id=79106, name=qtp78814574-79106, state=RUNNABLE, group=TGRP-ShardSplitTest]
at sun.nio.ch.EPollArrayWrapper.epollWait(Native Method)
at sun.nio.ch.EPollArrayWrapper.poll(EPollArrayWrapper.java:269)
at sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:93)
at sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:86)
at sun.nio.ch.SelectorImpl.select(SelectorImpl.java:97)
at sun.nio.ch.SelectorImpl.select(SelectorImpl.java:101)
at org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:472)
at org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:409)
at org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:360)
at org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:184)
at org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:171)
at org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:135)
at org.eclipse.jetty.io.ManagedSelector$$Lambda$66/508422401.run(Unknown Source)
at org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:806)
at org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:938)
at java.lang.Thread.run(Thread.java:748)
5) Thread[id=79109, name=qtp78814574-79109, state=TIMED_WAITING, group=TGRP-ShardSplitTest]
at sun.misc.Unsafe.park(Native Method)
at java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:215)
at java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:2078)
at org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382)
at org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:875)
at org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:925)
at java.lang.Thread.run(Thread.java:748)
6) Thread[id=79107, name=qtp78814574-79107, state=TIMED_WAITING, group=TGRP-ShardSplitTest]
at sun.misc.Unsafe.park(Native Method)
at java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:215)
at java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:2078)
at org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382)
at org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:875)
at org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:925)
at java.lang.Thread.run(Thread.java:748)
7) Thread[id=79112, name=qtp78814574-79112, state=TIMED_WAITING, group=TGRP-ShardSplitTest]
at sun.misc.Unsafe.park(Native Method)
at java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:215)
at java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:2078)
at org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382)
at org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:875)
at org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:925)
at java.lang.Thread.run(Thread.java:748)
8) Thread[id=79105, name=qtp78814574-79105, state=RUNNABLE, group=TGRP-ShardSplitTest]
at sun.nio.ch.EPollArrayWrapper.epollWait(Native Method)
at sun.nio.ch.EPollArrayWrapper.poll(EPollArrayWrapper.java:269)
at sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:93)
at sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:86)
at sun.nio.ch.SelectorImpl.select(SelectorImpl.java:97)
at sun.nio.ch.SelectorImpl.select(SelectorImpl.java:101)
at org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:472)
at org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:409)
at org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:360)
at org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:184)
at org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:171)
at org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:135)
at org.eclipse.jetty.io.ManagedSelector$$Lambda$66/508422401.run(Unknown Source)
at org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:806)
at org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:938)
at java.lang.Thread.run(Thread.java:748)
9) Thread[id=79113, name=Session-HouseKeeper-6c445f77-1, state=TIMED_WAITING, group=TGRP-ShardSplitTest]
at sun.misc.Unsafe.park(Native Method)
at java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:215)
at java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:2078)
at java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1093)
at java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:809)
at java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1074)
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1134)
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
at java.lang.Thread.run(Thread.java:748)
10) Thread[id=79110, name=qtp78814574-79110, state=TIMED_WAITING, group=TGRP-ShardSplitTest]
at sun.misc.Unsafe.park(Native Method)
at java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:215)
at java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:2078)
at org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382)
at org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:875)
at org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:925)
at java.lang.Thread.run(Thread.java:748)
at __randomizedtesting.SeedInfo.seed([E7328758635BF3AA]:0)
Build Log:
[...truncated 15360 lines...]
[junit4] Suite: org.apache.solr.cloud.api.collections.ShardSplitTest
[junit4] 2> 3852763 INFO (SUITE-ShardSplitTest-seed#[E7328758635BF3AA]-worker) [ ] o.a.s.SolrTestCase Setting 'solr.default.confdir' system property to test-framework derived value of '/home/jenkins/jenkins-slave/workspace/Lucene-Solr-NightlyTests-8.6/checkout/solr/server/solr/configsets/_default/conf'
[junit4] 2> 3852764 INFO (SUITE-ShardSplitTest-seed#[E7328758635BF3AA]-worker) [ ] o.a.s.SolrTestCaseJ4 Created dataDir: /home/jenkins/jenkins-slave/workspace/Lucene-Solr-NightlyTests-8.6/checkout/solr/build/solr-core/test/J0/temp/solr.cloud.api.collections.ShardSplitTest_E7328758635BF3AA-001/data-dir-277-001
[junit4] 2> 3852764 WARN (SUITE-ShardSplitTest-seed#[E7328758635BF3AA]-worker) [ ] o.a.s.SolrTestCaseJ4 startTrackingSearchers: numOpens=276 numCloses=276
[junit4] 2> 3852764 INFO (SUITE-ShardSplitTest-seed#[E7328758635BF3AA]-worker) [ ] o.a.s.SolrTestCaseJ4 Using PointFields (NUMERIC_POINTS_SYSPROP=true) w/NUMERIC_DOCVALUES_SYSPROP=false
[junit4] 2> 3852766 INFO (SUITE-ShardSplitTest-seed#[E7328758635BF3AA]-worker) [ ] o.a.s.SolrTestCaseJ4 Randomized ssl (false) and clientAuth (false) via: @org.apache.solr.SolrTestCaseJ4$SuppressSSL(bugUrl=https://issues.apache.org/jira/browse/SOLR-5776)
[junit4] 2> 3852766 INFO (SUITE-ShardSplitTest-seed#[E7328758635BF3AA]-worker) [ ] o.a.s.SolrTestCaseJ4 SecureRandom sanity checks: test.solr.allowed.securerandom=null & java.security.egd=file:/dev/./urandom
[junit4] 2> 3852766 INFO (SUITE-ShardSplitTest-seed#[E7328758635BF3AA]-worker) [ ] o.a.s.BaseDistributedSearchTestCase Setting hostContext system property: /qw_i/pn
[junit4] 2> 3852769 INFO (TEST-ShardSplitTest.testSplitAfterFailedSplit2-seed#[E7328758635BF3AA]) [ ] o.a.s.c.ZkTestServer STARTING ZK TEST SERVER
[junit4] 2> 3852770 INFO (ZkTestServer Run Thread) [ ] o.a.s.c.ZkTestServer client port: 0.0.0.0/0.0.0.0:0
[junit4] 2> 3852770 INFO (ZkTestServer Run Thread) [ ] o.a.s.c.ZkTestServer Starting server
[junit4] 2> 3852870 INFO (TEST-ShardSplitTest.testSplitAfterFailedSplit2-seed#[E7328758635BF3AA]) [ ] o.a.s.c.ZkTestServer start zk server on port: 45781
[junit4] 2> 3852870 INFO (TEST-ShardSplitTest.testSplitAfterFailedSplit2-seed#[E7328758635BF3AA]) [ ] o.a.s.c.ZkTestServer waitForServerUp: 127.0.0.1:45781
[junit4] 2> 3852870 INFO (TEST-ShardSplitTest.testSplitAfterFailedSplit2-seed#[E7328758635BF3AA]) [ ] o.a.s.c.ZkTestServer parse host and port list: 127.0.0.1:45781
[junit4] 2> 3852870 INFO (TEST-ShardSplitTest.testSplitAfterFailedSplit2-seed#[E7328758635BF3AA]) [ ] o.a.s.c.ZkTestServer connecting to 127.0.0.1 45781
[junit4] 2> 3852871 INFO (TEST-ShardSplitTest.testSplitAfterFailedSplit2-seed#[E7328758635BF3AA]) [ ] o.a.s.c.c.ConnectionManager Waiting for client to connect to ZooKeeper
[junit4] 2> 3852874 INFO (zkConnectionManagerCallback-34655-thread-1) [ ] o.a.s.c.c.ConnectionManager zkClient has connected
[junit4] 2> 3852874 INFO (TEST-ShardSplitTest.testSplitAfterFailedSplit2-seed#[E7328758635BF3AA]) [ ] o.a.s.c.c.ConnectionManager Client is connected to ZooKeeper
[junit4] 2> 3852879 INFO (TEST-ShardSplitTest.testSplitAfterFailedSplit2-seed#[E7328758635BF3AA]) [ ] o.a.s.c.c.ConnectionManager Waiting for client to connect to ZooKeeper
[junit4] 2> 3852882 INFO (zkConnectionManagerCallback-34657-thread-1) [ ] o.a.s.c.c.ConnectionManager zkClient has connected
[junit4] 2> 3852883 INFO (TEST-ShardSplitTest.testSplitAfterFailedSplit2-seed#[E7328758635BF3AA]) [ ] o.a.s.c.c.ConnectionManager Client is connected to ZooKeeper
[junit4] 2> 3852887 INFO (TEST-ShardSplitTest.testSplitAfterFailedSplit2-seed#[E7328758635BF3AA]) [ ] o.a.s.c.ZkTestServer put /home/jenkins/jenkins-slave/workspace/Lucene-Solr-NightlyTests-8.6/checkout/solr/core/src/test-files/solr/collection1/conf/solrconfig-tlog.xml to /configs/conf1/solrconfig.xml
[junit4] 2> 3852888 INFO (TEST-ShardSplitTest.testSplitAfterFailedSplit2-seed#[E7328758635BF3AA]) [ ] o.a.s.c.ZkTestServer put /home/jenkins/jenkins-slave/workspace/Lucene-Solr-NightlyTests-8.6/checkout/solr/core/src/test-files/solr/collection1/conf/schema15.xml to /configs/conf1/schema.xml
[junit4] 2> 3852889 INFO (TEST-ShardSplitTest.testSplitAfterFailedSplit2-seed#[E7328758635BF3AA]) [ ] o.a.s.c.ZkTestServer put /home/jenkins/jenkins-slave/workspace/Lucene-Solr-NightlyTests-8.6/checkout/solr/core/src/test-files/solr/collection1/conf/solrconfig.snippet.randomindexconfig.xml to /configs/conf1/solrconfig.snippet.randomindexconfig.xml
[junit4] 2> 3852890 INFO (TEST-ShardSplitTest.testSplitAfterFailedSplit2-seed#[E7328758635BF3AA]) [ ] o.a.s.c.ZkTestServer put /home/jenkins/jenkins-slave/workspace/Lucene-Solr-NightlyTests-8.6/checkout/solr/core/src/test-files/solr/collection1/conf/stopwords.txt to /configs/conf1/stopwords.txt
[junit4] 2> 3852891 INFO (TEST-ShardSplitTest.testSplitAfterFailedSplit2-seed#[E7328758635BF3AA]) [ ] o.a.s.c.ZkTestServer put /home/jenkins/jenkins-slave/workspace/Lucene-Solr-NightlyTests-8.6/checkout/solr/core/src/test-files/solr/collection1/conf/protwords.txt to /configs/conf1/protwords.txt
[junit4] 2> 3852892 INFO (TEST-ShardSplitTest.testSplitAfterFailedSplit2-seed#[E7328758635BF3AA]) [ ] o.a.s.c.ZkTestServer put /home/jenkins/jenkins-slave/workspace/Lucene-Solr-NightlyTests-8.6/checkout/solr/core/src/test-files/solr/collection1/conf/currency.xml to /configs/conf1/currency.xml
[junit4] 2> 3852893 INFO (TEST-ShardSplitTest.testSplitAfterFailedSplit2-seed#[E7328758635BF3AA]) [ ] o.a.s.c.ZkTestServer put /home/jenkins/jenkins-slave/workspace/Lucene-Solr-NightlyTests-8.6/checkout/solr/core/src/test-files/solr/collection1/conf/enumsConfig.xml to /configs/conf1/enumsConfig.xml
[junit4] 2> 3852894 INFO (TEST-ShardSplitTest.testSplitAfterFailedSplit2-seed#[E7328758635BF3AA]) [ ] o.a.s.c.ZkTestServer put /home/jenkins/jenkins-slave/workspace/Lucene-Solr-NightlyTests-8.6/checkout/solr/core/src/test-files/solr/collection1/conf/open-exchange-rates.json to /configs/conf1/open-exchange-rates.json
[junit4] 2> 3852895 INFO (TEST-ShardSplitTest.testSplitAfterFailedSplit2-seed#[E7328758635BF3AA]) [ ] o.a.s.c.ZkTestServer put /home/jenkins/jenkins-slave/workspace/Lucene-Solr-NightlyTests-8.6/checkout/solr/core/src/test-files/solr/collection1/conf/mapping-ISOLatin1Accent.txt to /configs/conf1/mapping-ISOLatin1Accent.txt
[junit4] 2> 3852896 INFO (TEST-ShardSplitTest.testSplitAfterFailedSplit2-seed#[E7328758635BF3AA]) [ ] o.a.s.c.ZkTestServer put /home/jenkins/jenkins-slave/workspace/Lucene-Solr-NightlyTests-8.6/checkout/solr/core/src/test-files/solr/collection1/conf/old_synonyms.txt to /configs/conf1/old_synonyms.txt
[junit4] 2> 3852897 INFO (TEST-ShardSplitTest.testSplitAfterFailedSplit2-seed#[E7328758635BF3AA]) [ ] o.a.s.c.ZkTestServer put /home/jenkins/jenkins-slave/workspace/Lucene-Solr-NightlyTests-8.6/checkout/solr/core/src/test-files/solr/collection1/conf/synonyms.txt to /configs/conf1/synonyms.txt
[junit4] 2> 3852898 INFO (TEST-ShardSplitTest.testSplitAfterFailedSplit2-seed#[E7328758635BF3AA]) [ ] o.a.s.c.AbstractFullDistribZkTestBase Will use NRT replicas unless explicitly asked otherwise
[junit4] 2> 3852988 WARN (TEST-ShardSplitTest.testSplitAfterFailedSplit2-seed#[E7328758635BF3AA]) [ ] o.e.j.s.h.g.GzipHandler minGzipSize of 0 is inefficient for short content, break even is size 23
[junit4] 2> 3852988 INFO (TEST-ShardSplitTest.testSplitAfterFailedSplit2-seed#[E7328758635BF3AA]) [ ] o.a.s.c.s.e.JettySolrRunner Start Jetty (configured port=0, binding port=0)
[junit4] 2> 3852988 INFO (TEST-ShardSplitTest.testSplitAfterFailedSplit2-seed#[E7328758635BF3AA]) [ ] o.a.s.c.s.e.JettySolrRunner Trying to start Jetty on port 0 try number 2 ...
[junit4] 2> 3852988 INFO (TEST-ShardSplitTest.testSplitAfterFailedSplit2-seed#[E7328758635BF3AA]) [ ] o.e.j.s.Server jetty-9.4.27.v20200227; built: 2020-02-27T18:37:21.340Z; git: a304fd9f351f337e7c0e2a7c28878dd536149c6c; jvm 1.8.0_252-b09
[junit4] 2> 3852989 INFO (TEST-ShardSplitTest.testSplitAfterFailedSplit2-seed#[E7328758635BF3AA]) [ ] o.e.j.s.session DefaultSessionIdManager workerName=node0
[junit4] 2> 3852989 INFO (TEST-ShardSplitTest.testSplitAfterFailedSplit2-seed#[E7328758635BF3AA]) [ ] o.e.j.s.session No SessionScavenger set, using defaults
[junit4] 2> 3852989 INFO (TEST-ShardSplitTest.testSplitAfterFailedSplit2-seed#[E7328758635BF3AA]) [ ] o.e.j.s.session node0 Scavenging every 600000ms
[junit4] 2> 3852989 INFO (TEST-ShardSplitTest.testSplitAfterFailedSplit2-seed#[E7328758635BF3AA]) [ ] o.e.j.s.h.ContextHandler Started o.e.j.s.ServletContextHandler@2edb75f5{/qw_i/pn,null,AVAILABLE}
[junit4] 2> 3852989 INFO (TEST-ShardSplitTest.testSplitAfterFailedSplit2-seed#[E7328758635BF3AA]) [ ] o.e.j.s.AbstractConnector Started ServerConnector@8481d53{HTTP/1.1, (http/1.1, h2c)}{127.0.0.1:40705}
[junit4] 2> 3852989 INFO (TEST-ShardSplitTest.testSplitAfterFailedSplit2-seed#[E7328758635BF3AA]) [ ] o.e.j.s.Server Started @3853023ms
[junit4] 2> 3852989 INFO (TEST-ShardSplitTest.testSplitAfterFailedSplit2-seed#[E7328758635BF3AA]) [ ] o.a.s.c.s.e.JettySolrRunner Jetty properties: {solr.data.dir=/home/jenkins/jenkins-slave/workspace/Lucene-Solr-NightlyTests-8.6/checkout/solr/build/solr-core/test/J0/temp/solr.cloud.api.collections.ShardSplitTest_E7328758635BF3AA-001/tempDir-001/control/data, replicaType=NRT, hostContext=/qw_i/pn, hostPort=40705, coreRootDirectory=/home/jenkins/jenkins-slave/workspace/Lucene-Solr-NightlyTests-8.6/checkout/solr/build/solr-core/test/J0/temp/solr.cloud.api.collections.ShardSplitTest_E7328758635BF3AA-001/control-001/cores}
[junit4] 2> 3852990 ERROR (TEST-ShardSplitTest.testSplitAfterFailedSplit2-seed#[E7328758635BF3AA]) [ ] o.a.s.u.StartupLoggingUtils Missing Java Option solr.log.dir. Logging may be missing or incomplete.
[junit4] 2> 3852990 INFO (TEST-ShardSplitTest.testSplitAfterFailedSplit2-seed#[E7328758635BF3AA]) [ ] o.a.s.s.SolrDispatchFilter Using logger factory org.apache.logging.slf4j.Log4jLoggerFactory
[junit4] 2> 3852990 INFO (TEST-ShardSplitTest.testSplitAfterFailedSplit2-seed#[E7328758635BF3AA]) [ ] o.a.s.s.SolrDispatchFilter ___ _ Welcome to Apache Solr™ version 8.6.1
[junit4] 2> 3852990 INFO (TEST-ShardSplitTest.testSplitAfterFailedSplit2-seed#[E7328758635BF3AA]) [ ] o.a.s.s.SolrDispatchFilter / __| ___| |_ _ Starting in cloud mode on port null
[junit4] 2> 3852990 INFO (TEST-ShardSplitTest.testSplitAfterFailedSplit2-seed#[E7328758635BF3AA]) [ ] o.a.s.s.SolrDispatchFilter \__ \/ _ \ | '_| Install dir: /home/jenkins/jenkins-slave/workspace/Lucene-Solr-NightlyTests-8.6/checkout/solr
[junit4] 2> 3852990 INFO (TEST-ShardSplitTest.testSplitAfterFailedSplit2-seed#[E7328758635BF3AA]) [ ] o.a.s.s.SolrDispatchFilter |___/\___/_|_| Start time: 2020-07-23T17:34:24.088Z
[junit4] 2> 3852993 INFO (TEST-ShardSplitTest.testSplitAfterFailedSplit2-seed#[E7328758635BF3AA]) [ ] o.a.s.c.c.ConnectionManager Waiting for client to connect to ZooKeeper
[junit4] 2> 3852995 INFO (zkConnectionManagerCallback-34659-thread-1) [ ] o.a.s.c.c.ConnectionManager zkClient has connected
[junit4] 2> 3852995 INFO (TEST-ShardSplitTest.testSplitAfterFailedSplit2-seed#[E7328758635BF3AA]) [ ] o.a.s.c.c.ConnectionManager Client is connected to ZooKeeper
[junit4] 2> 3853097 INFO (TEST-ShardSplitTest.testSplitAfterFailedSplit2-seed#[E7328758635BF3AA]) [ ] o.a.s.s.SolrDispatchFilter Loading solr.xml from SolrHome (not found in ZooKeeper)
[junit4] 2> 3853097 INFO (TEST-ShardSplitTest.testSplitAfterFailedSplit2-seed#[E7328758635BF3AA]) [ ] o.a.s.c.SolrXmlConfig Loading container configuration from /home/jenkins/jenkins-slave/workspace/Lucene-Solr-NightlyTests-8.6/checkout/solr/build/solr-core/test/J0/temp/solr.cloud.api.collections.ShardSplitTest_E7328758635BF3AA-001/control-001/solr.xml
[junit4] 2> 3853100 INFO (TEST-ShardSplitTest.testSplitAfterFailedSplit2-seed#[E7328758635BF3AA]) [ ] o.a.s.c.SolrXmlConfig Configuration parameter autoReplicaFailoverWorkLoopDelay is ignored
[junit4] 2> 3853100 INFO (TEST-ShardSplitTest.testSplitAfterFailedSplit2-seed#[E7328758635BF3AA]) [ ] o.a.s.c.SolrXmlConfig Configuration parameter autoReplicaFailoverBadNodeExpiration is ignored
[junit4] 2> 3853101 INFO (TEST-ShardSplitTest.testSplitAfterFailedSplit2-seed#[E7328758635BF3AA]) [ ] o.a.s.c.SolrXmlConfig MBean server found: com.sun.jmx.mbeanserver.JmxMBeanServer@64109b22, but no JMX reporters were configured - adding default JMX reporter.
[junit4] 2> 3853688 INFO (TEST-ShardSplitTest.testSplitAfterFailedSplit2-seed#[E7328758635BF3AA]) [ ] o.a.s.h.c.HttpShardHandlerFactory Host whitelist initialized: WhitelistHostChecker [whitelistHosts=null, whitelistHostCheckingEnabled=false]
[junit4] 2> 3853689 WARN (TEST-ShardSplitTest.testSplitAfterFailedSplit2-seed#[E7328758635BF3AA]) [ ] o.e.j.u.s.S.config Trusting all certificates configured for Client@2ad93ec[provider=null,keyStore=null,trustStore=null]
[junit4] 2> 3853689 WARN (TEST-ShardSplitTest.testSplitAfterFailedSplit2-seed#[E7328758635BF3AA]) [ ] o.e.j.u.s.S.config No Client EndPointIdentificationAlgorithm configured for Client@2ad93ec[provider=null,keyStore=null,trustStore=null]
[junit4] 2> 3853692 WARN (TEST-ShardSplitTest.testSplitAfterFailedSplit2-seed#[E7328758635BF3AA]) [ ] o.e.j.u.s.S.config Trusting all certificates configured for Client@401de771[provider=null,keyStore=null,trustStore=null]
[junit4] 2> 3853692 WARN (TEST-ShardSplitTest.testSplitAfterFailedSplit2-seed#[E7328758635BF3AA]) [ ] o.e.j.u.s.S.config No Client EndPointIdentificationAlgorithm configured for Client@401de771[provider=null,keyStore=null,trustStore=null]
[junit4] 2> 3853693 INFO (TEST-ShardSplitTest.testSplitAfterFailedSplit2-seed#[E7328758635BF3AA]) [ ] o.a.s.c.ZkContainer Zookeeper client=127.0.0.1:45781/solr
[junit4] 2> 3853697 INFO (TEST-ShardSplitTest.testSplitAfterFailedSplit2-seed#[E7328758635BF3AA]) [ ] o.a.s.c.c.ConnectionManager Waiting for client to connect to ZooKeeper
[junit4] 2> 3853701 INFO (zkConnectionManagerCallback-34670-thread-1) [ ] o.a.s.c.c.ConnectionManager zkClient has connected
[junit4] 2> 3853701 INFO (TEST-ShardSplitTest.testSplitAfterFailedSplit2-seed#[E7328758635BF3AA]) [ ] o.a.s.c.c.ConnectionManager Client is connected to ZooKeeper
[junit4] 2> 3853803 INFO (TEST-ShardSplitTest.testSplitAfterFailedSplit2-seed#[E7328758635BF3AA]) [n:127.0.0.1:40705_qw_i%2Fpn ] o.a.s.c.c.ConnectionManager Waiting for client to connect to ZooKeeper
[junit4] 2> 3853804 INFO (zkConnectionManagerCallback-34672-thread-1) [ ] o.a.s.c.c.ConnectionManager zkClient has connected
[junit4] 2> 3853804 INFO (TEST-ShardSplitTest.testSplitAfterFailedSplit2-seed#[E7328758635BF3AA]) [n:127.0.0.1:40705_qw_i%2Fpn ] o.a.s.c.c.ConnectionManager Client is connected to ZooKeeper
[junit4] 2> 3853859 INFO (TEST-ShardSplitTest.testSplitAfterFailedSplit2-seed#[E7328758635BF3AA]) [n:127.0.0.1:40705_qw_i%2Fpn ] o.a.s.c.OverseerElectionContext I am going to be the leader 127.0.0.1:40705_qw_i%2Fpn
[junit4] 2> 3853859 INFO (TEST-ShardSplitTest.testSplitAfterFailedSplit2-seed#[E7328758635BF3AA]) [n:127.0.0.1:40705_qw_i%2Fpn ] o.a.s.c.Overseer Overseer (id=72186908250800132-127.0.0.1:40705_qw_i%2Fpn-n_0000000000) starting
[junit4] 2> 3853863 INFO (TEST-ShardSplitTest.testSplitAfterFailedSplit2-seed#[E7328758635BF3AA]) [n:127.0.0.1:40705_qw_i%2Fpn ] o.a.s.c.ZkController Register node as live in ZooKeeper:/live_nodes/127.0.0.1:40705_qw_i%2Fpn
[junit4] 2> 3853863 INFO (OverseerStateUpdate-72186908250800132-127.0.0.1:40705_qw_i%2Fpn-n_0000000000) [n:127.0.0.1:40705_qw_i%2Fpn ] o.a.s.c.Overseer Starting to work on the main queue : 127.0.0.1:40705_qw_i%2Fpn
[junit4] 2> 3853863 DEBUG (OverseerCollectionConfigSetProcessor-72186908250800132-127.0.0.1:40705_qw_i%2Fpn-n_0000000000) [n:127.0.0.1:40705_qw_i%2Fpn ] o.a.s.c.OverseerTaskProcessor Process current queue of overseer operations
[junit4] 2> 3853864 INFO (OverseerStateUpdate-72186908250800132-127.0.0.1:40705_qw_i%2Fpn-n_0000000000) [n:127.0.0.1:40705_qw_i%2Fpn ] o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (0) -> (1)
[junit4] 2> 3853865 DEBUG (OverseerCollectionConfigSetProcessor-72186908250800132-127.0.0.1:40705_qw_i%2Fpn-n_0000000000) [n:127.0.0.1:40705_qw_i%2Fpn ] o.a.s.c.OverseerTaskProcessor Cleaning up work-queue. #Running tasks: 0 #Completed tasks: 0
[junit4] 2> 3853865 DEBUG (OverseerCollectionConfigSetProcessor-72186908250800132-127.0.0.1:40705_qw_i%2Fpn-n_0000000000) [n:127.0.0.1:40705_qw_i%2Fpn ] o.a.s.c.OverseerTaskProcessor RunningTasks: []
[junit4] 2> 3853865 DEBUG (OverseerCollectionConfigSetProcessor-72186908250800132-127.0.0.1:40705_qw_i%2Fpn-n_0000000000) [n:127.0.0.1:40705_qw_i%2Fpn ] o.a.s.c.OverseerTaskProcessor BlockedTasks: []
[junit4] 2> 3853865 DEBUG (OverseerCollectionConfigSetProcessor-72186908250800132-127.0.0.1:40705_qw_i%2Fpn-n_0000000000) [n:127.0.0.1:40705_qw_i%2Fpn ] o.a.s.c.OverseerTaskProcessor CompletedTasks: []
[junit4] 2> 3853866 INFO (OverseerCollectionConfigSetProcessor-72186908250800132-127.0.0.1:40705_qw_i%2Fpn-n_0000000000) [n:127.0.0.1:40705_qw_i%2Fpn ] o.a.s.c.OverseerTaskProcessor RunningZKTasks: []
[junit4] 2> 3853866 INFO (TEST-ShardSplitTest.testSplitAfterFailedSplit2-seed#[E7328758635BF3AA]) [n:127.0.0.1:40705_qw_i%2Fpn ] o.a.s.p.PackageLoader /packages.json updated to version -1
[junit4] 2> 3853866 WARN (TEST-ShardSplitTest.testSplitAfterFailedSplit2-seed#[E7328758635BF3AA]) [n:127.0.0.1:40705_qw_i%2Fpn ] o.a.s.c.CoreContainer Not all security plugins configured! authentication=disabled authorization=disabled. Solr is only as secure as you make it. Consider configuring authentication/authorization before exposing Solr to users internal or external. See https://s.apache.org/solrsecurity for more info
[junit4] 2> 3853885 INFO (TEST-ShardSplitTest.testSplitAfterFailedSplit2-seed#[E7328758635BF3AA]) [n:127.0.0.1:40705_qw_i%2Fpn ] o.a.s.h.a.MetricsHistoryHandler No .system collection, keeping metrics history in memory.
[junit4] 2> 3853905 INFO (TEST-ShardSplitTest.testSplitAfterFailedSplit2-seed#[E7328758635BF3AA]) [n:127.0.0.1:40705_qw_i%2Fpn ] o.a.s.m.r.SolrJmxReporter JMX monitoring for 'solr.node' (registry 'solr.node') enabled at server: com.sun.jmx.mbeanserver.JmxMBeanServer@64109b22
[junit4] 2> 3853913 INFO (TEST-ShardSplitTest.testSplitAfterFailedSplit2-seed#[E7328758635BF3AA]) [n:127.0.0.1:40705_qw_i%2Fpn ] o.a.s.m.r.SolrJmxReporter JMX monitoring for 'solr.jvm' (registry 'solr.jvm') enabled at server: com.sun.jmx.mbeanserver.JmxMBeanServer@64109b22
[junit4] 2> 3853913 INFO (TEST-ShardSplitTest.testSplitAfterFailedSplit2-seed#[E7328758635BF3AA]) [n:127.0.0.1:40705_qw_i%2Fpn ] o.a.s.m.r.SolrJmxReporter JMX monitoring for 'solr.jetty' (registry 'solr.jetty') enabled at server: com.sun.jmx.mbeanserver.JmxMBeanServer@64109b22
[junit4] 2> 3853914 INFO (TEST-ShardSplitTest.testSplitAfterFailedSplit2-seed#[E7328758635BF3AA]) [n:127.0.0.1:40705_qw_i%2Fpn ] o.a.s.c.CorePropertiesLocator Found 0 core definitions underneath /home/jenkins/jenkins-slave/workspace/Lucene-Solr-NightlyTests-8.6/checkout/solr/build/solr-core/test/J0/temp/solr.cloud.api.collections.ShardSplitTest_E7328758635BF3AA-001/control-001/cores
[junit4] 2> 3853924 INFO (TEST-ShardSplitTest.testSplitAfterFailedSplit2-seed#[E7328758635BF3AA]) [ ] o.a.s.c.c.ConnectionManager Waiting for client to connect to ZooKeeper
[junit4] 2> 3853925 INFO (zkConnectionManagerCallback-34689-thread-1) [ ] o.a.s.c.c.ConnectionManager zkClient has connected
[junit4] 2> 3853925 INFO (TEST-ShardSplitTest.testSplitAfterFailedSplit2-seed#[E7328758635BF3AA]) [ ] o.a.s.c.c.ConnectionManager Client is connected to ZooKeeper
[junit4] 2> 3853926 INFO (TEST-ShardSplitTest.testSplitAfterFailedSplit2-seed#[E7328758635BF3AA]) [ ] o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (0) -> (1)
[junit4] 2> 3853926 INFO (TEST-ShardSplitTest.testSplitAfterFailedSplit2-seed#[E7328758635BF3AA]) [ ] o.a.s.c.s.i.ZkClientClusterStateProvider Cluster at 127.0.0.1:45781/solr ready
[junit4] 2> 3853927 INFO (qtp696225264-76648) [n:127.0.0.1:40705_qw_i%2Fpn ] o.a.s.h.a.CollectionsHandler Invoked Collection Action :create with params collection.configName=conf1&name=control_collection&nrtReplicas=1&action=CREATE&numShards=1&createNodeSet=127.0.0.1:40705_qw_i%252Fpn&wt=javabin&version=2 and sendToOCPQueue=true
[junit4] 2> 3853929 DEBUG (OverseerCollectionConfigSetProcessor-72186908250800132-127.0.0.1:40705_qw_i%2Fpn-n_0000000000) [n:127.0.0.1:40705_qw_i%2Fpn ] o.a.s.c.OverseerTaskProcessor Got 1 tasks from work-queue : [[org.apache.solr.cloud.OverseerTaskQueue$QueueEvent@2e684ecf]]
[junit4] 2> 3853929 DEBUG (OverseerCollectionConfigSetProcessor-72186908250800132-127.0.0.1:40705_qw_i%2Fpn-n_0000000000) [n:127.0.0.1:40705_qw_i%2Fpn ] o.a.s.c.OverseerTaskProcessor Marked task [/overseer/collection-queue-work/qn-0000000000] as running
[junit4] 2> 3853929 DEBUG (OverseerCollectionConfigSetProcessor-72186908250800132-127.0.0.1:40705_qw_i%2Fpn-n_0000000000) [n:127.0.0.1:40705_qw_i%2Fpn ] o.a.s.c.OverseerTaskProcessor Overseer Collection Message Handler: Get the message id: /overseer/collection-queue-work/qn-0000000000 message: {
[junit4] 2> "name":"control_collection",
[junit4] 2> "fromApi":"true",
[junit4] 2> "collection.configName":"conf1",
[junit4] 2> "numShards":"1",
[junit4] 2> "createNodeSet":"127.0.0.1:40705_qw_i%2Fpn",
[junit4] 2> "nrtReplicas":"1",
[junit4] 2> "stateFormat":"2",
[junit4] 2> "replicationFactor":"1",
[junit4] 2> "operation":"create"}
[junit4] 2> 3853933 DEBUG (OverseerThreadFactory-34679-thread-1-processing-n:127.0.0.1:40705_qw_i%2Fpn) [n:127.0.0.1:40705_qw_i%2Fpn ] o.a.s.c.OverseerTaskProcessor Runner processing /overseer/collection-queue-work/qn-0000000000
[junit4] 2> 3853933 DEBUG (OverseerThreadFactory-34679-thread-1-processing-n:127.0.0.1:40705_qw_i%2Fpn) [n:127.0.0.1:40705_qw_i%2Fpn ] o.a.s.c.a.c.OverseerCollectionMessageHandler OverseerCollectionMessageHandler.processMessage : create , {
[junit4] 2> "name":"control_collection",
[junit4] 2> "fromApi":"true",
[junit4] 2> "collection.configName":"conf1",
[junit4] 2> "numShards":"1",
[junit4] 2> "createNodeSet":"127.0.0.1:40705_qw_i%2Fpn",
[junit4] 2> "nrtReplicas":"1",
[junit4] 2> "stateFormat":"2",
[junit4] 2> "replicationFactor":"1",
[junit4] 2> "operation":"create"}
[junit4] 2> 3853934 DEBUG (OverseerCollectionConfigSetProcessor-72186908250800132-127.0.0.1:40705_qw_i%2Fpn-n_0000000000) [n:127.0.0.1:40705_qw_i%2Fpn ] o.a.s.c.OverseerTaskProcessor Cleaning up work-queue. #Running tasks: 1 #Completed tasks: 0
[junit4] 2> 3853934 DEBUG (OverseerCollectionConfigSetProcessor-72186908250800132-127.0.0.1:40705_qw_i%2Fpn-n_0000000000) [n:127.0.0.1:40705_qw_i%2Fpn ] o.a.s.c.OverseerTaskProcessor RunningTasks: [/overseer/collection-queue-work/qn-0000000000]
[junit4] 2> 3853934 DEBUG (OverseerCollectionConfigSetProcessor-72186908250800132-127.0.0.1:40705_qw_i%2Fpn-n_0000000000) [n:127.0.0.1:40705_qw_i%2Fpn ] o.a.s.c.OverseerTaskProcessor BlockedTasks: []
[junit4] 2> 3853934 DEBUG (OverseerCollectionConfigSetProcessor-72186908250800132-127.0.0.1:40705_qw_i%2Fpn-n_0000000000) [n:127.0.0.1:40705_qw_i%2Fpn ] o.a.s.c.OverseerTaskProcessor CompletedTasks: []
[junit4] 2> 3853934 INFO (OverseerCollectionConfigSetProcessor-72186908250800132-127.0.0.1:40705_qw_i%2Fpn-n_0000000000) [n:127.0.0.1:40705_qw_i%2Fpn ] o.a.s.c.OverseerTaskProcessor RunningZKTasks: [/overseer/collection-queue-work/qn-0000000000]
[junit4] 2> 3853934 INFO (OverseerThreadFactory-34679-thread-1-processing-n:127.0.0.1:40705_qw_i%2Fpn) [n:127.0.0.1:40705_qw_i%2Fpn ] o.a.s.c.a.c.CreateCollectionCmd Create collection control_collection
[junit4] 2> 3853934 DEBUG (OverseerThreadFactory-34679-thread-1-processing-n:127.0.0.1:40705_qw_i%2Fpn) [n:127.0.0.1:40705_qw_i%2Fpn ] o.a.s.c.a.c.OverseerCollectionMessageHandler creating collections conf node /collections/control_collection
[junit4] 2> 3853934 DEBUG (OverseerThreadFactory-34679-thread-1-processing-n:127.0.0.1:40705_qw_i%2Fpn) [n:127.0.0.1:40705_qw_i%2Fpn ] o.a.s.c.a.c.CreateCollectionCmd Check for collection zkNode: control_collection
[junit4] 2> 3853935 DEBUG (OverseerThreadFactory-34679-thread-1-processing-n:127.0.0.1:40705_qw_i%2Fpn) [n:127.0.0.1:40705_qw_i%2Fpn ] o.a.s.c.a.c.CreateCollectionCmd Collection zkNode exists
[junit4] 2> 3853936 DEBUG (OverseerStateUpdate-72186908250800132-127.0.0.1:40705_qw_i%2Fpn-n_0000000000) [n:127.0.0.1:40705_qw_i%2Fpn ] o.a.s.c.Overseer processMessage: queueSize: 1, message = {
[junit4] 2> "name":"control_collection",
[junit4] 2> "fromApi":"true",
[junit4] 2> "collection.configName":"conf1",
[junit4] 2> "numShards":"1",
[junit4] 2> "createNodeSet":"127.0.0.1:40705_qw_i%2Fpn",
[junit4] 2> "nrtReplicas":"1",
[junit4] 2> "stateFormat":"2",
[junit4] 2> "replicationFactor":"1",
[junit4] 2> "operation":"create"} current state version: 0
[junit4] 2> 3853936 DEBUG (OverseerStateUpdate-72186908250800132-127.0.0.1:40705_qw_i%2Fpn-n_0000000000) [n:127.0.0.1:40705_qw_i%2Fpn ] o.a.s.c.o.ClusterStateMutator building a new cName: control_collection
[junit4] 2> 3853937 DEBUG (OverseerStateUpdate-72186908250800132-127.0.0.1:40705_qw_i%2Fpn-n_0000000000) [n:127.0.0.1:40705_qw_i%2Fpn ] o.a.s.c.o.ZkStateWriter going to create_collection /collections/control_collection/state.json
[junit4] 2> 3854037 DEBUG (OverseerThreadFactory-34679-thread-1-processing-n:127.0.0.1:40705_qw_i%2Fpn) [n:127.0.0.1:40705_qw_i%2Fpn ] o.a.s.c.a.c.Assign shardnames [shard1] NRT 1 TLOG 0 PULL 0 , policy null, nodeList [127.0.0.1:40705_qw_i%2Fpn]
[junit4] 2> 3854039 INFO (qtp696225264-76650) [n:127.0.0.1:40705_qw_i%2Fpn ] o.a.s.c.TransientSolrCoreCacheDefault Allocating transient cache for 4 transient cores
[junit4] 2> 3854039 INFO (qtp696225264-76650) [n:127.0.0.1:40705_qw_i%2Fpn ] o.a.s.s.HttpSolrCall [admin] webapp=null path=/admin/metrics params={prefix=CONTAINER.fs.usableSpace,CONTAINER.fs.totalSpace,CONTAINER.cores&wt=javabin&version=2&group=solr.node} status=0 QTime=0
[junit4] 2> 3854041 DEBUG (OverseerThreadFactory-34679-thread-1-processing-n:127.0.0.1:40705_qw_i%2Fpn) [n:127.0.0.1:40705_qw_i%2Fpn ] o.a.s.c.a.c.CreateCollectionCmd Creating SolrCores for new collection control_collection, shardNames [shard1] , message : {
[junit4] 2> "name":"control_collection",
[junit4] 2> "fromApi":"true",
[junit4] 2> "collection.configName":"conf1",
[junit4] 2> "numShards":"1",
[junit4] 2> "createNodeSet":"127.0.0.1:40705_qw_i%2Fpn",
[junit4] 2> "nrtReplicas":"1",
[junit4] 2> "stateFormat":"2",
[junit4] 2> "replicationFactor":"1",
[junit4] 2> "operation":"create"}
[junit4] 2> 3854042 DEBUG (OverseerThreadFactory-34679-thread-1-processing-n:127.0.0.1:40705_qw_i%2Fpn) [n:127.0.0.1:40705_qw_i%2Fpn ] o.a.s.c.a.c.CreateCollectionCmd Creating core control_collection_shard1_replica_n1 as part of shard shard1 of collection control_collection on 127.0.0.1:40705_qw_i%2Fpn
[junit4] 2> 3854043 INFO (qtp696225264-76651) [n:127.0.0.1:40705_qw_i%2Fpn x:control_collection_shard1_replica_n1 ] o.a.s.h.a.CoreAdminOperation core create command qt=/admin/cores&collection.configName=conf1&newCollection=true&name=control_collection_shard1_replica_n1&action=CREATE&numShards=1&collection=control_collection&shard=shard1&wt=javabin&version=2&replicaType=NRT
[junit4] 2> 3854045 DEBUG (OverseerStateUpdate-72186908250800132-127.0.0.1:40705_qw_i%2Fpn-n_0000000000) [n:127.0.0.1:40705_qw_i%2Fpn ] o.a.s.c.Overseer processMessage: queueSize: 1, message = {
[junit4] 2> "core":"control_collection_shard1_replica_n1",
[junit4] 2> "roles":null,
[junit4] 2> "base_url":"http://127.0.0.1:40705/qw_i/pn",
[junit4] 2> "node_name":"127.0.0.1:40705_qw_i%2Fpn",
[junit4] 2> "numShards":"1",
[junit4] 2> "state":"down",
[junit4] 2> "shard":"shard1",
[junit4] 2> "collection":"control_collection",
[junit4] 2> "type":"NRT",
[junit4] 2> "operation":"state"} current state version: 0
[junit4] 2> 3854045 DEBUG (OverseerStateUpdate-72186908250800132-127.0.0.1:40705_qw_i%2Fpn-n_0000000000) [n:127.0.0.1:40705_qw_i%2Fpn ] o.a.s.c.o.ReplicaMutator Update state numShards=1 message={
[junit4] 2> "core":"control_collection_shard1_replica_n1",
[junit4] 2> "roles":null,
[junit4] 2> "base_url":"http://127.0.0.1:40705/qw_i/pn",
[junit4] 2> "node_name":"127.0.0.1:40705_qw_i%2Fpn",
[junit4] 2> "numShards":"1",
[junit4] 2> "state":"down",
[junit4] 2> "shard":"shard1",
[junit4] 2> "collection":"control_collection",
[junit4] 2> "type":"NRT",
[junit4] 2> "operation":"state"}
[junit4] 2> 3854046 DEBUG (OverseerStateUpdate-72186908250800132-127.0.0.1:40705_qw_i%2Fpn-n_0000000000) [n:127.0.0.1:40705_qw_i%2Fpn ] o.a.s.c.o.ReplicaMutator Will update state for replica: core_node2:{
[junit4] 2> "core":"control_collection_shard1_replica_n1",
[junit4] 2> "base_url":"http://127.0.0.1:40705/qw_i/pn",
[junit4] 2> "node_name":"127.0.0.1:40705_qw_i%2Fpn",
[junit4] 2> "state":"down",
[junit4] 2> "type":"NRT"}
[junit4] 2> 3854046 DEBUG (OverseerStateUpdate-72186908250800132-127.0.0.1:40705_qw_i%2Fpn-n_0000000000) [n:127.0.0.1:40705_qw_i%2Fpn ] o.a.s.c.o.ReplicaMutator Collection is now: DocCollection(control_collection//collections/control_collection/state.json/0)={
[junit4] 2> "pullReplicas":"0",
[junit4] 2> "replicationFactor":"1",
[junit4] 2> "router":{"name":"compositeId"},
[junit4] 2> "maxShardsPerNode":"1",
[junit4] 2> "autoAddReplicas":"false",
[junit4] 2> "nrtReplicas":"1",
[junit4] 2> "tlogReplicas":"0",
[junit4] 2> "shards":{"shard1":{
[junit4] 2> "range":"80000000-7fffffff",
[junit4] 2> "state":"active",
[junit4] 2> "replicas":{"core_node2":{
[junit4] 2> "core":"control_collection_shard1_replica_n1",
[junit4] 2> "base_url":"http://127.0.0.1:40705/qw_i/pn",
[junit4] 2> "node_name":"127.0.0.1:40705_qw_i%2Fpn",
[junit4] 2> "state":"down",
[junit4] 2> "type":"NRT"}}}}}
[junit4] 2> 3854147 DEBUG (OverseerStateUpdate-72186908250800132-127.0.0.1:40705_qw_i%2Fpn-n_0000000000) [n:127.0.0.1:40705_qw_i%2Fpn ] o.a.s.c.o.ZkStateWriter going to update_collection /collections/control_collection/state.json version: 0
[junit4] 2> 3855055 INFO (qtp696225264-76651) [n:127.0.0.1:40705_qw_i%2Fpn c:control_collection s:shard1 x:control_collection_shard1_replica_n1 ] o.a.s.c.SolrConfig Using Lucene MatchVersion: 8.6.1
[junit4] 2> 3855069 INFO (qtp696225264-76651) [n:127.0.0.1:40705_qw_i%2Fpn c:control_collection s:shard1 x:control_collection_shard1_replica_n1 ] o.a.s.s.IndexSchema Schema name=test
[junit4] 2> 3855147 INFO (qtp696225264-76651) [n:127.0.0.1:40705_qw_i%2Fpn c:control_collection s:shard1 x:control_collection_shard1_replica_n1 ] o.a.s.s.IndexSchema Loaded schema test/1.6 with uniqueid field id
[junit4] 2> 3855165 INFO (qtp696225264-76651) [n:127.0.0.1:40705_qw_i%2Fpn c:control_collection s:shard1 x:control_collection_shard1_replica_n1 ] o.a.s.c.CoreContainer Creating SolrCore 'control_collection_shard1_replica_n1' using configuration from configset conf1, trusted=true
[junit4] 2> 3855165 INFO (qtp696225264-76651) [n:127.0.0.1:40705_qw_i%2Fpn c:control_collection s:shard1 x:control_collection_shard1_replica_n1 ] o.a.s.m.r.SolrJmxReporter JMX monitoring for 'solr.core.control_collection.shard1.replica_n1' (registry 'solr.core.control_collection.shard1.replica_n1') enabled at server: com.sun.jmx.mbeanserver.JmxMBeanServer@64109b22
[junit4] 2> 3855165 INFO (qtp696225264-76651) [n:127.0.0.1:40705_qw_i%2Fpn c:control_collection s:shard1 x:control_collection_shard1_replica_n1 ] o.a.s.c.SolrCore [[control_collection_shard1_replica_n1] ] Opening new SolrCore at [/home/jenkins/jenkins-slave/workspace/Lucene-Solr-NightlyTests-8.6/checkout/solr/build/solr-core/test/J0/temp/solr.cloud.api.collections.ShardSplitTest_E7328758635BF3AA-001/control-001/cores/control_collection_shard1_replica_n1], dataDir=[/home/jenkins/jenkins-slave/workspace/Lucene-Solr-NightlyTests-8.6/checkout/solr/build/solr-core/test/J0/temp/solr.cloud.api.collections.ShardSplitTest_E7328758635BF3AA-001/control-001/cores/control_collection_shard1_replica_n1/data/]
[junit4] 2> 3855169 INFO (qtp696225264-76651) [n:127.0.0.1:40705_qw_i%2Fpn c:control_collection s:shard1 x:control_collection_shard1_replica_n1 ] o.a.s.u.RandomMergePolicy RandomMergePolicy wrapping class org.apache.lucene.index.LogDocMergePolicy: [LogDocMergePolicy: minMergeSize=1000, mergeFactor=16, maxMergeSize=9223372036854775807, maxMergeSizeForForcedMerge=9223372036854775807, calibrateSizeByDeletes=false, maxMergeDocs=2147483647, maxCFSSegmentSizeMB=8.796093022207999E12, noCFSRatio=0.0]
[junit4] 2> 3855191 WARN (qtp696225264-76651) [n:127.0.0.1:40705_qw_i%2Fpn c:control_collection s:shard1 x:control_collection_shard1_replica_n1 ] o.a.s.c.RequestHandlers INVALID paramSet a in requestHandler {type = requestHandler,name = /dump,class = DumpRequestHandler,attributes = {initParams=a, name=/dump, class=DumpRequestHandler},args = {defaults={a=A,b=B}}}
[junit4] 2> 3855237 INFO (qtp696225264-76651) [n:127.0.0.1:40705_qw_i%2Fpn c:control_collection s:shard1 x:control_collection_shard1_replica_n1 ] o.a.s.u.UpdateHandler Using UpdateLog implementation: org.apache.solr.update.UpdateLog
[junit4] 2> 3855237 INFO (qtp696225264-76651) [n:127.0.0.1:40705_qw_i%2Fpn c:control_collection s:shard1 x:control_collection_shard1_replica_n1 ] o.a.s.u.UpdateLog Initializing UpdateLog: dataDir= defaultSyncLevel=FLUSH numRecordsToKeep=100 maxNumLogsToKeep=10 numVersionBuckets=65536
[junit4] 2> 3855238 INFO (qtp696225264-76651) [n:127.0.0.1:40705_qw_i%2Fpn c:control_collection s:shard1 x:control_collection_shard1_replica_n1 ] o.a.s.u.CommitTracker Hard AutoCommit: disabled
[junit4] 2> 3855238 INFO (qtp696225264-76651) [n:127.0.0.1:40705_qw_i%2Fpn c:control_collection s:shard1 x:control_collection_shard1_replica_n1 ] o.a.s.u.CommitTracker Soft AutoCommit: disabled
[junit4] 2> 3855239 INFO (qtp696225264-76651) [n:127.0.0.1:40705_qw_i%2Fpn c:control_collection s:shard1 x:control_collection_shard1_replica_n1 ] o.a.s.u.RandomMergePolicy RandomMergePolicy wrapping class org.apache.lucene.index.LogDocMergePolicy: [LogDocMergePolicy: minMergeSize=1000, mergeFactor=19, maxMergeSize=9223372036854775807, maxMergeSizeForForcedMerge=9223372036854775807, calibrateSizeByDeletes=true, maxMergeDocs=2147483647, maxCFSSegmentSizeMB=8.796093022207999E12, noCFSRatio=0.8964368909406777]
[junit4] 2> 3855241 INFO (qtp696225264-76651) [n:127.0.0.1:40705_qw_i%2Fpn c:control_collection s:shard1 x:control_collection_shard1_replica_n1 ] o.a.s.r.ManagedResourceStorage Configured ZooKeeperStorageIO with znodeBase: /configs/conf1
[junit4] 2> 3855241 INFO (qtp696225264-76651) [n:127.0.0.1:40705_qw_i%2Fpn c:control_collection s:shard1 x:control_collection_shard1_replica_n1 ] o.a.s.r.ManagedResourceStorage Loaded null at path _rest_managed.json using ZooKeeperStorageIO:path=/configs/conf1
[junit4] 2> 3855241 INFO (qtp696225264-76651) [n:127.0.0.1:40705_qw_i%2Fpn c:control_collection s:shard1 x:control_collection_shard1_replica_n1 ] o.a.s.h.ReplicationHandler Commits will be reserved for 10000 ms
[junit4] 2> 3855241 INFO (qtp696225264-76651) [n:127.0.0.1:40705_qw_i%2Fpn c:control_collection s:shard1 x:control_collection_shard1_replica_n1 ] o.a.s.u.UpdateLog Could not find max version in index or recent updates, using new clock 1673029921107083264
[junit4] 2> 3855244 INFO (searcherExecutor-34691-thread-1-processing-n:127.0.0.1:40705_qw_i%2Fpn x:control_collection_shard1_replica_n1 c:control_collection s:shard1) [n:127.0.0.1:40705_qw_i%2Fpn c:control_collection s:shard1 x:control_collection_shard1_replica_n1 ] o.a.s.c.SolrCore [control_collection_shard1_replica_n1] Registered new searcher autowarm time: 0 ms
[junit4] 2> 3855247 INFO (qtp696225264-76651) [n:127.0.0.1:40705_qw_i%2Fpn c:control_collection s:shard1 x:control_collection_shard1_replica_n1 ] o.a.s.c.ZkShardTerms Successful update of terms at /collections/control_collection/terms/shard1 to Terms{values={core_node2=0}, version=0}
[junit4] 2> 3855247 INFO (qtp696225264-76651) [n:127.0.0.1:40705_qw_i%2Fpn c:control_collection s:shard1 x:control_collection_shard1_replica_n1 ] o.a.s.c.ShardLeaderElectionContextBase make sure parent is created /collections/control_collection/leaders/shard1
[junit4] 2> 3855249 INFO (qtp696225264-76651) [n:127.0.0.1:40705_qw_i%2Fpn c:control_collection s:shard1 x:control_collection_shard1_replica_n1 ] o.a.s.c.ShardLeaderElectionContext Enough replicas found to continue.
[junit4] 2> 3855249 INFO (qtp696225264-76651) [n:127.0.0.1:40705_qw_i%2Fpn c:control_collection s:shard1 x:control_collection_shard1_replica_n1 ] o.a.s.c.ShardLeaderElectionContext I may be the new leader - try and sync
[junit4] 2> 3855249 INFO (qtp696225264-76651) [n:127.0.0.1:40705_qw_i%2Fpn c:control_collection s:shard1 x:control_collection_shard1_replica_n1 ] o.a.s.c.SyncStrategy Sync replicas to http://127.0.0.1:40705/qw_i/pn/control_collection_shard1_replica_n1/
[junit4] 2> 3855249 INFO (qtp696225264-76651) [n:127.0.0.1:40705_qw_i%2Fpn c:control_collection s:shard1 x:control_collection_shard1_replica_n1 ] o.a.s.c.SyncStrategy Sync Success - now sync replicas to me
[junit4] 2> 3855249 INFO (qtp696225264-76651) [n:127.0.0.1:40705_qw_i%2Fpn c:control_collection s:shard1 x:control_collection_shard1_replica_n1 ] o.a.s.c.SyncStrategy http://127.0.0.1:40705/qw_i/pn/control_collection_shard1_replica_n1/ has no replicas
[junit4] 2> 3855249 INFO (qtp696225264-76651) [n:127.0.0.1:40705_qw_i%2Fpn c:control_collection s:shard1 x:control_collection_shard1_replica_n1 ] o.a.s.c.ShardLeaderElectionContextBase Creating leader registration node /collections/control_collection/leaders/shard1/leader after winning as /collections/control_collection/leader_elect/shard1/election/72186908250800132-core_node2-n_0000000000
[junit4] 2> 3855251 INFO (qtp696225264-76651) [n:127.0.0.1:40705_qw_i%2Fpn c:control_collection s:shard1 x:control_collection_shard1_replica_n1 ] o.a.s.c.ShardLeaderElectionContext I am the new leader: http://127.0.0.1:40705/qw_i/pn/control_collection_shard1_replica_n1/ shard1
[junit4] 2> 3855251 DEBUG (OverseerStateUpdate-72186908250800132-127.0.0.1:40705_qw_i%2Fpn-n_0000000000) [n:127.0.0.1:40705_qw_i%2Fpn ] o.a.s.c.Overseer processMessage: queueSize: 1, message = {
[junit4] 2> "operation":"leader",
[junit4] 2> "shard":"shard1",
[junit4] 2> "collection":"control_collection",
[junit4] 2> "base_url":"http://127.0.0.1:40705/qw_i/pn",
[junit4] 2> "core":"control_collection_shard1_replica_n1",
[junit4] 2> "state":"active"} current state version: 0
[junit4] 2> 3855352 DEBUG (OverseerStateUpdate-72186908250800132-127.0.0.1:40705_qw_i%2Fpn-n_0000000000) [n:127.0.0.1:40705_qw_i%2Fpn ] o.a.s.c.o.ZkStateWriter going to update_collection /collections/control_collection/state.json version: 1
[junit4] 2> 3855352 INFO (zkCallback-34671-thread-1) [ ] o.a.s.c.c.ZkStateReader A cluster state change: [WatchedEvent state:SyncConnected type:NodeDataChanged path:/collections/control_collection/state.json] for collection [control_collection] has occurred - updating... (live nodes size: [1])
[junit4] 2> 3855352 INFO (zkCallback-34671-thread-2) [ ] o.a.s.c.c.ZkStateReader A cluster state change: [WatchedEvent state:SyncConnected type:NodeDataChanged path:/collections/control_collection/state.json] for collection [control_collection] has occurred - updating... (live nodes size: [1])
[junit4] 2> 3855353 INFO (qtp696225264-76651) [n:127.0.0.1:40705_qw_i%2Fpn c:control_collection s:shard1 x:control_collection_shard1_replica_n1 ] o.a.s.c.ZkController I am the leader, no recovery necessary
[junit4] 2> 3855354 DEBUG (OverseerStateUpdate-72186908250800132-127.0.0.1:40705_qw_i%2Fpn-n_0000000000) [n:127.0.0.1:40705_qw_i%2Fpn ] o.a.s.c.Overseer processMessage: queueSize: 1, message = {
[junit4] 2> "core":"control_collection_shard1_replica_n1",
[junit4] 2> "core_node_name":"core_node2",
[junit4] 2> "roles":null,
[junit4] 2> "base_url":"http://127.0.0.1:40705/qw_i/pn",
[junit4] 2> "node_name":"127.0.0.1:40705_qw_i%2Fpn",
[junit4] 2> "numShards":"1",
[junit4] 2> "state":"active",
[junit4] 2> "shard":"shard1",
[junit4] 2> "collection":"control_collection",
[junit4] 2> "type":"NRT",
[junit4] 2> "operation":"state"} current state version: 0
[junit4] 2> 3855354 DEBUG (OverseerStateUpdate-72186908250800132-127.0.0.1:40705_qw_i%2Fpn-n_0000000000) [n:127.0.0.1:40705_qw_i%2Fpn ] o.a.s.c.o.ReplicaMutator Update state numShards=1 message={
[junit4] 2> "core":"control_collection_shard1_replica_n1",
[junit4] 2> "core_node_name":"core_node2",
[junit4] 2> "roles":null,
[junit4] 2> "base_url":"http://127.0.0.1:40705/qw_i/pn",
[junit4] 2> "node_name":"127.0.0.1:40705_qw_i%2Fpn",
[junit4] 2> "numShards":"1",
[junit4] 2> "state":"active",
[junit4] 2> "shard":"shard1",
[junit4] 2> "collection":"control_collection",
[junit4] 2> "type":"NRT",
[junit4] 2> "operation":"state"}
[junit4] 2> 3855354 DEBUG (OverseerStateUpdate-72186908250800132-127.0.0.1:40705_qw_i%2Fpn-n_0000000000) [n:127.0.0.1:40705_qw_i%2Fpn ] o.a.s.c.o.ReplicaMutator Will update state for replica: core_node2:{
[junit4] 2> "core":"control_collection_shard1_replica_n1",
[junit4] 2> "base_url":"http://127.0.0.1:40705/qw_i/pn",
[junit4] 2> "node_name":"127.0.0.1:40705_qw_i%2Fpn",
[junit4] 2> "state":"active",
[junit4] 2> "type":"NRT",
[junit4] 2> "leader":"true"}
[junit4] 2> 3855354 DEBUG (OverseerStateUpdate-72186908250800132-127.0.0.1:40705_qw_i%2Fpn-n_0000000000) [n:127.0.0.1:40705_qw_i%2Fpn ] o.a.s.c.o.ReplicaMutator Collection is now: DocCollection(control_collection//collections/control_collection/state.json/2)={
[junit4] 2> "pullReplicas":"0",
[junit4] 2> "replicationFactor":"1",
[junit4] 2> "router":{"name":"compositeId"},
[junit4] 2> "maxShardsPerNode":"1",
[junit4] 2> "autoAddReplicas":"false",
[junit4] 2> "nrtReplicas":"1",
[junit4] 2> "tlogReplicas":"0",
[junit4] 2> "shards":{"shard1":{
[junit4] 2> "range":"80000000-7fffffff",
[junit4] 2> "state":"active",
[junit4] 2> "replicas":{"core_node2":{
[junit4] 2> "core":"control_collection_shard1_replica_n1",
[junit4] 2> "base_url":"http://127.0.0.1:40705/qw_i/pn",
[junit4] 2> "node_name":"127.0.0.1:40705_qw_i%2Fpn",
[junit4] 2> "state":"active",
[junit4] 2> "type":"NRT",
[junit4] 2> "leader":"true"}}}}}
[junit4] 2> 3855355 INFO (qtp696225264-76651) [n:127.0.0.1:40705_qw_i%2Fpn ] o.a.s.s.HttpSolrCall [admin] webapp=null path=/admin/cores params={qt=/admin/cores&collection.configName=conf1&newCollection=true&name=control_collection_shard1_replica_n1&action=CREATE&numShards=1&collection=control_collection&shard=shard1&wt=javabin&version=2&replicaType=NRT} status=0 QTime=1311
[junit4] 2> 3855355 DEBUG (OverseerThreadFactory-34679-thread-1-processing-n:127.0.0.1:40705_qw_i%2Fpn) [n:127.0.0.1:40705_qw_i%2Fpn ] o.a.s.c.a.c.CreateCollectionCmd Finished create command on all shards for collection: control_collection
[junit4] 2> 3855355 DEBUG (OverseerThreadFactory-34679-thread-1-processing-n:127.0.0.1:40705_qw_i%2Fpn) [n:127.0.0.1:40705_qw_i%2Fpn ] o.a.s.c.OverseerTaskProcessor Completed task:[/overseer/collection-queue-work/qn-0000000000]
[junit4] 2> 3855355 DEBUG (OverseerThreadFactory-34679-thread-1-processing-n:127.0.0.1:40705_qw_i%2Fpn) [n:127.0.0.1:40705_qw_i%2Fpn ] o.a.s.c.OverseerTaskProcessor Marked task [/overseer/collection-queue-work/qn-0000000000] as completed.
[junit4] 2> 3855355 DEBUG (OverseerThreadFactory-34679-thread-1-processing-n:127.0.0.1:40705_qw_i%2Fpn) [n:127.0.0.1:40705_qw_i%2Fpn ] o.a.s.c.OverseerTaskProcessor RunningTasks: []
[junit4] 2> 3855356 DEBUG (OverseerThreadFactory-34679-thread-1-processing-n:127.0.0.1:40705_qw_i%2Fpn) [n:127.0.0.1:40705_qw_i%2Fpn ] o.a.s.c.OverseerTaskProcessor BlockedTasks: []
[junit4] 2> 3855356 DEBUG (OverseerThreadFactory-34679-thread-1-processing-n:127.0.0.1:40705_qw_i%2Fpn) [n:127.0.0.1:40705_qw_i%2Fpn ] o.a.s.c.OverseerTaskProcessor CompletedTasks: [/overseer/collection-queue-work/qn-0000000000]
[junit4] 2> 3855356 INFO (OverseerThreadFactory-34679-thread-1-processing-n:127.0.0.1:40705_qw_i%2Fpn) [n:127.0.0.1:40705_qw_i%2Fpn ] o.a.s.c.OverseerTaskProcessor RunningZKTasks: [/overseer/collection-queue-work/qn-0000000000]
[junit4] 2> 3855356 DEBUG (OverseerThreadFactory-34679-thread-1-processing-n:127.0.0.1:40705_qw_i%2Fpn) [n:127.0.0.1:40705_qw_i%2Fpn ] o.a.s.c.OverseerTaskProcessor Overseer Collection Message Handler: Message id: /overseer/collection-queue-work/qn-0000000000 complete, response: {success={127.0.0.1:40705_qw_i%2Fpn={responseHeader={status=0,QTime=1311},core=control_collection_shard1_replica_n1}}}
[junit4] 2> 3855357 INFO (qtp696225264-76648) [n:127.0.0.1:40705_qw_i%2Fpn ] o.a.s.h.a.CollectionsHandler Wait for new collection to be active for at most 45 seconds. Check all shard replicas
[junit4] 2> 3855455 DEBUG (OverseerStateUpdate-72186908250800132-127.0.0.1:40705_qw_i%2Fpn-n_0000000000) [n:127.0.0.1:40705_qw_i%2Fpn ] o.a.s.c.o.ZkStateWriter going to update_collection /collections/control_collection/state.json version: 2
[junit4] 2> 3855455 INFO (zkCallback-34671-thread-2) [ ] o.a.s.c.c.ZkStateReader A cluster state change: [WatchedEvent state:SyncConnected type:NodeDataChanged path:/collections/control_collection/state.json] for collection [control_collection] has occurred - updating... (live nodes size: [1])
[junit4] 2> 3855455 INFO (zkCallback-34671-thread-3) [ ] o.a.s.c.c.ZkStateReader A cluster state change: [WatchedEvent state:SyncConnected type:NodeDataChanged path:/collections/control_collection/state.json] for collection [control_collection] has occurred - updating... (live
[...truncated too long message...]
(ThreadPoolExecutor.java:1074)
[junit4] > at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1134)
[junit4] > at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
[junit4] > at java.lang.Thread.run(Thread.java:748)
[junit4] > 2) Thread[id=79111, name=qtp78814574-79111, state=TIMED_WAITING, group=TGRP-ShardSplitTest]
[junit4] > at sun.misc.Unsafe.park(Native Method)
[junit4] > at java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:215)
[junit4] > at java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:2078)
[junit4] > at org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382)
[junit4] > at org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:875)
[junit4] > at org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:925)
[junit4] > at java.lang.Thread.run(Thread.java:748)
[junit4] > 3) Thread[id=79108, name=qtp78814574-79108, state=TIMED_WAITING, group=TGRP-ShardSplitTest]
[junit4] > at sun.misc.Unsafe.park(Native Method)
[junit4] > at java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:215)
[junit4] > at java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:2078)
[junit4] > at org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382)
[junit4] > at org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:875)
[junit4] > at org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:925)
[junit4] > at java.lang.Thread.run(Thread.java:748)
[junit4] > 4) Thread[id=79106, name=qtp78814574-79106, state=RUNNABLE, group=TGRP-ShardSplitTest]
[junit4] > at sun.nio.ch.EPollArrayWrapper.epollWait(Native Method)
[junit4] > at sun.nio.ch.EPollArrayWrapper.poll(EPollArrayWrapper.java:269)
[junit4] > at sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:93)
[junit4] > at sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:86)
[junit4] > at sun.nio.ch.SelectorImpl.select(SelectorImpl.java:97)
[junit4] > at sun.nio.ch.SelectorImpl.select(SelectorImpl.java:101)
[junit4] > at org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:472)
[junit4] > at org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:409)
[junit4] > at org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:360)
[junit4] > at org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:184)
[junit4] > at org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:171)
[junit4] > at org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:135)
[junit4] > at org.eclipse.jetty.io.ManagedSelector$$Lambda$66/508422401.run(Unknown Source)
[junit4] > at org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:806)
[junit4] > at org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:938)
[junit4] > at java.lang.Thread.run(Thread.java:748)
[junit4] > 5) Thread[id=79109, name=qtp78814574-79109, state=TIMED_WAITING, group=TGRP-ShardSplitTest]
[junit4] > at sun.misc.Unsafe.park(Native Method)
[junit4] > at java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:215)
[junit4] > at java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:2078)
[junit4] > at org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382)
[junit4] > at org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:875)
[junit4] > at org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:925)
[junit4] > at java.lang.Thread.run(Thread.java:748)
[junit4] > 6) Thread[id=79107, name=qtp78814574-79107, state=TIMED_WAITING, group=TGRP-ShardSplitTest]
[junit4] > at sun.misc.Unsafe.park(Native Method)
[junit4] > at java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:215)
[junit4] > at java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:2078)
[junit4] > at org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382)
[junit4] > at org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:875)
[junit4] > at org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:925)
[junit4] > at java.lang.Thread.run(Thread.java:748)
[junit4] > 7) Thread[id=79112, name=qtp78814574-79112, state=TIMED_WAITING, group=TGRP-ShardSplitTest]
[junit4] > at sun.misc.Unsafe.park(Native Method)
[junit4] > at java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:215)
[junit4] > at java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:2078)
[junit4] > at org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382)
[junit4] > at org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:875)
[junit4] > at org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:925)
[junit4] > at java.lang.Thread.run(Thread.java:748)
[junit4] > 8) Thread[id=79105, name=qtp78814574-79105, state=RUNNABLE, group=TGRP-ShardSplitTest]
[junit4] > at sun.nio.ch.EPollArrayWrapper.epollWait(Native Method)
[junit4] > at sun.nio.ch.EPollArrayWrapper.poll(EPollArrayWrapper.java:269)
[junit4] > at sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:93)
[junit4] > at sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:86)
[junit4] > at sun.nio.ch.SelectorImpl.select(SelectorImpl.java:97)
[junit4] > at sun.nio.ch.SelectorImpl.select(SelectorImpl.java:101)
[junit4] > at org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:472)
[junit4] > at org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:409)
[junit4] > at org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:360)
[junit4] > at org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:184)
[junit4] > at org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:171)
[junit4] > at org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:135)
[junit4] > at org.eclipse.jetty.io.ManagedSelector$$Lambda$66/508422401.run(Unknown Source)
[junit4] > at org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:806)
[junit4] > at org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:938)
[junit4] > at java.lang.Thread.run(Thread.java:748)
[junit4] > 9) Thread[id=79113, name=Session-HouseKeeper-6c445f77-1, state=TIMED_WAITING, group=TGRP-ShardSplitTest]
[junit4] > at sun.misc.Unsafe.park(Native Method)
[junit4] > at java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:215)
[junit4] > at java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:2078)
[junit4] > at java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1093)
[junit4] > at java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:809)
[junit4] > at java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1074)
[junit4] > at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1134)
[junit4] > at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
[junit4] > at java.lang.Thread.run(Thread.java:748)
[junit4] > 10) Thread[id=79110, name=qtp78814574-79110, state=TIMED_WAITING, group=TGRP-ShardSplitTest]
[junit4] > at sun.misc.Unsafe.park(Native Method)
[junit4] > at java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:215)
[junit4] > at java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:2078)
[junit4] > at org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382)
[junit4] > at org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:875)
[junit4] > at org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:925)
[junit4] > at java.lang.Thread.run(Thread.java:748)
[junit4] > at __randomizedtesting.SeedInfo.seed([E7328758635BF3AA]:0)
[junit4] Completed [571/914 (1!)] on J0 in 633.83s, 11 tests, 3 errors <<< FAILURES!
[...truncated 53081 lines...]
[JENKINS] Lucene-Solr-NightlyTests-8.6 - Build # 24 - Still
Unstable
Posted by Apache Jenkins Server <je...@builds.apache.org>.
Build: https://builds.apache.org/job/Lucene-Solr-NightlyTests-8.6/24/
1 tests failed.
FAILED: org.apache.solr.cloud.cdcr.CdcrReplicationHandlerTest.testReplicationWithBufferedUpdates
Error Message:
Timeout while trying to assert number of documents @ source_collection
Stack Trace:
java.lang.AssertionError: Timeout while trying to assert number of documents @ source_collection
at __randomizedtesting.SeedInfo.seed([2769CD055B49FDBE:F4609D1B1EDA6129]:0)
at org.apache.solr.cloud.cdcr.BaseCdcrDistributedZkTest.assertNumDocs(BaseCdcrDistributedZkTest.java:278)
at org.apache.solr.cloud.cdcr.CdcrReplicationHandlerTest.testReplicationWithBufferedUpdates(CdcrReplicationHandlerTest.java:233)
at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)
at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
at java.lang.reflect.Method.invoke(Method.java:498)
at com.carrotsearch.randomizedtesting.RandomizedRunner.invoke(RandomizedRunner.java:1750)
at com.carrotsearch.randomizedtesting.RandomizedRunner$8.evaluate(RandomizedRunner.java:938)
at com.carrotsearch.randomizedtesting.RandomizedRunner$9.evaluate(RandomizedRunner.java:974)
at com.carrotsearch.randomizedtesting.RandomizedRunner$10.evaluate(RandomizedRunner.java:988)
at org.apache.solr.BaseDistributedSearchTestCase$ShardsRepeatRule$ShardsFixedStatement.callStatement(BaseDistributedSearchTestCase.java:1090)
at org.apache.solr.BaseDistributedSearchTestCase$ShardsRepeatRule$ShardsStatement.evaluate(BaseDistributedSearchTestCase.java:1061)
at com.carrotsearch.randomizedtesting.rules.SystemPropertiesRestoreRule$1.evaluate(SystemPropertiesRestoreRule.java:57)
at org.apache.lucene.util.TestRuleSetupTeardownChained$1.evaluate(TestRuleSetupTeardownChained.java:49)
at org.apache.lucene.util.AbstractBeforeAfterRule$1.evaluate(AbstractBeforeAfterRule.java:45)
at org.apache.lucene.util.TestRuleThreadAndTestName$1.evaluate(TestRuleThreadAndTestName.java:48)
at org.apache.lucene.util.TestRuleIgnoreAfterMaxFailures$1.evaluate(TestRuleIgnoreAfterMaxFailures.java:64)
at org.apache.lucene.util.TestRuleMarkFailure$1.evaluate(TestRuleMarkFailure.java:47)
at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
at com.carrotsearch.randomizedtesting.ThreadLeakControl$StatementRunner.run(ThreadLeakControl.java:368)
at com.carrotsearch.randomizedtesting.ThreadLeakControl.forkTimeoutingTask(ThreadLeakControl.java:817)
at com.carrotsearch.randomizedtesting.ThreadLeakControl$3.evaluate(ThreadLeakControl.java:468)
at com.carrotsearch.randomizedtesting.RandomizedRunner.runSingleTest(RandomizedRunner.java:947)
at com.carrotsearch.randomizedtesting.RandomizedRunner$5.evaluate(RandomizedRunner.java:832)
at com.carrotsearch.randomizedtesting.RandomizedRunner$6.evaluate(RandomizedRunner.java:883)
at com.carrotsearch.randomizedtesting.RandomizedRunner$7.evaluate(RandomizedRunner.java:894)
at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
at com.carrotsearch.randomizedtesting.rules.SystemPropertiesRestoreRule$1.evaluate(SystemPropertiesRestoreRule.java:57)
at org.apache.lucene.util.AbstractBeforeAfterRule$1.evaluate(AbstractBeforeAfterRule.java:45)
at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
at org.apache.lucene.util.TestRuleStoreClassName$1.evaluate(TestRuleStoreClassName.java:41)
at com.carrotsearch.randomizedtesting.rules.NoShadowingOrOverridesOnMethodsRule$1.evaluate(NoShadowingOrOverridesOnMethodsRule.java:40)
at com.carrotsearch.randomizedtesting.rules.NoShadowingOrOverridesOnMethodsRule$1.evaluate(NoShadowingOrOverridesOnMethodsRule.java:40)
at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
at org.apache.lucene.util.TestRuleAssertionsRequired$1.evaluate(TestRuleAssertionsRequired.java:53)
at org.apache.lucene.util.TestRuleMarkFailure$1.evaluate(TestRuleMarkFailure.java:47)
at org.apache.lucene.util.TestRuleIgnoreAfterMaxFailures$1.evaluate(TestRuleIgnoreAfterMaxFailures.java:64)
at org.apache.lucene.util.TestRuleIgnoreTestSuites$1.evaluate(TestRuleIgnoreTestSuites.java:54)
at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
at com.carrotsearch.randomizedtesting.ThreadLeakControl$StatementRunner.run(ThreadLeakControl.java:368)
at java.lang.Thread.run(Thread.java:748)
Caused by: java.lang.AssertionError: expected:<580> but was:<590>
at org.junit.Assert.fail(Assert.java:88)
at org.junit.Assert.failNotEquals(Assert.java:834)
at org.junit.Assert.assertEquals(Assert.java:645)
at org.junit.Assert.assertEquals(Assert.java:631)
at org.apache.solr.cloud.cdcr.BaseCdcrDistributedZkTest.assertNumDocs(BaseCdcrDistributedZkTest.java:269)
... 42 more
Build Log:
[...truncated 15354 lines...]
[junit4] Suite: org.apache.solr.cloud.cdcr.CdcrReplicationHandlerTest
[junit4] 2> 3805555 INFO (SUITE-CdcrReplicationHandlerTest-seed#[2769CD055B49FDBE]-worker) [ ] o.a.s.SolrTestCase Setting 'solr.default.confdir' system property to test-framework derived value of '/home/jenkins/jenkins-slave/workspace/Lucene-Solr-NightlyTests-8.6/checkout/solr/server/solr/configsets/_default/conf'
[junit4] 2> 3805555 INFO (SUITE-CdcrReplicationHandlerTest-seed#[2769CD055B49FDBE]-worker) [ ] o.a.s.SolrTestCaseJ4 Created dataDir: /home/jenkins/jenkins-slave/workspace/Lucene-Solr-NightlyTests-8.6/checkout/solr/build/solr-core/test/J1/temp/solr.cloud.cdcr.CdcrReplicationHandlerTest_2769CD055B49FDBE-001/data-dir-209-001
[junit4] 2> 3805555 WARN (SUITE-CdcrReplicationHandlerTest-seed#[2769CD055B49FDBE]-worker) [ ] o.a.s.SolrTestCaseJ4 startTrackingSearchers: numOpens=539 numCloses=539
[junit4] 2> 3805555 INFO (SUITE-CdcrReplicationHandlerTest-seed#[2769CD055B49FDBE]-worker) [ ] o.a.s.SolrTestCaseJ4 Using PointFields (NUMERIC_POINTS_SYSPROP=true) w/NUMERIC_DOCVALUES_SYSPROP=false
[junit4] 2> 3805557 INFO (SUITE-CdcrReplicationHandlerTest-seed#[2769CD055B49FDBE]-worker) [ ] o.a.s.SolrTestCaseJ4 Randomized ssl (true) and clientAuth (true) via: @org.apache.solr.util.RandomizeSSL(reason=, ssl=NaN, value=NaN, clientAuth=NaN)
[junit4] 2> 3805557 INFO (SUITE-CdcrReplicationHandlerTest-seed#[2769CD055B49FDBE]-worker) [ ] o.a.s.SolrTestCaseJ4 SecureRandom sanity checks: test.solr.allowed.securerandom=null & java.security.egd=file:/dev/./urandom
[junit4] 2> 3805558 INFO (SUITE-CdcrReplicationHandlerTest-seed#[2769CD055B49FDBE]-worker) [ ] o.a.s.BaseDistributedSearchTestCase Setting hostContext system property: /sp_/
[junit4] 2> 3805563 INFO (TEST-CdcrReplicationHandlerTest.testPartialReplication-seed#[2769CD055B49FDBE]) [ ] o.a.s.c.ZkTestServer STARTING ZK TEST SERVER
[junit4] 2> 3805563 INFO (ZkTestServer Run Thread) [ ] o.a.s.c.ZkTestServer client port: 0.0.0.0/0.0.0.0:0
[junit4] 2> 3805563 INFO (ZkTestServer Run Thread) [ ] o.a.s.c.ZkTestServer Starting server
[junit4] 2> 3805663 INFO (TEST-CdcrReplicationHandlerTest.testPartialReplication-seed#[2769CD055B49FDBE]) [ ] o.a.s.c.ZkTestServer start zk server on port: 34893
[junit4] 2> 3805664 INFO (TEST-CdcrReplicationHandlerTest.testPartialReplication-seed#[2769CD055B49FDBE]) [ ] o.a.s.c.ZkTestServer waitForServerUp: 127.0.0.1:34893
[junit4] 2> 3805664 INFO (TEST-CdcrReplicationHandlerTest.testPartialReplication-seed#[2769CD055B49FDBE]) [ ] o.a.s.c.ZkTestServer parse host and port list: 127.0.0.1:34893
[junit4] 2> 3805664 INFO (TEST-CdcrReplicationHandlerTest.testPartialReplication-seed#[2769CD055B49FDBE]) [ ] o.a.s.c.ZkTestServer connecting to 127.0.0.1 34893
[junit4] 2> 3805665 INFO (TEST-CdcrReplicationHandlerTest.testPartialReplication-seed#[2769CD055B49FDBE]) [ ] o.a.s.c.c.ConnectionManager Waiting for client to connect to ZooKeeper
[junit4] 2> 3805670 INFO (zkConnectionManagerCallback-30477-thread-1) [ ] o.a.s.c.c.ConnectionManager zkClient has connected
[junit4] 2> 3805670 INFO (TEST-CdcrReplicationHandlerTest.testPartialReplication-seed#[2769CD055B49FDBE]) [ ] o.a.s.c.c.ConnectionManager Client is connected to ZooKeeper
[junit4] 2> 3805674 INFO (TEST-CdcrReplicationHandlerTest.testPartialReplication-seed#[2769CD055B49FDBE]) [ ] o.a.s.c.c.ConnectionManager Waiting for client to connect to ZooKeeper
[junit4] 2> 3805675 INFO (zkConnectionManagerCallback-30479-thread-1) [ ] o.a.s.c.c.ConnectionManager zkClient has connected
[junit4] 2> 3805675 INFO (TEST-CdcrReplicationHandlerTest.testPartialReplication-seed#[2769CD055B49FDBE]) [ ] o.a.s.c.c.ConnectionManager Client is connected to ZooKeeper
[junit4] 2> 3805676 INFO (TEST-CdcrReplicationHandlerTest.testPartialReplication-seed#[2769CD055B49FDBE]) [ ] o.a.s.c.ZkTestServer put /home/jenkins/jenkins-slave/workspace/Lucene-Solr-NightlyTests-8.6/checkout/solr/core/src/test-files/solr/collection1/conf/solrconfig-cdcr.xml to /configs/conf1/solrconfig.xml
[junit4] 2> 3805677 INFO (TEST-CdcrReplicationHandlerTest.testPartialReplication-seed#[2769CD055B49FDBE]) [ ] o.a.s.c.ZkTestServer put /home/jenkins/jenkins-slave/workspace/Lucene-Solr-NightlyTests-8.6/checkout/solr/core/src/test-files/solr/collection1/conf/schema15.xml to /configs/conf1/schema.xml
[junit4] 2> 3805679 INFO (TEST-CdcrReplicationHandlerTest.testPartialReplication-seed#[2769CD055B49FDBE]) [ ] o.a.s.c.ZkTestServer put /home/jenkins/jenkins-slave/workspace/Lucene-Solr-NightlyTests-8.6/checkout/solr/core/src/test-files/solr/collection1/conf/solrconfig.snippet.randomindexconfig.xml to /configs/conf1/solrconfig.snippet.randomindexconfig.xml
[junit4] 2> 3805680 INFO (TEST-CdcrReplicationHandlerTest.testPartialReplication-seed#[2769CD055B49FDBE]) [ ] o.a.s.c.ZkTestServer put /home/jenkins/jenkins-slave/workspace/Lucene-Solr-NightlyTests-8.6/checkout/solr/core/src/test-files/solr/collection1/conf/stopwords.txt to /configs/conf1/stopwords.txt
[junit4] 2> 3805681 INFO (TEST-CdcrReplicationHandlerTest.testPartialReplication-seed#[2769CD055B49FDBE]) [ ] o.a.s.c.ZkTestServer put /home/jenkins/jenkins-slave/workspace/Lucene-Solr-NightlyTests-8.6/checkout/solr/core/src/test-files/solr/collection1/conf/protwords.txt to /configs/conf1/protwords.txt
[junit4] 2> 3805682 INFO (TEST-CdcrReplicationHandlerTest.testPartialReplication-seed#[2769CD055B49FDBE]) [ ] o.a.s.c.ZkTestServer put /home/jenkins/jenkins-slave/workspace/Lucene-Solr-NightlyTests-8.6/checkout/solr/core/src/test-files/solr/collection1/conf/currency.xml to /configs/conf1/currency.xml
[junit4] 2> 3805683 INFO (TEST-CdcrReplicationHandlerTest.testPartialReplication-seed#[2769CD055B49FDBE]) [ ] o.a.s.c.ZkTestServer put /home/jenkins/jenkins-slave/workspace/Lucene-Solr-NightlyTests-8.6/checkout/solr/core/src/test-files/solr/collection1/conf/enumsConfig.xml to /configs/conf1/enumsConfig.xml
[junit4] 2> 3805684 INFO (TEST-CdcrReplicationHandlerTest.testPartialReplication-seed#[2769CD055B49FDBE]) [ ] o.a.s.c.ZkTestServer put /home/jenkins/jenkins-slave/workspace/Lucene-Solr-NightlyTests-8.6/checkout/solr/core/src/test-files/solr/collection1/conf/open-exchange-rates.json to /configs/conf1/open-exchange-rates.json
[junit4] 2> 3805686 INFO (TEST-CdcrReplicationHandlerTest.testPartialReplication-seed#[2769CD055B49FDBE]) [ ] o.a.s.c.ZkTestServer put /home/jenkins/jenkins-slave/workspace/Lucene-Solr-NightlyTests-8.6/checkout/solr/core/src/test-files/solr/collection1/conf/mapping-ISOLatin1Accent.txt to /configs/conf1/mapping-ISOLatin1Accent.txt
[junit4] 2> 3805687 INFO (TEST-CdcrReplicationHandlerTest.testPartialReplication-seed#[2769CD055B49FDBE]) [ ] o.a.s.c.ZkTestServer put /home/jenkins/jenkins-slave/workspace/Lucene-Solr-NightlyTests-8.6/checkout/solr/core/src/test-files/solr/collection1/conf/old_synonyms.txt to /configs/conf1/old_synonyms.txt
[junit4] 2> 3805688 INFO (TEST-CdcrReplicationHandlerTest.testPartialReplication-seed#[2769CD055B49FDBE]) [ ] o.a.s.c.ZkTestServer put /home/jenkins/jenkins-slave/workspace/Lucene-Solr-NightlyTests-8.6/checkout/solr/core/src/test-files/solr/collection1/conf/synonyms.txt to /configs/conf1/synonyms.txt
[junit4] 2> 3805690 INFO (TEST-CdcrReplicationHandlerTest.testPartialReplication-seed#[2769CD055B49FDBE]) [ ] o.a.s.c.c.ConnectionManager Waiting for client to connect to ZooKeeper
[junit4] 2> 3805690 INFO (zkConnectionManagerCallback-30483-thread-1) [ ] o.a.s.c.c.ConnectionManager zkClient has connected
[junit4] 2> 3805690 INFO (TEST-CdcrReplicationHandlerTest.testPartialReplication-seed#[2769CD055B49FDBE]) [ ] o.a.s.c.c.ConnectionManager Client is connected to ZooKeeper
[junit4] 2> 3805792 INFO (TEST-CdcrReplicationHandlerTest.testPartialReplication-seed#[2769CD055B49FDBE]) [ ] o.a.s.SolrTestCaseJ4 ###Starting testPartialReplication
[junit4] 2> 3805893 WARN (TEST-CdcrReplicationHandlerTest.testPartialReplication-seed#[2769CD055B49FDBE]) [ ] o.e.j.s.h.g.GzipHandler minGzipSize of 0 is inefficient for short content, break even is size 23
[junit4] 2> 3805893 INFO (TEST-CdcrReplicationHandlerTest.testPartialReplication-seed#[2769CD055B49FDBE]) [ ] o.a.s.c.s.e.JettySolrRunner Start Jetty (configured port=0, binding port=0)
[junit4] 2> 3805893 INFO (TEST-CdcrReplicationHandlerTest.testPartialReplication-seed#[2769CD055B49FDBE]) [ ] o.a.s.c.s.e.JettySolrRunner Trying to start Jetty on port 0 try number 2 ...
[junit4] 2> 3805893 INFO (TEST-CdcrReplicationHandlerTest.testPartialReplication-seed#[2769CD055B49FDBE]) [ ] o.e.j.s.Server jetty-9.4.27.v20200227; built: 2020-02-27T18:37:21.340Z; git: a304fd9f351f337e7c0e2a7c28878dd536149c6c; jvm 1.8.0_252-b09
[junit4] 2> 3805894 INFO (TEST-CdcrReplicationHandlerTest.testPartialReplication-seed#[2769CD055B49FDBE]) [ ] o.e.j.s.session DefaultSessionIdManager workerName=node0
[junit4] 2> 3805894 INFO (TEST-CdcrReplicationHandlerTest.testPartialReplication-seed#[2769CD055B49FDBE]) [ ] o.e.j.s.session No SessionScavenger set, using defaults
[junit4] 2> 3805894 INFO (TEST-CdcrReplicationHandlerTest.testPartialReplication-seed#[2769CD055B49FDBE]) [ ] o.e.j.s.session node0 Scavenging every 660000ms
[junit4] 2> 3805894 INFO (TEST-CdcrReplicationHandlerTest.testPartialReplication-seed#[2769CD055B49FDBE]) [ ] o.e.j.s.h.ContextHandler Started o.e.j.s.ServletContextHandler@68875e4b{/sp_,null,AVAILABLE}
[junit4] 2> 3805895 INFO (TEST-CdcrReplicationHandlerTest.testPartialReplication-seed#[2769CD055B49FDBE]) [ ] o.e.j.s.AbstractConnector Started ServerConnector@53910398{SSL, (ssl, http/1.1)}{127.0.0.1:43603}
[junit4] 2> 3805895 INFO (TEST-CdcrReplicationHandlerTest.testPartialReplication-seed#[2769CD055B49FDBE]) [ ] o.e.j.s.Server Started @3805921ms
[junit4] 2> 3805895 INFO (TEST-CdcrReplicationHandlerTest.testPartialReplication-seed#[2769CD055B49FDBE]) [ ] o.a.s.c.s.e.JettySolrRunner Jetty properties: {shards=shard1, hostContext=/sp_, hostPort=43603, coreRootDirectory=/home/jenkins/jenkins-slave/workspace/Lucene-Solr-NightlyTests-8.6/checkout/solr/build/solr-core/test/J1/temp/solr.cloud.cdcr.CdcrReplicationHandlerTest_2769CD055B49FDBE-001/jetty-001/cores}
[junit4] 2> 3805895 ERROR (TEST-CdcrReplicationHandlerTest.testPartialReplication-seed#[2769CD055B49FDBE]) [ ] o.a.s.u.StartupLoggingUtils Missing Java Option solr.log.dir. Logging may be missing or incomplete.
[junit4] 2> 3805895 INFO (TEST-CdcrReplicationHandlerTest.testPartialReplication-seed#[2769CD055B49FDBE]) [ ] o.a.s.s.SolrDispatchFilter Using logger factory org.apache.logging.slf4j.Log4jLoggerFactory
[junit4] 2> 3805895 INFO (TEST-CdcrReplicationHandlerTest.testPartialReplication-seed#[2769CD055B49FDBE]) [ ] o.a.s.s.SolrDispatchFilter ___ _ Welcome to Apache Solr? version 8.6.1
[junit4] 2> 3805895 INFO (TEST-CdcrReplicationHandlerTest.testPartialReplication-seed#[2769CD055B49FDBE]) [ ] o.a.s.s.SolrDispatchFilter / __| ___| |_ _ Starting in cloud mode on port null
[junit4] 2> 3805895 INFO (TEST-CdcrReplicationHandlerTest.testPartialReplication-seed#[2769CD055B49FDBE]) [ ] o.a.s.s.SolrDispatchFilter \__ \/ _ \ | '_| Install dir: /home/jenkins/jenkins-slave/workspace/Lucene-Solr-NightlyTests-8.6/checkout/solr
[junit4] 2> 3805895 INFO (TEST-CdcrReplicationHandlerTest.testPartialReplication-seed#[2769CD055B49FDBE]) [ ] o.a.s.s.SolrDispatchFilter |___/\___/_|_| Start time: 2020-07-24T23:44:33.351Z
[junit4] 2> 3805896 INFO (TEST-CdcrReplicationHandlerTest.testPartialReplication-seed#[2769CD055B49FDBE]) [ ] o.a.s.c.c.ConnectionManager Waiting for client to connect to ZooKeeper
[junit4] 2> 3805897 INFO (zkConnectionManagerCallback-30485-thread-1) [ ] o.a.s.c.c.ConnectionManager zkClient has connected
[junit4] 2> 3805897 INFO (TEST-CdcrReplicationHandlerTest.testPartialReplication-seed#[2769CD055B49FDBE]) [ ] o.a.s.c.c.ConnectionManager Client is connected to ZooKeeper
[junit4] 2> 3805998 INFO (TEST-CdcrReplicationHandlerTest.testPartialReplication-seed#[2769CD055B49FDBE]) [ ] o.a.s.s.SolrDispatchFilter Loading solr.xml from SolrHome (not found in ZooKeeper)
[junit4] 2> 3805998 INFO (TEST-CdcrReplicationHandlerTest.testPartialReplication-seed#[2769CD055B49FDBE]) [ ] o.a.s.c.SolrXmlConfig Loading container configuration from /home/jenkins/jenkins-slave/workspace/Lucene-Solr-NightlyTests-8.6/checkout/solr/build/solr-core/test/J1/temp/solr.cloud.cdcr.CdcrReplicationHandlerTest_2769CD055B49FDBE-001/jetty-001/solr.xml
[junit4] 2> 3806002 INFO (TEST-CdcrReplicationHandlerTest.testPartialReplication-seed#[2769CD055B49FDBE]) [ ] o.a.s.c.SolrXmlConfig Configuration parameter autoReplicaFailoverWorkLoopDelay is ignored
[junit4] 2> 3806002 INFO (TEST-CdcrReplicationHandlerTest.testPartialReplication-seed#[2769CD055B49FDBE]) [ ] o.a.s.c.SolrXmlConfig Configuration parameter autoReplicaFailoverBadNodeExpiration is ignored
[junit4] 2> 3806003 INFO (TEST-CdcrReplicationHandlerTest.testPartialReplication-seed#[2769CD055B49FDBE]) [ ] o.a.s.c.SolrXmlConfig MBean server found: com.sun.jmx.mbeanserver.JmxMBeanServer@4e833ed5, but no JMX reporters were configured - adding default JMX reporter.
[junit4] 2> 3806503 INFO (TEST-CdcrReplicationHandlerTest.testPartialReplication-seed#[2769CD055B49FDBE]) [ ] o.a.s.h.c.HttpShardHandlerFactory Host whitelist initialized: WhitelistHostChecker [whitelistHosts=null, whitelistHostCheckingEnabled=false]
[junit4] 2> 3806503 WARN (TEST-CdcrReplicationHandlerTest.testPartialReplication-seed#[2769CD055B49FDBE]) [ ] o.a.s.c.s.i.Http2SolrClient Create Http2SolrClient with HTTP/1.1 transport since Java 8 or lower versions does not support SSL + HTTP/2
[junit4] 2> 3806504 WARN (TEST-CdcrReplicationHandlerTest.testPartialReplication-seed#[2769CD055B49FDBE]) [ ] o.e.j.u.s.S.config Trusting all certificates configured for Client@2cffbd8[provider=null,keyStore=null,trustStore=null]
[junit4] 2> 3806504 WARN (TEST-CdcrReplicationHandlerTest.testPartialReplication-seed#[2769CD055B49FDBE]) [ ] o.e.j.u.s.S.config No Client EndPointIdentificationAlgorithm configured for Client@2cffbd8[provider=null,keyStore=null,trustStore=null]
[junit4] 2> 3806507 WARN (TEST-CdcrReplicationHandlerTest.testPartialReplication-seed#[2769CD055B49FDBE]) [ ] o.a.s.c.s.i.Http2SolrClient Create Http2SolrClient with HTTP/1.1 transport since Java 8 or lower versions does not support SSL + HTTP/2
[junit4] 2> 3806507 WARN (TEST-CdcrReplicationHandlerTest.testPartialReplication-seed#[2769CD055B49FDBE]) [ ] o.e.j.u.s.S.config Trusting all certificates configured for Client@2cb7c4ce[provider=null,keyStore=null,trustStore=null]
[junit4] 2> 3806507 WARN (TEST-CdcrReplicationHandlerTest.testPartialReplication-seed#[2769CD055B49FDBE]) [ ] o.e.j.u.s.S.config No Client EndPointIdentificationAlgorithm configured for Client@2cb7c4ce[provider=null,keyStore=null,trustStore=null]
[junit4] 2> 3806509 INFO (TEST-CdcrReplicationHandlerTest.testPartialReplication-seed#[2769CD055B49FDBE]) [ ] o.a.s.c.ZkContainer Zookeeper client=127.0.0.1:34893/solr
[junit4] 2> 3806509 INFO (TEST-CdcrReplicationHandlerTest.testPartialReplication-seed#[2769CD055B49FDBE]) [ ] o.a.s.c.c.ConnectionManager Waiting for client to connect to ZooKeeper
[junit4] 2> 3806510 INFO (zkConnectionManagerCallback-30496-thread-1) [ ] o.a.s.c.c.ConnectionManager zkClient has connected
[junit4] 2> 3806510 INFO (TEST-CdcrReplicationHandlerTest.testPartialReplication-seed#[2769CD055B49FDBE]) [ ] o.a.s.c.c.ConnectionManager Client is connected to ZooKeeper
[junit4] 2> 3806612 INFO (TEST-CdcrReplicationHandlerTest.testPartialReplication-seed#[2769CD055B49FDBE]) [n:127.0.0.1:43603_sp_ ] o.a.s.c.c.ConnectionManager Waiting for client to connect to ZooKeeper
[junit4] 2> 3806613 INFO (zkConnectionManagerCallback-30498-thread-1) [ ] o.a.s.c.c.ConnectionManager zkClient has connected
[junit4] 2> 3806613 INFO (TEST-CdcrReplicationHandlerTest.testPartialReplication-seed#[2769CD055B49FDBE]) [n:127.0.0.1:43603_sp_ ] o.a.s.c.c.ConnectionManager Client is connected to ZooKeeper
[junit4] 2> 3806672 INFO (TEST-CdcrReplicationHandlerTest.testPartialReplication-seed#[2769CD055B49FDBE]) [n:127.0.0.1:43603_sp_ ] o.a.s.c.OverseerElectionContext I am going to be the leader 127.0.0.1:43603_sp_
[junit4] 2> 3806673 INFO (TEST-CdcrReplicationHandlerTest.testPartialReplication-seed#[2769CD055B49FDBE]) [n:127.0.0.1:43603_sp_ ] o.a.s.c.Overseer Overseer (id=72194026060185605-127.0.0.1:43603_sp_-n_0000000000) starting
[junit4] 2> 3806678 INFO (OverseerStateUpdate-72194026060185605-127.0.0.1:43603_sp_-n_0000000000) [n:127.0.0.1:43603_sp_ ] o.a.s.c.Overseer Starting to work on the main queue : 127.0.0.1:43603_sp_
[junit4] 2> 3806678 INFO (TEST-CdcrReplicationHandlerTest.testPartialReplication-seed#[2769CD055B49FDBE]) [n:127.0.0.1:43603_sp_ ] o.a.s.c.ZkController Register node as live in ZooKeeper:/live_nodes/127.0.0.1:43603_sp_
[junit4] 2> 3806680 INFO (OverseerStateUpdate-72194026060185605-127.0.0.1:43603_sp_-n_0000000000) [n:127.0.0.1:43603_sp_ ] o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (0) -> (1)
[junit4] 2> 3806681 INFO (TEST-CdcrReplicationHandlerTest.testPartialReplication-seed#[2769CD055B49FDBE]) [n:127.0.0.1:43603_sp_ ] o.a.s.p.PackageLoader /packages.json updated to version -1
[junit4] 2> 3806682 WARN (TEST-CdcrReplicationHandlerTest.testPartialReplication-seed#[2769CD055B49FDBE]) [n:127.0.0.1:43603_sp_ ] o.a.s.c.CoreContainer Not all security plugins configured! authentication=disabled authorization=disabled. Solr is only as secure as you make it. Consider configuring authentication/authorization before exposing Solr to users internal or external. See https://s.apache.org/solrsecurity for more info
[junit4] 2> 3806697 INFO (TEST-CdcrReplicationHandlerTest.testPartialReplication-seed#[2769CD055B49FDBE]) [n:127.0.0.1:43603_sp_ ] o.a.s.h.a.MetricsHistoryHandler No .system collection, keeping metrics history in memory.
[junit4] 2> 3806717 INFO (TEST-CdcrReplicationHandlerTest.testPartialReplication-seed#[2769CD055B49FDBE]) [n:127.0.0.1:43603_sp_ ] o.a.s.m.r.SolrJmxReporter JMX monitoring for 'solr.node' (registry 'solr.node') enabled at server: com.sun.jmx.mbeanserver.JmxMBeanServer@4e833ed5
[junit4] 2> 3806726 INFO (TEST-CdcrReplicationHandlerTest.testPartialReplication-seed#[2769CD055B49FDBE]) [n:127.0.0.1:43603_sp_ ] o.a.s.m.r.SolrJmxReporter JMX monitoring for 'solr.jvm' (registry 'solr.jvm') enabled at server: com.sun.jmx.mbeanserver.JmxMBeanServer@4e833ed5
[junit4] 2> 3806726 INFO (TEST-CdcrReplicationHandlerTest.testPartialReplication-seed#[2769CD055B49FDBE]) [n:127.0.0.1:43603_sp_ ] o.a.s.m.r.SolrJmxReporter JMX monitoring for 'solr.jetty' (registry 'solr.jetty') enabled at server: com.sun.jmx.mbeanserver.JmxMBeanServer@4e833ed5
[junit4] 2> 3806728 INFO (TEST-CdcrReplicationHandlerTest.testPartialReplication-seed#[2769CD055B49FDBE]) [n:127.0.0.1:43603_sp_ ] o.a.s.c.CorePropertiesLocator Found 0 core definitions underneath /home/jenkins/jenkins-slave/workspace/Lucene-Solr-NightlyTests-8.6/checkout/solr/build/solr-core/test/J1/temp/solr.cloud.cdcr.CdcrReplicationHandlerTest_2769CD055B49FDBE-001/jetty-001/cores
[junit4] 2> 3806840 WARN (TEST-CdcrReplicationHandlerTest.testPartialReplication-seed#[2769CD055B49FDBE]) [ ] o.e.j.s.h.g.GzipHandler minGzipSize of 0 is inefficient for short content, break even is size 23
[junit4] 2> 3806840 INFO (TEST-CdcrReplicationHandlerTest.testPartialReplication-seed#[2769CD055B49FDBE]) [ ] o.a.s.c.s.e.JettySolrRunner Start Jetty (configured port=0, binding port=0)
[junit4] 2> 3806840 INFO (TEST-CdcrReplicationHandlerTest.testPartialReplication-seed#[2769CD055B49FDBE]) [ ] o.a.s.c.s.e.JettySolrRunner Trying to start Jetty on port 0 try number 2 ...
[junit4] 2> 3806840 INFO (TEST-CdcrReplicationHandlerTest.testPartialReplication-seed#[2769CD055B49FDBE]) [ ] o.e.j.s.Server jetty-9.4.27.v20200227; built: 2020-02-27T18:37:21.340Z; git: a304fd9f351f337e7c0e2a7c28878dd536149c6c; jvm 1.8.0_252-b09
[junit4] 2> 3806842 INFO (TEST-CdcrReplicationHandlerTest.testPartialReplication-seed#[2769CD055B49FDBE]) [ ] o.e.j.s.session DefaultSessionIdManager workerName=node0
[junit4] 2> 3806842 INFO (TEST-CdcrReplicationHandlerTest.testPartialReplication-seed#[2769CD055B49FDBE]) [ ] o.e.j.s.session No SessionScavenger set, using defaults
[junit4] 2> 3806842 INFO (TEST-CdcrReplicationHandlerTest.testPartialReplication-seed#[2769CD055B49FDBE]) [ ] o.e.j.s.session node0 Scavenging every 660000ms
[junit4] 2> 3806842 INFO (TEST-CdcrReplicationHandlerTest.testPartialReplication-seed#[2769CD055B49FDBE]) [ ] o.e.j.s.h.ContextHandler Started o.e.j.s.ServletContextHandler@59979c78{/sp_,null,AVAILABLE}
[junit4] 2> 3806843 INFO (TEST-CdcrReplicationHandlerTest.testPartialReplication-seed#[2769CD055B49FDBE]) [ ] o.e.j.s.AbstractConnector Started ServerConnector@35875da1{SSL, (ssl, http/1.1)}{127.0.0.1:42193}
[junit4] 2> 3806843 INFO (TEST-CdcrReplicationHandlerTest.testPartialReplication-seed#[2769CD055B49FDBE]) [ ] o.e.j.s.Server Started @3806869ms
[junit4] 2> 3806843 INFO (TEST-CdcrReplicationHandlerTest.testPartialReplication-seed#[2769CD055B49FDBE]) [ ] o.a.s.c.s.e.JettySolrRunner Jetty properties: {shards=shard2, hostContext=/sp_, hostPort=42193, coreRootDirectory=/home/jenkins/jenkins-slave/workspace/Lucene-Solr-NightlyTests-8.6/checkout/solr/build/solr-core/test/J1/temp/solr.cloud.cdcr.CdcrReplicationHandlerTest_2769CD055B49FDBE-001/jetty-002/cores}
[junit4] 2> 3806843 ERROR (TEST-CdcrReplicationHandlerTest.testPartialReplication-seed#[2769CD055B49FDBE]) [ ] o.a.s.u.StartupLoggingUtils Missing Java Option solr.log.dir. Logging may be missing or incomplete.
[junit4] 2> 3806843 INFO (TEST-CdcrReplicationHandlerTest.testPartialReplication-seed#[2769CD055B49FDBE]) [ ] o.a.s.s.SolrDispatchFilter Using logger factory org.apache.logging.slf4j.Log4jLoggerFactory
[junit4] 2> 3806843 INFO (TEST-CdcrReplicationHandlerTest.testPartialReplication-seed#[2769CD055B49FDBE]) [ ] o.a.s.s.SolrDispatchFilter ___ _ Welcome to Apache Solr? version 8.6.1
[junit4] 2> 3806843 INFO (TEST-CdcrReplicationHandlerTest.testPartialReplication-seed#[2769CD055B49FDBE]) [ ] o.a.s.s.SolrDispatchFilter / __| ___| |_ _ Starting in cloud mode on port null
[junit4] 2> 3806843 INFO (TEST-CdcrReplicationHandlerTest.testPartialReplication-seed#[2769CD055B49FDBE]) [ ] o.a.s.s.SolrDispatchFilter \__ \/ _ \ | '_| Install dir: /home/jenkins/jenkins-slave/workspace/Lucene-Solr-NightlyTests-8.6/checkout/solr
[junit4] 2> 3806843 INFO (TEST-CdcrReplicationHandlerTest.testPartialReplication-seed#[2769CD055B49FDBE]) [ ] o.a.s.s.SolrDispatchFilter |___/\___/_|_| Start time: 2020-07-24T23:44:34.299Z
[junit4] 2> 3806844 INFO (TEST-CdcrReplicationHandlerTest.testPartialReplication-seed#[2769CD055B49FDBE]) [ ] o.a.s.c.c.ConnectionManager Waiting for client to connect to ZooKeeper
[junit4] 2> 3806845 INFO (zkConnectionManagerCallback-30512-thread-1) [ ] o.a.s.c.c.ConnectionManager zkClient has connected
[junit4] 2> 3806845 INFO (TEST-CdcrReplicationHandlerTest.testPartialReplication-seed#[2769CD055B49FDBE]) [ ] o.a.s.c.c.ConnectionManager Client is connected to ZooKeeper
[junit4] 2> 3806947 INFO (TEST-CdcrReplicationHandlerTest.testPartialReplication-seed#[2769CD055B49FDBE]) [ ] o.a.s.s.SolrDispatchFilter Loading solr.xml from SolrHome (not found in ZooKeeper)
[junit4] 2> 3806947 INFO (TEST-CdcrReplicationHandlerTest.testPartialReplication-seed#[2769CD055B49FDBE]) [ ] o.a.s.c.SolrXmlConfig Loading container configuration from /home/jenkins/jenkins-slave/workspace/Lucene-Solr-NightlyTests-8.6/checkout/solr/build/solr-core/test/J1/temp/solr.cloud.cdcr.CdcrReplicationHandlerTest_2769CD055B49FDBE-001/jetty-002/solr.xml
[junit4] 2> 3806950 INFO (TEST-CdcrReplicationHandlerTest.testPartialReplication-seed#[2769CD055B49FDBE]) [ ] o.a.s.c.SolrXmlConfig Configuration parameter autoReplicaFailoverWorkLoopDelay is ignored
[junit4] 2> 3806950 INFO (TEST-CdcrReplicationHandlerTest.testPartialReplication-seed#[2769CD055B49FDBE]) [ ] o.a.s.c.SolrXmlConfig Configuration parameter autoReplicaFailoverBadNodeExpiration is ignored
[junit4] 2> 3806951 INFO (TEST-CdcrReplicationHandlerTest.testPartialReplication-seed#[2769CD055B49FDBE]) [ ] o.a.s.c.SolrXmlConfig MBean server found: com.sun.jmx.mbeanserver.JmxMBeanServer@4e833ed5, but no JMX reporters were configured - adding default JMX reporter.
[junit4] 2> 3807605 INFO (TEST-CdcrReplicationHandlerTest.testPartialReplication-seed#[2769CD055B49FDBE]) [ ] o.a.s.h.c.HttpShardHandlerFactory Host whitelist initialized: WhitelistHostChecker [whitelistHosts=null, whitelistHostCheckingEnabled=false]
[junit4] 2> 3807605 WARN (TEST-CdcrReplicationHandlerTest.testPartialReplication-seed#[2769CD055B49FDBE]) [ ] o.a.s.c.s.i.Http2SolrClient Create Http2SolrClient with HTTP/1.1 transport since Java 8 or lower versions does not support SSL + HTTP/2
[junit4] 2> 3807606 WARN (TEST-CdcrReplicationHandlerTest.testPartialReplication-seed#[2769CD055B49FDBE]) [ ] o.e.j.u.s.S.config Trusting all certificates configured for Client@6f44a64d[provider=null,keyStore=null,trustStore=null]
[junit4] 2> 3807606 WARN (TEST-CdcrReplicationHandlerTest.testPartialReplication-seed#[2769CD055B49FDBE]) [ ] o.e.j.u.s.S.config No Client EndPointIdentificationAlgorithm configured for Client@6f44a64d[provider=null,keyStore=null,trustStore=null]
[junit4] 2> 3807608 WARN (TEST-CdcrReplicationHandlerTest.testPartialReplication-seed#[2769CD055B49FDBE]) [ ] o.a.s.c.s.i.Http2SolrClient Create Http2SolrClient with HTTP/1.1 transport since Java 8 or lower versions does not support SSL + HTTP/2
[junit4] 2> 3807609 WARN (TEST-CdcrReplicationHandlerTest.testPartialReplication-seed#[2769CD055B49FDBE]) [ ] o.e.j.u.s.S.config Trusting all certificates configured for Client@5027be8e[provider=null,keyStore=null,trustStore=null]
[junit4] 2> 3807609 WARN (TEST-CdcrReplicationHandlerTest.testPartialReplication-seed#[2769CD055B49FDBE]) [ ] o.e.j.u.s.S.config No Client EndPointIdentificationAlgorithm configured for Client@5027be8e[provider=null,keyStore=null,trustStore=null]
[junit4] 2> 3807610 INFO (TEST-CdcrReplicationHandlerTest.testPartialReplication-seed#[2769CD055B49FDBE]) [ ] o.a.s.c.ZkContainer Zookeeper client=127.0.0.1:34893/solr
[junit4] 2> 3807611 INFO (TEST-CdcrReplicationHandlerTest.testPartialReplication-seed#[2769CD055B49FDBE]) [ ] o.a.s.c.c.ConnectionManager Waiting for client to connect to ZooKeeper
[junit4] 2> 3807612 INFO (zkConnectionManagerCallback-30523-thread-1) [ ] o.a.s.c.c.ConnectionManager zkClient has connected
[junit4] 2> 3807612 INFO (TEST-CdcrReplicationHandlerTest.testPartialReplication-seed#[2769CD055B49FDBE]) [ ] o.a.s.c.c.ConnectionManager Client is connected to ZooKeeper
[junit4] 2> 3807714 INFO (TEST-CdcrReplicationHandlerTest.testPartialReplication-seed#[2769CD055B49FDBE]) [n:127.0.0.1:42193_sp_ ] o.a.s.c.c.ConnectionManager Waiting for client to connect to ZooKeeper
[junit4] 2> 3807715 INFO (zkConnectionManagerCallback-30525-thread-1) [ ] o.a.s.c.c.ConnectionManager zkClient has connected
[junit4] 2> 3807715 INFO (TEST-CdcrReplicationHandlerTest.testPartialReplication-seed#[2769CD055B49FDBE]) [n:127.0.0.1:42193_sp_ ] o.a.s.c.c.ConnectionManager Client is connected to ZooKeeper
[junit4] 2> 3807719 INFO (TEST-CdcrReplicationHandlerTest.testPartialReplication-seed#[2769CD055B49FDBE]) [n:127.0.0.1:42193_sp_ ] o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (0) -> (1)
[junit4] 2> 3807722 INFO (TEST-CdcrReplicationHandlerTest.testPartialReplication-seed#[2769CD055B49FDBE]) [n:127.0.0.1:42193_sp_ ] o.a.s.c.ZkController Publish node=127.0.0.1:42193_sp_ as DOWN
[junit4] 2> 3807722 INFO (TEST-CdcrReplicationHandlerTest.testPartialReplication-seed#[2769CD055B49FDBE]) [n:127.0.0.1:42193_sp_ ] o.a.s.c.TransientSolrCoreCacheDefault Allocating transient cache for 4 transient cores
[junit4] 2> 3807722 INFO (TEST-CdcrReplicationHandlerTest.testPartialReplication-seed#[2769CD055B49FDBE]) [n:127.0.0.1:42193_sp_ ] o.a.s.c.ZkController Register node as live in ZooKeeper:/live_nodes/127.0.0.1:42193_sp_
[junit4] 2> 3807723 INFO (zkCallback-30497-thread-1) [ ] o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (1) -> (2)
[junit4] 2> 3807723 INFO (zkCallback-30524-thread-1) [ ] o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (1) -> (2)
[junit4] 2> 3807725 INFO (TEST-CdcrReplicationHandlerTest.testPartialReplication-seed#[2769CD055B49FDBE]) [n:127.0.0.1:42193_sp_ ] o.a.s.p.PackageLoader /packages.json updated to version -1
[junit4] 2> 3807725 WARN (TEST-CdcrReplicationHandlerTest.testPartialReplication-seed#[2769CD055B49FDBE]) [n:127.0.0.1:42193_sp_ ] o.a.s.c.CoreContainer Not all security plugins configured! authentication=disabled authorization=disabled. Solr is only as secure as you make it. Consider configuring authentication/authorization before exposing Solr to users internal or external. See https://s.apache.org/solrsecurity for more info
[junit4] 2> 3807740 INFO (TEST-CdcrReplicationHandlerTest.testPartialReplication-seed#[2769CD055B49FDBE]) [n:127.0.0.1:42193_sp_ ] o.a.s.h.a.MetricsHistoryHandler No .system collection, keeping metrics history in memory.
[junit4] 2> 3807776 INFO (TEST-CdcrReplicationHandlerTest.testPartialReplication-seed#[2769CD055B49FDBE]) [n:127.0.0.1:42193_sp_ ] o.a.s.m.r.SolrJmxReporter JMX monitoring for 'solr.node' (registry 'solr.node') enabled at server: com.sun.jmx.mbeanserver.JmxMBeanServer@4e833ed5
[junit4] 2> 3807789 INFO (TEST-CdcrReplicationHandlerTest.testPartialReplication-seed#[2769CD055B49FDBE]) [n:127.0.0.1:42193_sp_ ] o.a.s.m.r.SolrJmxReporter JMX monitoring for 'solr.jvm' (registry 'solr.jvm') enabled at server: com.sun.jmx.mbeanserver.JmxMBeanServer@4e833ed5
[junit4] 2> 3807789 INFO (TEST-CdcrReplicationHandlerTest.testPartialReplication-seed#[2769CD055B49FDBE]) [n:127.0.0.1:42193_sp_ ] o.a.s.m.r.SolrJmxReporter JMX monitoring for 'solr.jetty' (registry 'solr.jetty') enabled at server: com.sun.jmx.mbeanserver.JmxMBeanServer@4e833ed5
[junit4] 2> 3807790 INFO (TEST-CdcrReplicationHandlerTest.testPartialReplication-seed#[2769CD055B49FDBE]) [n:127.0.0.1:42193_sp_ ] o.a.s.c.CorePropertiesLocator Found 0 core definitions underneath /home/jenkins/jenkins-slave/workspace/Lucene-Solr-NightlyTests-8.6/checkout/solr/build/solr-core/test/J1/temp/solr.cloud.cdcr.CdcrReplicationHandlerTest_2769CD055B49FDBE-001/jetty-002/cores
[junit4] 2> 3807806 INFO (TEST-CdcrReplicationHandlerTest.testPartialReplication-seed#[2769CD055B49FDBE]) [ ] o.a.s.c.c.ConnectionManager Waiting for client to connect to ZooKeeper
[junit4] 2> 3807806 INFO (zkConnectionManagerCallback-30538-thread-1) [ ] o.a.s.c.c.ConnectionManager zkClient has connected
[junit4] 2> 3807807 INFO (TEST-CdcrReplicationHandlerTest.testPartialReplication-seed#[2769CD055B49FDBE]) [ ] o.a.s.c.c.ConnectionManager Client is connected to ZooKeeper
[junit4] 2> 3807807 INFO (TEST-CdcrReplicationHandlerTest.testPartialReplication-seed#[2769CD055B49FDBE]) [ ] o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (0) -> (2)
[junit4] 2> 3807808 INFO (TEST-CdcrReplicationHandlerTest.testPartialReplication-seed#[2769CD055B49FDBE]) [ ] o.a.s.c.s.i.ZkClientClusterStateProvider Cluster at 127.0.0.1:34893/solr ready
[junit4] 2> 3807845 INFO (qtp1353287241-117967) [n:127.0.0.1:42193_sp_ ] o.a.s.h.a.CollectionsHandler Invoked Collection Action :create with params collection.configName=conf1&name=tmp_collection&nrtReplicas=1&action=CREATE&numShards=1&createNodeSet=&wt=javabin&version=2 and sendToOCPQueue=true
[junit4] 2> 3807847 INFO (OverseerThreadFactory-30505-thread-1-processing-n:127.0.0.1:43603_sp_) [n:127.0.0.1:43603_sp_ ] o.a.s.c.a.c.CreateCollectionCmd Create collection tmp_collection
[junit4] 2> 3807950 WARN (OverseerThreadFactory-30505-thread-1-processing-n:127.0.0.1:43603_sp_) [n:127.0.0.1:43603_sp_ ] o.a.s.c.a.c.CreateCollectionCmd It is unusual to create a collection (tmp_collection) without cores.
[junit4] 2> 3807952 INFO (qtp1353287241-117967) [n:127.0.0.1:42193_sp_ ] o.a.s.h.a.CollectionsHandler Wait for new collection to be active for at most 45 seconds. Check all shard replicas
[junit4] 2> 3807953 INFO (qtp1353287241-117967) [n:127.0.0.1:42193_sp_ ] o.a.s.s.HttpSolrCall [admin] webapp=null path=/admin/collections params={collection.configName=conf1&name=tmp_collection&nrtReplicas=1&action=CREATE&numShards=1&createNodeSet=&wt=javabin&version=2} status=0 QTime=108
[junit4] 2> 3807954 INFO (qtp1353287241-117963) [n:127.0.0.1:42193_sp_ ] o.a.s.h.a.CollectionsHandler Invoked Collection Action :addreplica with params node=127.0.0.1:43603_sp_&action=ADDREPLICA&collection=tmp_collection&shard=shard1&type=NRT&wt=javabin&version=2 and sendToOCPQueue=true
[junit4] 2> 3807956 INFO (OverseerCollectionConfigSetProcessor-72194026060185605-127.0.0.1:43603_sp_-n_0000000000) [n:127.0.0.1:43603_sp_ ] o.a.s.c.OverseerTaskQueue Response ZK path: /overseer/collection-queue-work/qnr-0000000000 doesn't exist. Requestor may have disconnected from ZooKeeper
[junit4] 2> 3807960 INFO (qtp1353287241-117966) [n:127.0.0.1:42193_sp_ ] o.a.s.s.HttpSolrCall [admin] webapp=null path=/admin/metrics params={prefix=CONTAINER.fs.usableSpace,CONTAINER.fs.totalSpace,CONTAINER.cores&wt=javabin&version=2&group=solr.node} status=0 QTime=0
[junit4] 2> 3807994 INFO (qtp1920697653-117931) [n:127.0.0.1:43603_sp_ ] o.a.s.c.TransientSolrCoreCacheDefault Allocating transient cache for 4 transient cores
[junit4] 2> 3807994 INFO (qtp1920697653-117931) [n:127.0.0.1:43603_sp_ ] o.a.s.s.HttpSolrCall [admin] webapp=null path=/admin/metrics params={prefix=CONTAINER.fs.usableSpace,CONTAINER.fs.totalSpace,CONTAINER.cores&wt=javabin&version=2&group=solr.node} status=0 QTime=0
[junit4] 2> 3807996 INFO (OverseerThreadFactory-30505-thread-2-processing-n:127.0.0.1:43603_sp_) [n:127.0.0.1:43603_sp_ c:tmp_collection s:shard1 ] o.a.s.c.a.c.AddReplicaCmd Node Identified 127.0.0.1:43603_sp_ for creating new replica of shard shard1 for collection tmp_collection
[junit4] 2> 3807997 INFO (OverseerThreadFactory-30505-thread-2-processing-n:127.0.0.1:43603_sp_) [n:127.0.0.1:43603_sp_ c:tmp_collection s:shard1 ] o.a.s.c.a.c.AddReplicaCmd Returning CreateReplica command.
[junit4] 2> 3808031 INFO (qtp1920697653-117930) [n:127.0.0.1:43603_sp_ x:tmp_collection_shard1_replica_n1 ] o.a.s.h.a.CoreAdminOperation core create command qt=/admin/cores&collection.configName=conf1&name=tmp_collection_shard1_replica_n1&action=CREATE&collection=tmp_collection&shard=shard1&wt=javabin&version=2&replicaType=NRT
[junit4] 2> 3809045 INFO (qtp1920697653-117930) [n:127.0.0.1:43603_sp_ c:tmp_collection s:shard1 x:tmp_collection_shard1_replica_n1 ] o.a.s.c.SolrConfig Using Lucene MatchVersion: 8.6.1
[junit4] 2> 3809055 INFO (qtp1920697653-117930) [n:127.0.0.1:43603_sp_ c:tmp_collection s:shard1 x:tmp_collection_shard1_replica_n1 ] o.a.s.s.IndexSchema Schema name=test
[junit4] 2> 3809133 INFO (qtp1920697653-117930) [n:127.0.0.1:43603_sp_ c:tmp_collection s:shard1 x:tmp_collection_shard1_replica_n1 ] o.a.s.s.IndexSchema Loaded schema test/1.6 with uniqueid field id
[junit4] 2> 3809150 INFO (qtp1920697653-117930) [n:127.0.0.1:43603_sp_ c:tmp_collection s:shard1 x:tmp_collection_shard1_replica_n1 ] o.a.s.c.CoreContainer Creating SolrCore 'tmp_collection_shard1_replica_n1' using configuration from configset conf1, trusted=true
[junit4] 2> 3809151 INFO (qtp1920697653-117930) [n:127.0.0.1:43603_sp_ c:tmp_collection s:shard1 x:tmp_collection_shard1_replica_n1 ] o.a.s.m.r.SolrJmxReporter JMX monitoring for 'solr.core.tmp_collection.shard1.replica_n1' (registry 'solr.core.tmp_collection.shard1.replica_n1') enabled at server: com.sun.jmx.mbeanserver.JmxMBeanServer@4e833ed5
[junit4] 2> 3809151 INFO (qtp1920697653-117930) [n:127.0.0.1:43603_sp_ c:tmp_collection s:shard1 x:tmp_collection_shard1_replica_n1 ] o.a.s.c.SolrCore [[tmp_collection_shard1_replica_n1] ] Opening new SolrCore at [/home/jenkins/jenkins-slave/workspace/Lucene-Solr-NightlyTests-8.6/checkout/solr/build/solr-core/test/J1/temp/solr.cloud.cdcr.CdcrReplicationHandlerTest_2769CD055B49FDBE-001/jetty-001/cores/tmp_collection_shard1_replica_n1], dataDir=[/home/jenkins/jenkins-slave/workspace/Lucene-Solr-NightlyTests-8.6/checkout/solr/build/solr-core/test/J1/temp/solr.cloud.cdcr.CdcrReplicationHandlerTest_2769CD055B49FDBE-001/jetty-001/cores/tmp_collection_shard1_replica_n1/data/]
[junit4] 2> 3809153 INFO (qtp1920697653-117930) [n:127.0.0.1:43603_sp_ c:tmp_collection s:shard1 x:tmp_collection_shard1_replica_n1 ] o.a.s.u.RandomMergePolicy RandomMergePolicy wrapping class org.apache.lucene.index.LogDocMergePolicy: [LogDocMergePolicy: minMergeSize=1000, mergeFactor=3, maxMergeSize=9223372036854775807, maxMergeSizeForForcedMerge=9223372036854775807, calibrateSizeByDeletes=false, maxMergeDocs=2147483647, maxCFSSegmentSizeMB=8.796093022207999E12, noCFSRatio=0.0]
[junit4] 2> 3809227 WARN (qtp1920697653-117930) [n:127.0.0.1:43603_sp_ c:tmp_collection s:shard1 x:tmp_collection_shard1_replica_n1 ] o.a.s.h.CdcrRequestHandler CDCR (in its current form) is deprecated as of 8.6 and shall be removed in 9.0. See SOLR-14022 for details.
[junit4] 2> 3809228 INFO (qtp1920697653-117930) [n:127.0.0.1:43603_sp_ c:tmp_collection s:shard1 x:tmp_collection_shard1_replica_n1 ] o.a.s.u.UpdateHandler Using UpdateLog implementation: org.apache.solr.update.CdcrUpdateLog
[junit4] 2> 3809228 INFO (qtp1920697653-117930) [n:127.0.0.1:43603_sp_ c:tmp_collection s:shard1 x:tmp_collection_shard1_replica_n1 ] o.a.s.u.UpdateLog Initializing UpdateLog: dataDir= defaultSyncLevel=FLUSH numRecordsToKeep=100 maxNumLogsToKeep=10 numVersionBuckets=65536
[junit4] 2> 3809229 INFO (qtp1920697653-117930) [n:127.0.0.1:43603_sp_ c:tmp_collection s:shard1 x:tmp_collection_shard1_replica_n1 ] o.a.s.u.CommitTracker Hard AutoCommit: disabled
[junit4] 2> 3809229 INFO (qtp1920697653-117930) [n:127.0.0.1:43603_sp_ c:tmp_collection s:shard1 x:tmp_collection_shard1_replica_n1 ] o.a.s.u.CommitTracker Soft AutoCommit: disabled
[junit4] 2> 3809230 INFO (qtp1920697653-117930) [n:127.0.0.1:43603_sp_ c:tmp_collection s:shard1 x:tmp_collection_shard1_replica_n1 ] o.a.s.u.RandomMergePolicy RandomMergePolicy wrapping class org.apache.lucene.index.TieredMergePolicy: [TieredMergePolicy: maxMergeAtOnce=33, maxMergeAtOnceExplicit=45, maxMergedSegmentMB=62.203125, floorSegmentMB=1.4404296875, forceMergeDeletesPctAllowed=25.276226664890142, segmentsPerTier=49.0, maxCFSSegmentSizeMB=8.796093022207999E12, noCFSRatio=0.8277244402935122, deletesPctAllowed=32.313598991839
[junit4] 2> 3809232 INFO (qtp1920697653-117930) [n:127.0.0.1:43603_sp_ c:tmp_collection s:shard1 x:tmp_collection_shard1_replica_n1 ] o.a.s.r.ManagedResourceStorage Configured ZooKeeperStorageIO with znodeBase: /configs/conf1
[junit4] 2> 3809233 INFO (qtp1920697653-117930) [n:127.0.0.1:43603_sp_ c:tmp_collection s:shard1 x:tmp_collection_shard1_replica_n1 ] o.a.s.r.ManagedResourceStorage Loaded null at path _rest_managed.json using ZooKeeperStorageIO:path=/configs/conf1
[junit4] 2> 3809233 INFO (qtp1920697653-117930) [n:127.0.0.1:43603_sp_ c:tmp_collection s:shard1 x:tmp_collection_shard1_replica_n1 ] o.a.s.h.ReplicationHandler Commits will be reserved for 10000 ms
[junit4] 2> 3809235 INFO (qtp1920697653-117930) [n:127.0.0.1:43603_sp_ c:tmp_collection s:shard1 x:tmp_collection_shard1_replica_n1 ] o.a.s.h.CdcrBufferStateManager Created znode /collections/tmp_collection/cdcr/state/buffer
[junit4] 2> 3809235 INFO (qtp1920697653-117930) [n:127.0.0.1:43603_sp_ c:tmp_collection s:shard1 x:tmp_collection_shard1_replica_n1 ] o.a.s.h.CdcrProcessStateManager Created znode /collections/tmp_collection/cdcr/state/process
[junit4] 2> 3809236 INFO (qtp1920697653-117930) [n:127.0.0.1:43603_sp_ c:tmp_collection s:shard1 x:tmp_collection_shard1_replica_n1 ] o.a.s.u.UpdateLog Could not find max version in index or recent updates, using new clock 1673143807316590592
[junit4] 2> 3809238 INFO (searcherExecutor-30540-thread-1-processing-n:127.0.0.1:43603_sp_ x:tmp_collection_shard1_replica_n1 c:tmp_collection s:shard1) [n:127.0.0.1:43603_sp_ c:tmp_collection s:shard1 x:tmp_collection_shard1_replica_n1 ] o.a.s.c.SolrCore [tmp_collection_shard1_replica_n1] Registered new searcher autowarm time: 0 ms
[junit4] 2> 3809240 INFO (qtp1920697653-117930) [n:127.0.0.1:43603_sp_ c:tmp_collection s:shard1 x:tmp_collection_shard1_replica_n1 ] o.a.s.c.ZkShardTerms Successful update of terms at /collections/tmp_collection/terms/shard1 to Terms{values={core_node2=0}, version=0}
[junit4] 2> 3809240 INFO (qtp1920697653-117930) [n:127.0.0.1:43603_sp_ c:tmp_collection s:shard1 x:tmp_collection_shard1_replica_n1 ] o.a.s.c.ShardLeaderElectionContextBase make sure parent is created /collections/tmp_collection/leaders/shard1
[junit4] 2> 3809254 INFO (qtp1920697653-117930) [n:127.0.0.1:43603_sp_ c:tmp_collection s:shard1 x:tmp_collection_shard1_replica_n1 ] o.a.s.c.ShardLeaderElectionContext Enough replicas found to continue.
[junit4] 2> 3809254 INFO (qtp1920697653-117930) [n:127.0.0.1:43603_sp_ c:tmp_collection s:shard1 x:tmp_collection_shard1_replica_n1 ] o.a.s.c.ShardLeaderElectionContext I may be the new leader - try and sync
[junit4] 2> 3809254 INFO (qtp1920697653-117930) [n:127.0.0.1:43603_sp_ c:tmp_collection s:shard1 x:tmp_collection_shard1_replica_n1 ] o.a.s.c.SyncStrategy Sync replicas to https://127.0.0.1:43603/sp_/tmp_collection_shard1_replica_n1/
[junit4] 2> 3809254 INFO (qtp1920697653-117930) [n:127.0.0.1:43603_sp_ c:tmp_collection s:shard1 x:tmp_collection_shard1_replica_n1 ] o.a.s.c.SyncStrategy Sync Success - now sync replicas to me
[junit4] 2> 3809254 INFO (qtp1920697653-117930) [n:127.0.0.1:43603_sp_ c:tmp_collection s:shard1 x:tmp_collection_shard1_replica_n1 ] o.a.s.c.SyncStrategy https://127.0.0.1:43603/sp_/tmp_collection_shard1_replica_n1/ has no replicas
[junit4] 2> 3809254 INFO (qtp1920697653-117930) [n:127.0.0.1:43603_sp_ c:tmp_collection s:shard1 x:tmp_collection_shard1_replica_n1 ] o.a.s.c.ShardLeaderElectionContextBase Creating leader registration node /collections/tmp_collection/leaders/shard1/leader after winning as /collections/tmp_collection/leader_elect/shard1/election/72194026060185605-core_node2-n_0000000000
[junit4] 2> 3809255 INFO (zkCallback-30497-thread-1) [ ] o.a.s.h.CdcrLeaderStateManager Received new leader state @ tmp_collection:shard1
[junit4] 2> 3809260 INFO (qtp1920697653-117930) [n:127.0.0.1:43603_sp_ c:tmp_collection s:shard1 x:tmp_collection_shard1_replica_n1 ] o.a.s.c.ShardLeaderElectionContext I am the new leader: https://127.0.0.1:43603/sp_/tmp_collection_shard1_replica_n1/ shard1
[junit4] 2> 3809366 INFO (zkCallback-30497-thread-1) [ ] o.a.s.c.c.ZkStateReader A cluster state change: [WatchedEvent state:SyncConnected type:NodeDataChanged path:/collections/tmp_collection/state.json] for collection [tmp_collection] has occurred - updating... (live nodes size: [2])
[junit4] 2> 3809366 INFO (zkCallback-30497-thread-2) [ ] o.a.s.c.c.ZkStateReader A cluster state change: [WatchedEvent state:SyncConnected type:NodeDataChanged path:/collections/tmp_collection/state.json] for collection [tmp_collection] has occurred - updating... (live nodes size: [2])
[junit4] 2> 3809367 INFO (qtp1920697653-117930) [n:127.0.0.1:43603_sp_ c:tmp_collection s:shard1 x:tmp_collection_shard1_replica_n1 ] o.a.s.c.ZkController I am the leader, no recovery necessary
[junit4] 2> 3809369 INFO (qtp1920697653-117930) [n:127.0.0.1:43603_sp_ ] o.a.s.s.HttpSolrCall [admin] webapp=null path=/admin/cores params={qt=/admin/cores&collection.configName=conf1&name=tmp_collection_shard1_replica_n1&action=CREATE&collection=tmp_collection&shard=shard1&wt=javabin&version=2&replicaType=NRT} status=0 QTime=1337
[junit4] 2> 3809374 INFO (qtp1353287241-117963) [n:127.0.0.1:42193_sp_ c:tmp_collection ] o.a.s.s.HttpSolrCall [admin] webapp=null path=/admin/collections params={node=127.0.0.1:43603_sp_&action=ADDREPLICA&collection=tmp_collection&shard=shard1&type=NRT&wt=javabin&version=2} status=0 QTime=1419
[junit4] 2> 3809375 INFO (qtp1353287241-117967) [n:127.0.0.1:42193_sp_ ] o.a.s.h.a.CollectionsHandler Invoked Collection Action :addreplica with params node=127.0.0.1:42193_sp_&action=ADDREPLICA&collection=tmp_collection&shard=shard1&type=NRT&wt=javabin&version=2 and sendToOCPQueue=true
[junit4] 2> 3809377 INFO (OverseerCollectionConfigSetProcessor-72194026060185605-127.0.0.1:43603_sp_-n_0000000000) [n:127.0.0.1:43603_sp_ ] o.a.s.c.OverseerTaskQueue Response ZK path: /overseer/collection-queue-work/qnr-0000000002 doesn't exist. Requestor may have disconnected from ZooKeeper
[junit4] 2> 3809383 INFO (qtp1353287241-117964) [n:127.0.0.1:42193_sp_ ] o.a.s.s.HttpSolrCall [admin] webapp=null path=/admin/metrics params={prefix=CONTAINER.fs.usableSpace,CONTAINER.fs.totalSpace,CONTAINER.cores&wt=javabin&version=2&group=solr.node} status=0 QTime=0
[junit4] 2> 3809387 INFO (qtp1920697653-117931) [n:127.0.0.1:43603_sp_ ] o.a.s.s.HttpSolrCall [admin] webapp=null path=/admin/metrics params={wt=javabin&version=2&key=solr.core.tmp_collection.shard1.replica_n1:INDEX.sizeInBytes} status=0 QTime=1
[junit4] 2> 3809388 INFO (qtp1920697653-117927) [n:127.0.0.1:43603_sp_ ] o.a.s.s.HttpSolrCall [admin] webapp=null path=/admin/metrics params={prefix=CONTAINER.fs.usableSpace,CONTAINER.fs.totalSpace,CONTAINER.cores&wt=javabin&version=2&group=solr.node} status=0 QTime=0
[junit4] 2> 3809390 INFO (qtp1920697653-117928) [n:127.0.0.1:43603_sp_ ] o.a.s.s.HttpSolrCall [admin] webapp=null path=/admin/metrics params={wt=javabin&version=2&key=solr.core.tmp_collection.shard1.replica_n1:INDEX.sizeInBytes} status=0 QTime=0
[junit4] 2> 3809390 INFO (OverseerThreadFactory-30505-thread-3-processing-n:127.0.0.1:43603_sp_) [n:127.0.0.1:43603_sp_ c:tmp_collection s:shard1 ] o.a.s.c.a.c.AddReplicaCmd Node Identified 127.0.0.1:42193_sp_ for creating new replica of shard shard1 for collection tmp_collection
[junit4] 2> 3809391 INFO (OverseerThreadFactory-30505-thread-3-processing-n:127.0.0.1:43603_sp_) [n:127.0.0.1:43603_sp_ c:tmp_collection s:shard1 ] o.a.s.c.a.c.AddReplicaCmd Returning CreateReplica command.
[junit4] 2> 3809428 INFO (qtp1353287241-117964) [n:127.0.0.1:42193_sp_ x:tmp_collection_shard1_replica_n3 ] o.a.s.h.a.CoreAdminOperation core create command qt=/admin/cores&collection.configName=conf1&name=tmp_collection_shard1_replica_n3&action=CREATE&collection=tmp_collection&shard=shard1&wt=javabin&version=2&replicaType=NRT
[junit4] 2> 3809532 INFO (zkCallback-30497-thread-2) [ ] o.a.s.c.c.ZkStateReader A cluster state change: [WatchedEvent state:SyncConnected type:NodeDataChanged path:/collections/tmp_collection/state.json] for collection [tmp_collection] has occurred - updating... (live nodes size: [2])
[junit4] 2> 3809532 INFO (zkCallback-30497-thread-1) [ ] o.a.s.c.c.ZkStateReader A cluster state change: [WatchedEvent state:SyncConnected type:NodeDataChanged path:/collections/tmp_collection/state.json] for collection [tmp_collection] has occurred - updating... (live nodes size: [2])
[junit4] 2> 3809533 INFO (zkCallback-30497-thread-3) [ ] o.a.s.c.c.ZkStateReader A cluster state change: [WatchedEvent state:SyncConnected type:NodeDataChanged path:/collections/tmp_collection/state.json] for collection [tmp_collection] has occurred - updating... (live nodes size: [2])
[junit4] 2> 3810441 INFO (qtp1353287241-117964) [n:127.0.0.1:42193_sp_ c:tmp_collection s:shard1 x:tmp_collection_shard1_replica_n3 ] o.a.s.c.SolrConfig Using Lucene MatchVersion: 8.6.1
[junit4] 2> 3810452 INFO (qtp1353287241-117964) [n:127.0.0.1:42193_sp_ c:tmp_collection s:shard1 x:tmp_collection_shard1_replica_n3 ] o.a.s.s.IndexSchema Schema name=test
[junit4] 2> 3810522 INFO (qtp1353287241-117964) [n:127.0.0.1:42193_sp_ c:tmp_collection s:shard1 x:tmp_collection_shard1_replica_n3 ] o.a.s.s.IndexSchema Loaded schema test/1.6 with uniqueid field id
[junit4] 2> 3810541 INFO (qtp1353287241-117964) [n:127.0.0.1:42193_sp_ c:tmp_collection s:shard1 x:tmp_collection_shard1_replica_n3 ] o.a.s.c.CoreContainer Creating SolrCore 'tmp_collection_shard1_replica_n3' using configuration from configset conf1, trusted=true
[junit4] 2> 3810542 INFO (qtp1353287241-117964) [n:127.0.0.1:42193_sp_ c:tmp_collection s:shard1 x:tmp_collection_shard1_replica_n3 ] o.a.s.m.r.SolrJmxReporter JMX monitoring for 'solr.core.tmp_collection.shard1.replica_n3' (registry 'solr.core.tmp_collection.shard1.replica_n3') enabled at server: com.sun.jmx.mbeanserver.JmxMBeanServer@4e833ed5
[junit4] 2> 3810542 INFO (qtp1353287241-117964) [n:127.0.0.1:42193_sp_ c:tmp_collection s:shard1 x:tmp_collection_shard1_replica_n3 ] o.a.s.c.SolrCore [[tmp_collection_shard1_replica_n3] ] Opening new SolrCore at [/home/jenkins/jenkins-slave/workspace/Lucene-Solr-NightlyTests-8.6/checkout/solr/build/solr-core/test/J1/temp/solr.cloud.cdcr.CdcrReplicationHandlerTest_2769CD055B49FDBE-001/jetty-002/cores/tmp_collection_shard1_replica_n3], dataDir=[/home/jenkins/jenkins-slave/workspace/Lucene-Solr-NightlyTests-8.6/checkout/solr/build/solr-core/test/J1/temp/solr.cloud.cdcr.CdcrReplicationHandlerTest_2769CD055B49FDBE-001/jetty-002/cores/tmp_collection_shard1_replica_n3/data/]
[junit4] 2> 3810546 INFO (qtp1353287241-117964) [n:127.0.0.1:42193_sp_ c:tmp_collection s:shard1 x:tmp_collection_shard1_replica_n3 ] o.a.s.u.RandomMergePolicy RandomMergePolicy wrapping class org.apache.lucene.index.LogDocMergePolicy: [LogDocMergePolicy: minMergeSize=1000, mergeFactor=3, maxMergeSize=9223372036854775807, maxMergeSizeForForcedMerge=9223372036854775807, calibrateSizeByDeletes=false, maxMergeDocs=2147483647, maxCFSSegmentSizeMB=8.796093022207999E12, noCFSRatio=0.0]
[junit4] 2> 3810629 WARN (qtp1353287241-117964) [n:127.0.0.1:42193_sp_ c:tmp_collection s:shard1 x:tmp_collection_shard1_replica_n3 ] o.a.s.h.CdcrRequestHandler CDCR (in its current form) is deprecated as of 8.6 and shall be removed in 9.0. See SOLR-14022 for details.
[junit4] 2> 3810630 INFO (qtp1353287241-117964) [n:127.0.0.1:42193_sp_ c:tmp_collection s:shard1 x:tmp_collection_shard1_replica_n3 ] o.a.s.u.UpdateHandler Using UpdateLog implementation: org.apache.solr.update.CdcrUpdateLog
[junit4] 2> 3810630 INFO (qtp1353287241-117964) [n:127.0.0.1:42193_sp_ c:tmp_collection s:shard1 x:tmp_collection_shard1_replica_n3 ] o.a.s.u.UpdateLog Initializing UpdateLog: dataDir= defaultSyncLevel=FLUSH numRecordsToKeep=100 maxNumLogsToKeep=10 numVersionBuckets=65536
[junit4] 2> 3810631 INFO (qtp1353287241-117964) [n:127.0.0.1:42193_sp_ c:tmp_collection s:shard1 x:tmp_collection_shard1_replica_n3 ] o.a.s.u.CommitTracker Hard AutoCommit: disabled
[junit4] 2> 3810631 INFO (qtp1353287241-117964) [n:127.0.0.1:42193_sp_ c:tmp_collection s:shard1 x:tmp_collection_shard1_replica_n3 ] o.a.s.u.CommitTracker Soft AutoCommit: disabled
[junit4] 2> 3810632 INFO (qtp1353287241-117964) [n:127.0.0.1:42193_sp_ c:tmp_collection s:shard1 x:tmp_collection_shard1_replica_n3 ] o.a.s.u.RandomMergePolicy RandomMergePolicy wrapping class org.apache.lucene.index.TieredMergePolicy: [TieredMergePolicy: maxMergeAtOnce=33, maxMergeAtOnceExplicit=45, maxMergedSegmentMB=62.203125, floorSegmentMB=1.4404296875, forceMergeDeletesPctAllowed=25.276226664890142, segmentsPerTier=49.0, maxCFSSegmentSizeMB=8.796093022207999E12, noCFSRatio=0.8277244402935122, deletesPctAllowed=32.313598991839
[junit4] 2> 3810634 INFO (qtp1353287241-117964) [n:127.0.0.1:42193_sp_ c:tmp_collection s:shard1 x:tmp_collection_shard1_replica_n3 ] o.a.s.r.ManagedResourceStorage Configured ZooKeeperStorageIO with znodeBase: /configs/conf1
[junit4] 2> 3810634 INFO (qtp1353287241-117964) [n:127.0.0.1:42193_sp_ c:tmp_collection s:shard1 x:tmp_collection_shard1_replica_n3 ] o.a.s.r.ManagedResourceStorage Loaded null at path _rest_managed.json using ZooKeeperStorageIO:path=/configs/conf1
[junit4] 2> 3810635 INFO (qtp1353287241-117964) [n:127.0.0.1:42193_sp_ c:tmp_collection s:shard1 x:tmp_collection_shard1_replica_n3 ] o.a.s.h.ReplicationHandler Commits will be reserved for 10000 ms
[junit4] 2> 3810636 INFO (qtp1353287241-117964) [n:127.0.0.1:42193_sp_ c:tmp_collection s:shard1 x:tmp_collection_shard1_replica_n3 ] o.a.s.u.UpdateLog Could not find max version in index or recent updates, using new clock 1673143808784596992
[junit4] 2> 3810638 INFO (searcherExecutor-30547-thread-1-processing-n:127.0.0.1:42193_sp_ x:tmp_collection_shard1_replica_n3 c:tmp_collection s:shard1) [n:127.0.0.1:42193_sp_ c:tmp_collection s:shard1 x:tmp_collection_shard1_replica_n3 ] o.a.s.c.SolrCore [tmp_collection_shard1_replica_n3] Registered new searcher autowarm time: 0 ms
[junit4] 2> 3810640 INFO (qtp1920697653-117931) [n:127.0.0.1:43603_sp_ c:tmp_collection s:shard1 r:core_node2 x:tmp_collection_shard1_replica_n1 ] o.a.s.c.S.Request [tmp_collection_shard1_replica_n1] webapp=/sp_ path=/cdcr params={action=LASTPROCESSEDVERSION&wt=javabin&version=2} status=0 QTime=0
[junit4] 2> 3810641 INFO (qtp1353287241-117964) [n:127.0.0.1:42193_sp_ c:tmp_collection s:shard1 x:tmp_collection_shard1_replica_n3 ] o.a.s.c.ZkShardTerms Successful update of terms at /collections/tmp_collection/terms/shard1 to Terms{values={core_node2=0, core_node4=0}, version=1}
[junit4] 2> 3810641 INFO (qtp1353287241-117964) [n:127.0.0.1:42193_sp_ c:tmp_collection s:shard1 x:tmp_collection_shard1_replica_n3 ] o.a.s.c.ShardLeaderElectionContextBase make sure parent is created /collections/tmp_collection/leaders/shard1
[junit4] 2> 3810644 INFO (qtp1353287241-117964) [n:127.0.0.1:42193_sp_ c:tmp_collection s:shard1 x:tmp_collection_shard1_replica_n3 ] o.a.s.c.ZkController Core needs to recover:tmp_collection_shard1_replica_n3
[junit4] 2> 3810644 INFO (updateExecutor-30519-thread-1-processing-n:127.0.0.1:42193_sp_ x:tmp_collection_shard1_replica_n3 c:tmp_collection s:shard1) [n:127.0.0.1:42193_sp_ c:tmp_collection s:shard1 r:core_node4 x:tmp_collection_shard1_replica_n3 ] o.a.s.u.DefaultSolrCoreState Running recovery
[junit4] 2> 3810644 INFO (recoveryExecutor-30521-thread-1-processing-n:127.0.0.1:42193_sp_ x:tmp_collection_shard1_replica_n3 c:tmp_collection s:shard1 r:core_node4) [n:127.0.0.1:42193_sp_ c:tmp_collection s:shard1 r:core_node4 x:tmp_collection_shard1_replica_n3 ] o.a.s.c.RecoveryStrategy Starting recovery process. recoveringAfterStartup=true
[junit4] 2> 3810646 INFO (recoveryExecutor-30521-thread-1-processing-n:127.0.0.1:42193_sp_ x:tmp_collection_shard1_replica_n3 c:tmp_collection s:shard1 r:core_node4) [n:127.0.0.1:42193_sp_ c:tmp_collection s:shard1 r:core_node4 x:tmp_collection_shard1_replica_n3 ] o.a.s.c.RecoveryStrategy startupVersions is empty
[junit4] 2> 3810646 INFO (qtp1353287241-117964) [n:127.0.0.1:42193_sp_ ] o.a.s.s.HttpSolrCall [admin] webapp=null path=/admin/cores params={qt=/admin/cores&collection.configName=conf1&name=tmp_collection_shard1_replica_n3&action=CREATE&collection=tmp_collection&shard=shard1&wt=javabin&version=2&replicaType=NRT} status=0 QTime=1218
[junit4] 2> 3810648 INFO (qtp1920697653-117928) [n:127.0.0.1:43603_sp_ c:tmp_collection s:shard1 r:core_node2 x:tmp_collection_shard1_replica_n1 ] o.a.s.c.S.Request [tmp_collection_shard1_replica_n1] webapp=/sp_ path=/admin/ping params={wt=javabin&version=2} hits=0 status=0 QTime=0
[junit4] 2> 3810648 INFO (qtp1920697653-117928) [n:127.0.0.1:43603_sp_ c:tmp_collection s:shard1 r:core_node2 x:tmp_collection_shard1_replica_n1 ] o.a.s.c.S.Request [tmp_collection_shard1_replica_n1] webapp=/sp_ path=/admin/ping params={wt=javabin&version=2} status=0 QTime=0
[junit4] 2> 3810648 INFO (recoveryExecutor-30521-thread-1-processing-n:127.0.0.1:42193_sp_ x:tmp_collection_shard1_replica_n3 c:tmp_collection s:shard1 r:core_node4) [n:127.0.0.1:42193_sp_ c:tmp_collection s:shard1 r:core_node4 x:tmp_collection_shard1_replica_n3 ] o.a.s.c.RecoveryStrategy Begin buffering updates. core=[tmp_collection_shard1_replica_n3]
[junit4] 2> 3810648 INFO (recoveryExecutor-30521-thread-1-processing-n:127.0.0.1:42193_sp_ x:tmp_collection_shard1_replica_n3 c:tmp_collection s:shard1 r:core_node4) [n:127.0.0.1:42193_sp_ c:tmp_collection s:shard1 r:core_node4 x:tmp_collection_shard1_replica_n3 ] o.a.s.u.UpdateLog Starting to buffer updates. FSUpdateLog{state=ACTIVE, tlog=null}
[junit4] 2> 3810649 INFO (recoveryExecutor-30521-thread-1-processing-n:127.0.0.1:42193_sp_ x:tmp_collection_shard1_replica_n3 c:tmp_collection s:shard1 r:core_node4) [n:127.0.0.1:42193_sp_ c:tmp_collection s:shard1 r:core_node4 x:tmp_collection_shard1_replica_n3 ] o.a.s.c.RecoveryStrategy Publishing state of core [tmp_collection_shard1_replica_n3] as recovering, leader is [https://127.0.0.1:43603/sp_/tmp_collection_shard1_replica_n1/] and I am [https://127.0.0.1:42193/sp_/tmp_collection_shard1_replica_n3/]
[junit4] 2> 3810649 INFO (qtp1353287241-117967) [n:127.0.0.1:42193_sp_ c:tmp_collection ] o.a.s.s.HttpSolrCall [admin] webapp=null path=/admin/collections params={node=127.0.0.1:42193_sp_&action=ADDREPLICA&collection=tmp_collection&shard=shard1&type=NRT&wt=javabin&version=2} status=0 QTime=1274
[junit4] 2> 3810649 INFO (recoveryExecutor-30521-thread-1-processing-n:127.0.0.1:42193_sp_ x:tmp_collection_shard1_replica_n3 c:tmp_collection s:shard1 r:core_node4) [n:127.0.0.1:42193_sp_ c:tmp_collection s:shard1 r:core_node4 x:tmp_collection_shard1_replica_n3 ] o.a.s.c.RecoveryStrategy Sending prep recovery command to [https://127.0.0.1:43603/sp_]; [WaitForState: action=PREPRECOVERY&core=tmp_collection_shard1_replica_n1&nodeName=127.0.0.1:42193_sp_&coreNodeName=core_node4&state=recovering&checkLive=true&onlyIfLeader=true&onlyIfLeaderActive=true]
[junit4] 2> 3810651 INFO (qtp1920697653-117927) [n:127.0.0.1:43603_sp_ x:tmp_collection_shard1_replica_n1 ] o.a.s.h.a.PrepRecoveryOp Going to wait for coreNodeName: core_node4, state: recovering, checkLive: true, onlyIfLeader: true, onlyIfLeaderActive: true
[junit4] 2> 3810652 INFO (qtp1920697653-117927) [n:127.0.0.1:43603_sp_ ] o.a.s.h.a.PrepRecoveryOp In WaitForState(recovering): collection=tmp_collection, shard=shard1, thisCore=tmp_collection_shard1_replica_n1, leaderDoesNotNeedRecovery=false, isLeader? true, live=true, checkLive=true, currentState=down, localState=active, nodeName=127.0.0.1:42193_sp_, coreNodeName=core_node4, onlyIfActiveCheckResult=false, nodeProps: core_node4:{
[junit4] 2> "core":"tmp_collection_shard1_replica_n3",
[junit4] 2> "base_url":"https://127.0.0.1:42193/sp_",
[junit4] 2> "node_name":"127.0.0.1:42193_sp_",
[junit4] 2> "state":"down",
[junit4] 2> "type":"NRT"}
[junit4] 2> 3810652 INFO (qtp1920697653-117927) [n:127.0.0.1:43603_sp_ ] o.a.s.h.a.PrepRecoveryOp In WaitForState(recovering): collection=tmp_collection, shard=shard1, thisCore=tmp_collection_shard1_replica_n1, leaderDoesNotNeedRecovery=false, isLeader? true, live=true, checkLive=true, currentState=down, localState=active, nodeName=127.0.0.1:42193_sp_, coreNodeName=core_node4, onlyIfActiveCheckResult=false, nodeProps: core_node4:{
[junit4] 2> "core":"tmp_collection_shard1_replica_n3",
[junit4] 2> "base_url":"https://127.0.0.1:42193/sp_",
[junit4] 2> "node_name":"127.0.0.1:42193_sp_",
[junit4] 2> "state":"down",
[junit4] 2> "type":"NRT"}
[junit4] 2> 3810652 INFO (qtp1920697653-117927) [n:127.0.0.1:43603_sp_ ] o.a.s.h.a.PrepRecoveryOp In WaitForState(recovering): collection=tmp_collection, shard=shard1, thisCore=tmp_collection_shard1_replica_n1, leaderDoesNotNeedRecovery=false, isLeader? true, live=true, checkLive=true, currentState=down, localState=active, nodeName=127.0.0.1:42193_sp_, coreNodeName=core_node4, onlyIfActiveCheckResult=false, nodeProps: core_node4:{
[junit4] 2> "core":"tmp_collection_shard1_replica_n3",
[junit4] 2> "base_url":"https://127.0.0.1:42193/sp_",
[junit4] 2> "node_name":"127.0.0.1:42193_sp_",
[junit4] 2> "state":"down",
[junit4] 2> "type":"NRT"}
[junit4] 2> 3810750 INFO (TEST-CdcrReplicationHandlerTest.testPartialReplication-seed#[2769CD055B49FDBE]) [ ] o.a.s.c.AbstractDistribZkTestBase Wait for recoveries to finish - collection: tmp_collection failOnTimeout: true timeout (sec):
[junit4] 1> replica:core_node2 rstate:active live:true
[junit4] 1> replica:core_node4 rstate:down live:true
[junit4] 1> replica:core_node2 rstate:active live:true
[junit4] 1> replica:core_node4 rstate:down live:true
[junit4] 1> replica:core_node2 rstate:active live:true
[junit4] 1> replica:core_node4 rstate:down live:true
[junit4] 2> 3810752 INFO (zkCallback-30497-thread-2) [ ] o.a.s.c.c.ZkStateReader A cluster state change: [WatchedEvent state:SyncConnected type:NodeDataChanged path:/collections/tmp_collection/state.json] for collection [tmp_collection] has occurred - updating... (live nodes size: [2])
[junit4] 2> 3810752 INFO (zkCallback-30497-thread-3) [ ] o.a.s.c.c.ZkStateReader A cluster state change: [WatchedEvent state:SyncConnected type:NodeDataChanged path:/collections/tmp_collection/state.json] for collection [tmp_collection] has occurred - updating... (live nodes size: [2])
[junit4] 2> 3810752 INFO (zkCallback-30497-thread-1) [ ] o.a.s.c.c.ZkStateReader A cluster state change: [WatchedEvent state:SyncConnected type:NodeDataChanged path:/collections/tmp_collection/state.json] for collection [tmp_collection] has occurred - updating... (live nodes size: [2])
[junit4] 2> 3810752 INFO (zkCallback-30524-thread-1) [ ] o.a.s.c.c.ZkStateReader A cluster state change: [WatchedEvent state:SyncConnected type:NodeDataChanged path:/collections/tmp_collection/state.json] for collection [tmp_collection] has occurred - updating... (live nodes size: [2])
[junit4] 2> 3810753 INFO (zkCallback-30524-thread-2) [ ] o.a.s.c.c.ZkStateReader A cluster state change: [WatchedEvent state:SyncConnected type:NodeDataChanged path:/collections/tmp_collection/state.json] for collection [tmp_collection] has occurred - updating... (live nodes size: [2])
[junit4] 2> 3810753 INFO (zkCallback-30524-thread-3) [ ] o.a.s.c.c.ZkStateReader A cluster state change: [WatchedEvent state:SyncConnected type:NodeDataChanged path:/collections/tmp_collection/state.json] for collection [tmp_collection] has occurred - updating... (live nodes size: [2])
[junit4] 1> replica:core_node2 rstate:active live:true
[junit4] 1> replica:core_node4 rstate:recovering live:true
[junit4] 1> replica:core_node2 rstate:active live:true
[junit4] 1> replica:core_node4 rstate:recovering live:true
[junit4] 2> 3810753 INFO (watches-30499-thread-2) [n:127.0.0.1:43603_sp_ ] o.a.s.h.a.PrepRecoveryOp In WaitForState(recovering): collection=tmp_collection, shard=shard1, thisCore=tmp_collection_shard1_replica_n1, leaderDoesNotNeedRecovery=false, isLeader? true, live=true, checkLive=true, currentState=recovering, localState=active, nodeName=127.0.0.1:42193_sp_, coreNodeName=core_node4, onlyIfActiveCheckResult=false, nodeProps: core_node4:{
[junit4] 2> "core":"tmp_collection_shard1_replica_n3",
[junit4] 2> "base_url":"https://127.0.0.1:42193/sp_",
[junit4] 2> "node_name":"127.0.0.1:42193_sp_",
[junit4] 2> "state":"recovering",
[junit4] 2> "type":"NRT"}
[junit4] 2> 3810753 INFO (watches-30499-thread-1) [n:127.0.0.1:43603_sp_ ] o.a.s.h.a.PrepRecoveryOp In WaitForState(recovering): collection=tmp_collection, shard=shard1, thisCore=tmp_collection_shard1_replica_n1, leaderDoesNotNeedRecovery=false, isLeader? true, live=true, checkLive=true, currentState=recovering, localState=active, nodeName=127.0.0.1:42193_sp_, coreNodeName=core_node4, onlyIfActiveCheckResult=false, nodeProps: core_node4:{
[junit4] 2> "core":"tmp_collection_shard1_replica_n3",
[junit4] 2> "base_url":"https://127.0.0.1:42193/sp_",
[junit4] 2> "node_name":"127.0.0.1:42193_sp_",
[junit4] 2> "state":"recovering",
[junit4] 2> "type":"NRT"}
[junit4] 2> 3810753 INFO (qtp1920697653-117927) [n:127.0.0.1:43603_sp_ ] o.a.s.s.HttpSolrCall [admin] webapp=null path=/admin/cores params={nodeName=127.0.0.1:42193_sp_&onlyIfLeaderActive=true&core=tmp_collection_shard1_replica_n1&coreNodeName=core_node4&action=PREPRECOVERY&checkLive=true&state=recovering&onlyIfLeader=true&wt=javabin&version=2} status=0 QTime=101
[junit4] 1> replica:core_node2 rstate:active live:true
[junit4] 1> replica:core_node4 rstate:recovering live:true
[junit4] 2> 3811254 INFO (recoveryExecutor-30521-thread-1-processing-n:127.0.0.1:42193_sp_ x:tmp_collection_shard1_replica_n3 c:tmp_collection s:shard1 r:core_node4) [n:127.0.0.1:42193_sp_ c:tmp_collection s:shard1 r:core_node4 x:tmp_collection_shard1_replica_n3 ] o.a.s.c.RecoveryStrategy Attempting to PeerSync from [https://127.0.0.1:43603/sp_/tmp_collection_shard1_replica_n1/] - recoveringAfterStartup=[true]
[junit4] 2> 3811254 WARN (recoveryExecutor-30521-thread-1-processing-n:127.0.0.1:42193_sp_ x:tmp_collection_shard1_replica_n3 c:tmp_collection s:shard1 r:core_node4) [n:127.0.0.1:42193_sp_ c:tmp_collection s:shard1 r:core_node4 x:tmp_collection_shard1_replica_n3 ] o.a.s.u.PeerSyncWithLeader no frame of reference to tell if we've missed updates
[junit4] 2> 3811254 INFO (recoveryExecutor-30521-thread-1-processing-n:127.0.0.1:42193_sp_ x:tmp_collection_shard1_replica_n3 c:tmp_collection s:shard1 r:core_node4) [n:127.0.0.1:42193_sp_ c:tmp_collection s:shard1 r:core_node4 x:tmp_collection_shard1_replica_n3 ] o.a.s.c.RecoveryStrategy PeerSync Recovery was not successful - trying replication.
[junit4] 2> 3811254 INFO (recoveryExecutor-30521-thread-1-processing-n:127.0.0.1:42193_sp_ x:tmp_collection_shard1_replica_n3 c:tmp_collection s:shard1 r:core_node4) [n:127.0.0.1:42193_sp_ c:tmp_collection s:shard1 r:core_node4 x:tmp_collection_shard1_replica_n3 ] o.a.s.c.RecoveryStrategy Starting Replication Recovery.
[junit4] 2> 3811254 INFO (recoveryExecutor-30521-thread-1-processing-n:127.0.0.1:42193_sp_ x:tmp_collection_shard1_replica_n3 c:tmp_collection s:shard1 r:core_node4) [n:127.0.0.1:42193_sp_ c:tmp_collection s:shard1 r:core_node4 x:tmp_collection_shard1_replica_n3 ] o.a.s.c.RecoveryStrategy Attempting to replicate from [https://127.0.0.1:43603/sp_/tmp_collection_shard1_replica_n1/].
[junit4] 2> 3811294 INFO (qtp1353287241-117965) [n:127.0.0.1:42193_sp_ c:tmp_collection s:shard1 r:core_node4 x:tmp_collection_shard1_replica_n3 ] o.a.s.u.p.DistributedUpdateProcessor Ignoring commit while not ACTIVE - state: BUFFERING replay: false
[junit4] 2> 3811294 INFO (qtp1353287241-117965) [n:127.0.0.1:42193_sp_ c:tmp_collection s:shard1 r:core_node4 x:tmp_collection_shard1_replica_n3 ] o.a.s.c.S.Request [tmp_collection_shard1_replica_n3] webapp=/sp_ path=/update params={update.distrib=FROMLEADER&update.chain=cdcr-processor-chain&waitSearcher=true&openSearcher=false&commit=true&softCommit=false&distrib.from=https://127.0.0.1:43603/sp_/tmp_collection_shard1_replica_n1/&commit_end_point=replicas&wt=javabin&version=2&expungeDeletes=false} status=0 QTime=0
[junit4] 2> 3811295 INFO (qtp1920697653-117929) [n:127.0.0.1:43603_sp_ c:tmp_collection s:shard1 r:core_node2 x:tmp_collection_shard1_replica_n1 ] o.a.s.c.S.Request [tmp_collection_shard1_replica_n1] webapp=/sp_ path=/update params={waitSearcher=true&openSearcher=false&commit=true&softCommit=false&wt=javabin&version=2} status=0 QTime=39
[junit4] 2> 3811296 INFO (qtp1920697653-117930) [n:127.0.0.1:43603_sp_ c:tmp_collection s:shard1 r:core_node2 x:tmp_collection_shard1_replica_n1 ] o.a.s.c.S.Request [tmp_collection_shard1_replica_n1] webapp=/sp_ path=/replication params={qt=/replication&wt=javabin&version=2&command=indexversion} status=0 QTime=0
[junit4] 2> 3811296 INFO (recoveryExecutor-30521-thread-1-processing-n:127.0.0.1:42193_sp_ x:tmp_collection_shard1_replica_n3 c:tmp_collection s:shard1 r:core_node4) [n:127.0.0.1:42193_sp_ c:tmp_collection s:shard1 r:core_node4 x:tmp_collection_shard1_replica_n3 ] o.a.s.h.IndexFetcher Master's generation: 1
[junit4] 2> 3811296 INFO (recoveryExecutor-30521-thread-1-processing-n:127.0.0.1:42193_sp_ x:tmp_collection_shard1_replica_n3 c:tmp_collection s:shard1 r:core_node4) [n:127.0.0.1:42193_sp_ c:tmp_collection s:shard1 r:core_node4 x:tmp_collection_shard1_replica_n3 ] o.a.s.h.IndexFetcher Master's version: 0
[junit4] 2> 3811296 INFO (recoveryExecutor-30521-thread-1-processing-n:127.0.0.1:42193_sp_ x:tmp_collection_shard1_replica_n3 c:tmp_collection s:shard1 r:core_node4) [n:127.0.0.1:42193_sp_ c:tmp_collection s:shard1 r:core_node4 x:tmp_collection_shard1_replica_n3 ] o.a.s.h.IndexFetcher Slave's generation: 1
[junit4] 2> 3811296 INFO (recoveryExecutor-30521-thread-1-processing-n:127.0.0.1:42193_sp_ x:tmp_collection_shard1_replica_n3 c:tmp_collection s:shard1 r:core_node4) [n:127.0.0.1:42193_sp_ c:tmp_collection s:shard1 r:core_node4 x:tmp_collection_shard1_replica_n3 ] o.a.s.h.IndexFetcher Slave's version: 0
[junit4] 2> 3811296 INFO (recoveryExecutor-30521-thread-1-processing-n:127.0.0.1:42193_sp_ x:tmp_collection_shard1_replica_n3 c:tmp_collection s:shard1 r:core_node4) [n:127.0.0.1:42193_sp_ c:tmp_collection s:shard1 r:core_node4 x:tmp_collection_shard1_replica_n3 ] o.a.s.h.IndexFetcher New index in Master. Deleting mine...
[junit4] 2> 3811300 INFO (searcherExecutor-30547-thread-1-processing-n:127.0.0.1:42193_sp_ x:tmp_collection_shard1_replica_n3 c:tmp_collection s:shard1 r:core_node4) [n:127.0.0.1:42193_sp_ c:tmp_collection s:shard1 r:core_node4 x:tmp_collection_shard1_replica_n3 ] o.a.s.c.SolrCore [tmp_collection_shard1_replica_n3] Registered new searcher autowarm time: 0 ms
[junit4] 2> 3811301 INFO (recoveryExecutor-30521-thread-1-processing-n:127.0.0.1:42193_sp_ x:tmp_collection_shard1_replica_n3 c:tmp_collection s:shard1 r:core_node4) [n:127.0.0.1:42193_sp_ c:tmp_collection s:shard1 r:core_node4 x:tmp_collection_shard1_replica_n3 ] o.a.s.c.RecoveryStrategy No replay needed.
[junit4] 2> 3811301 INFO (recoveryExecutor-30521-thread-1-processing-n:127.0.0.1:42193_sp_ x:tmp_collection_shard1_replica_n3 c:tmp_collection s:shard1 r:core_node4) [n:127.0.0.1:42193_sp_ c:tmp_collection s:shard1 r:core_node4 x:tmp_collection_shard1_replica_n3 ] o.a.s.c.RecoveryStrategy Replication Recovery was successful.
[junit4] 2> 3811301 INFO (recoveryExecutor-30521-thread-1-processing-n:127.0.0.1:42193_sp_ x:tmp_collection_shard1_replica_n3 c:tmp_collection s:shard1 r:core_node4) [n:127.0.0.1:42193_sp_ c:tmp_collection s:shard1 r:core_node4 x:tmp_collection_shard1_replica_n3 ] o.a.s.c.RecoveryStrategy Registering as Active after recovery.
[junit4] 2> 3811301 INFO (recoveryExecutor-30521-thread-1-processing-n:127.0.0.1:42193_sp_ x:tmp_collection_shard1_replica_n3 c:tmp_collection s:shard1 r:core_node4) [n:127.0.0.1:42193_sp_ c:tmp_collection s:shard1 r:core_node4 x:tmp_collection_shard1_replica_n3 ] o.a.s.c.RecoveryStrategy Updating version bucket highest from index after successful recovery.
[junit4] 2> 3811301 INFO (recoveryExecutor-30521-thread-1-processing-n:127.0.0.1:42193_sp_ x:tmp_collection_shard1_replica_n3 c:tmp_collection s:shard1 r:core_node4) [n:127.0.0.1:42193_sp_ c:tmp_collection s:shard1 r:core_node4 x:tmp_collection_shard1_replica_n3 ] o.a.s.u.UpdateLog Could not find max version in index or recent updates, using new clock 1673143809481900032
[junit4] 2> 3811302 INFO (recoveryExecutor-30521-thread-1-processing-n:127.0.0.1:42193_sp_ x:tmp_collection_shard1_replica_n3 c:tmp_collection s:shard1 r:core_node4) [n:127.0.0.1:42193_sp_ c:tmp_collection s:shard1 r:core_node4 x:tmp_collection_shard1_replica_n3 ] o.a.s.c.RecoveryStrategy Finished recovery process, successful=[true]
[junit4] 2> 3811378 INFO (OverseerCollectionConfigSetProcessor-72194026060185605-127.0.0.1:43603_sp_-n_0000000000) [n:127.0.0.1:43603_sp_ ] o.a.s.c.OverseerTaskQueue Response ZK path: /overseer/collection-queue-work/qnr-0000000004 doesn't exist. Requestor may have disconnected from ZooKeeper
[junit4] 2> 3811404 INFO (zkCallback-30497-thread-1) [ ] o.a.s.c.c.ZkStateReader A cluster state change: [WatchedEvent state:SyncConnected type:NodeDataChanged path:/collections/tmp_collection/state.json] for collection [tmp_collection] has occurred - updating... (live nodes size: [2])
[junit4] 2> 3811404 INFO (zkCallback-30497-thread-4) [ ] o.a.s.c.c.ZkStateReader A cluster state change: [WatchedEvent state:SyncConnected type:NodeDataChanged path:/collections/tmp_collection/state.json] for collection [tmp_collection] has occurred - updating... (live nodes size: [2])
[junit4] 2> 3811404 INFO (zkCallback-30497-thread-3) [ ] o.a.s.c.c.ZkStateReader A cluster state change: [WatchedEvent state:SyncConnected type:NodeDataChanged path:/collections/tmp_collection/state.json] for collection [tmp_collection] has occurred - updating... (live nodes size: [2])
[junit4] 2> 3811404 INFO (zkCallback-30524-thread-2) [ ] o.a.s.c.c.ZkStateReader A cluster state change: [WatchedEvent state:SyncConnected type:NodeDataChanged path:/collections/tmp_collection/state.json] for collection [tmp_collection] has occurred - updating... (live nodes size: [2])
[junit4] 2> 3811404 INFO (zkCallback-30524-thread-3) [ ] o.a.s.c.c.ZkStateReader A cluster state change: [WatchedEvent state:SyncConnected type:NodeDataChanged path:/collections/tmp_collection/state.json] for collection [tmp_collection] has occurred - updating... (live nodes size: [2])
[junit4] 2> 3811404 INFO (zkCallback-30524-thread-1) [ ] o.a.s.c.c.ZkStateReader A cluster state change: [WatchedEvent state:SyncConnected type:NodeDataChanged path:/collections/tmp_collection/state.json] for collection [tmp_collection] has occurred - updating... (live nodes size: [2])
[junit4] 1> replica:core_node2 rstate:active live:true
[junit4] 1> replica:core_node4 rstate:active live:true
[junit4] 1> no one is recoverying
[junit4] 2> 3811405 INFO (TEST-CdcrReplicationHandlerTest.testPartialReplication-seed#[2769CD055B49FDBE]) [ ] o.a.s.c.AbstractDistribZkTestBase Recoveries finished - collection: tmp_collection
[junit4] 2> 3811407 INFO (TEST-CdcrReplicationHandlerTest.testPartialReplication-seed#[2769CD055B49FDBE]) [ ] o.a.s.c.c.ConnectionManager Waiting for client to connect to ZooKeeper
[junit4] 2> 3811408 INFO (zkConnectionManagerCallback-30558-thread-1) [ ] o.a.s.c.c.ConnectionManager zkClient has connected
[junit4] 2> 3811408 INFO (TEST-CdcrReplicationHandlerTest.testPartialReplication-seed#[2769CD055B49FDBE]) [ ] o.a.s.c.c.ConnectionManager Client is connected to ZooKeeper
[junit4] 2> 3811409 INFO (TEST-CdcrReplicationHandlerTest.testPartialReplication-seed#[2769CD055B49FDBE]) [ ] o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (0) -> (2)
[junit4] 2> 3811410 INFO (TEST-CdcrReplicationHandlerTest.testPartialReplication-seed#[2769CD055B49FDBE]) [ ] o.a.s.c.s.i.ZkClientClusterStateProvider Cluster at 127.0.0.1:34893/solr ready
[junit4] 2> 3811412 INFO (qtp1353287241-117964) [n:127.0.0.1:42193_sp_ ] o.a.s.h.a.CollectionsHandler Invoked Collection Action :delete with params name=tmp_collection&action=DELETE&wt=javabin&version=2 and sendToOCPQueue=true
[junit4] 2> 3811414 INFO (OverseerThreadFactory-30505-thread-4-processing-n:127.0.0.1:43603_sp_) [n:127.0.0.1:43603_sp_ ] o.a.s.c.a.c.OverseerCollectionMessageHandler Executing Collection Cmd=action=UNLOAD&deleteInstanceDir=true&deleteDataDir=true&deleteMetricsHistory=true, asyncId=null
[junit4] 2> 3811415 INFO (qtp1920697653-117931) [n:127.0.0.1:43603_sp_ x:tmp_collection_shard1_replica_n1 ] o.a.s.m.SolrMetricManager Closing metric reporters for registry=solr.core.tmp_collection.shard1.replica_n1 tag=null
[junit4] 2> 3811415 INFO (qtp1353287241-117967) [n:127.0.0.1:42193_sp_ x:tmp_collection_shard1_replica_n3 ] o.a.s.m.SolrMetricManager Closing metric reporters for registry=solr.core.tmp_collection.shard1.replica_n3 tag=null
[junit4] 2> 3811415 INFO (qtp1920697653-117931) [n:127.0.0.1:43603_sp_ x:tmp_collection_shard1_replica_n1 ] o.a.s.m.r.SolrJmxReporter Closing reporter [org.apache.solr.metrics.reporters.SolrJmxReporter@61ab14d3: rootName = null, domain = solr.core.tmp_collection.shard1.replica_n1, service url = null, agent id = null] for registry solr.core.tmp_collection.shard1.replica_n1/com.codahale.metrics.MetricRegistry@54638bd8
[junit4] 2> 3811415 INFO (qtp1353287241-117967) [n:127.0.0.1:42193_sp_ x:tmp_collection_shard1_replica_n3 ] o.a.s.m.r.SolrJmxReporter Closing reporter [org.apache.solr.metrics.reporters.SolrJmxReporter@683dba46: rootName = null, domain = solr.core.tmp_collection.shard1.replica_n3, service url = null, agent id = null] for registry solr.core.tmp_collection.shard1.replica_n3/com.codahale.metrics.MetricRegistry@421a53a8
[junit4] 2> 3811452 INFO (qtp1353287241-117967) [n:127.0.0.1:42193_sp_ ] o.a.s.c.SolrCore [tmp_collection_shard1_replica_n3] CLOSING SolrCore org.apache.solr.core.SolrCore@1cac3fa9
[junit4] 2> 3811452 INFO (qtp1353287241-117967) [n:127.0.0.1:42193_sp_ ] o.a.s.m.SolrMetricManager Closing metric reporters for registry=solr.core.tmp_collection.shard1.replica_n3 tag=SolrCore@1cac3fa9
[junit4] 2> 3811452 INFO (qtp1353287241-117967) [n:127.0.0.1:42193_sp_ ] o.a.s.m.SolrMetricManager Closing metric reporters for registry=solr.collection.tmp_collection.shard1.leader tag=SolrCore@1cac3fa9
[junit4] 2> 3811452 INFO (qtp1920697653-117931) [n:127.0.0.1:43603_sp_ ] o.a.s.c.SolrCore [tmp_collection_shard1_replica_n1] CLOSING SolrCore org.apache.solr.core.SolrCore@5a2cbb58
[junit4] 2> 3811453 INFO (qtp1353287241-117967) [n:127.0.0.1:42193_sp_ ] o.a.s.h.CdcrRequestHandler Solr core is being closed - shutting down CDCR handler @ tmp_collection:shard1
[junit4] 2> 3811453 INFO (qtp1920697653-117931) [n:127.0.0.1:43603_sp_ ] o.a.s.m.SolrMetricManager Closing metric reporters for registry=solr.core.tmp_collection.shard1.replica_n1 tag=SolrCore@5a2cbb58
[junit4] 2> 3811453 INFO (qtp1920697653-117931) [n:127.0.0.1:43603_sp_ ] o.a.s.m.SolrMetricManager Closing metric reporters for registry=solr.collection.tmp_collection.shard1.leader tag=SolrCore@5a2cbb58
[junit4] 2> 3811453 INFO (qtp1920697653-117931) [n:127.0.0.1:43603_sp_ ] o.a.s.h.CdcrRequestHandler Solr core is being closed - shutting down CDCR handler @ tmp_collection:shard1
[junit4] 2> 3811453 INFO (qtp1920697653-117931) [n:127.0.0.1:43603_sp_ ] o.a.s.u.DirectUpdateHandler2 Committing on IndexWriter.close() ... SKIPPED (unnecessary).
[junit4] 2> 3811453 INFO (qtp1353287241-117967) [n:127.0.0.1:42193_sp_ ] o.a.s.u.DirectUpdateHandler2 Committing on IndexWriter.close() ... SKIPPED (unnecessary).
[junit4] 2> 3811455 INFO (qtp1920697653-117931) [n:127.0.0.1:43603_sp_ ] o.a.s.c.ZkShardTerms Successful update of terms at /collections/tmp_collection/terms/shard1 to Terms{values={core_node4=0}, version=2}
[junit4] 2> 3811456 INFO (qtp1920697653-117931) [n:127.0.0.1:43603_sp_ ] o.a.s.s.HttpSolrCall [admin] webapp=null path=/admin/cores params={deleteInstanceDir=true&deleteMetricsHistory=true&core=tmp_collection_shard1_replica_n1&qt=/admin/cores&deleteDataDir=true&action=UNLOAD&wt=javabin&version=2} status=0 QTime=41
[junit4] 2> 3811496 INFO (qtp1353287241-117967) [n:127.0.0.1:42193_sp_ ] o.a.s.c.ZkShardTerms Successful update of terms at /collections/tmp_collection/terms/shard1 to Terms{values={}, version=3}
[junit4] 2> 3811496 INFO (qtp1353287241-117967) [n:127.0.0.1:42193_sp_ ] o.a.s.c.ShardLeaderElectionContextBase No version found for ephemeral leader parent node, won't remove previous leader registration.
[junit4] 2> 3811497 INFO (qtp1353287241-117967) [n:127.0.0.1:42193_sp_ ] o.a.s.s.HttpSolrCall [admin] webapp=null path=/admin/cores params={deleteInstanceDir=true&deleteMetricsHistory=true&core=tmp_collection_shard1_replica_n3&qt=/admin/cores&deleteDataDir=true&action=UNLOAD&wt=javabin&version=2} status=0 QTime=81
[junit4] 2> 3811599 INFO (zkCallback-30524-thread-2) [ ] o.a.s.c.c.ZkStateReader A cluster state change: [WatchedEvent state:SyncConnected type:NodeDeleted path:/collections/tmp_collection/state.json] for collection [tmp_collection] has occurred - updating... (live nodes size: [2])
[junit4] 2> 3811599 INFO (zkCallback-30497-thread-4) [ ] o.a.s.c.c.ZkStateReader A cluster state change: [WatchedEvent state:SyncConnected type:NodeDeleted path:/collections/tmp_collection/state.json] for collection [tmp_collection] has occurred - updating... (live nodes size: [2])
[junit4] 2> 3811599 INFO (zkCallback-30524-thread-1) [ ] o.a.s.c.c.ZkStateReader A cluster state change: [WatchedEvent state:SyncConnected type:NodeDeleted path:/collections/tmp_collection/state.json] for collection [tmp_collection] has occurred - updating... (live nodes size: [2])
[junit4] 2> 3811599 INFO (zkCallback-30497-thread-1) [ ] o.a.s.c.c.ZkStateReader A cluster state change: [WatchedEvent state:SyncConnected type:NodeDeleted path:/collections/tmp_collection/state.json] for collection [tmp_collection] has occurred - updating... (live nodes size: [2])
[junit4] 2> 3811599 INFO (zkCallback-30524-thread-3) [ ] o.a.s.c.c.ZkStateReader A cluster state change: [WatchedEvent state:SyncConnected type:NodeDeleted path:/collections/tmp_collection/state.json] for collection [tmp_collection] has occurred - updating... (live nodes size: [2])
[junit4] 2> 3811599 INFO (zkCallback-30497-thread-3) [ ] o.a.s.c.c.ZkStateReader A cluster state change: [WatchedEvent state:SyncConnected type:NodeDeleted path:/collections/tmp_collection/state.json] for collection [tmp_collection] has occurred - updating... (live nodes size: [2])
[junit4] 2> 3811606 INFO (qtp1353287241-117964) [n:127.0.0.1:42193_sp_ ] o.a.s.s.HttpSolrCall [admin] webapp=null path=/admin/collections params={name=tmp_collection&action=DELETE&wt=javabin&version=2} status=0 QTime=194
[junit4] 2> 3811710 INFO (TEST-CdcrReplicationHandlerTest.testPartialReplication-seed#[2769CD055B49FDBE]) [ ] o.a.s.c.c.ConnectionManager Waiting for client to connect to ZooKeeper
[junit4] 2> 3811711 INFO (zkConnectionManagerCallback-30563-thread-1) [ ] o.a.s.c.c.ConnectionManager zkClient has connected
[junit4] 2> 3811711 INFO (TEST-CdcrReplicationHandlerTest.testPartialReplication-seed#[2769CD055B49FDBE]) [ ] o.a.s.c.c.ConnectionManager Client is connected to ZooKeeper
[junit4] 2> 3811712 INFO (TEST-CdcrReplicationHandlerTest.testPartialReplication-seed#[2769CD055B49FDBE]) [ ] o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (0) -> (2)
[junit4] 2> 3811713 INF
[...truncated too long message...]
artialReplicationWithTruncatedTlog-seed#[2769CD055B49FDBE]) [ ] o.e.j.s.AbstractConnector Stopped ServerConnector@13b2318{SSL, (ssl, http/1.1)}{127.0.0.1:42659}
[junit4] 2> 3937114 INFO (TEST-CdcrReplicationHandlerTest.testPartialReplicationWithTruncatedTlog-seed#[2769CD055B49FDBE]) [ ] o.e.j.s.h.ContextHandler Stopped o.e.j.s.ServletContextHandler@2da0ac4{/sp_,null,UNAVAILABLE}
[junit4] 2> 3937114 INFO (TEST-CdcrReplicationHandlerTest.testPartialReplicationWithTruncatedTlog-seed#[2769CD055B49FDBE]) [ ] o.e.j.s.session node0 Stopped scavenging
[junit4] 2> 3937116 INFO (TEST-CdcrReplicationHandlerTest.testPartialReplicationWithTruncatedTlog-seed#[2769CD055B49FDBE]) [ ] o.a.s.SolrTestCaseJ4 ###Ending testPartialReplicationWithTruncatedTlog
[junit4] 2> 3937116 INFO (TEST-CdcrReplicationHandlerTest.testPartialReplicationWithTruncatedTlog-seed#[2769CD055B49FDBE]) [ ] o.a.s.c.ZkTestServer Shutting down ZkTestServer.
[junit4] 2> 3937322 WARN (ZkTestServer Run Thread) [ ] o.a.s.c.ZkTestServer Watch limit violations:
[junit4] 2> Maximum concurrent create/delete watches above limit:
[junit4] 2>
[junit4] 2> 23 /solr/aliases.json
[junit4] 2> 13 /solr/collections/source_collection/terms/shard1
[junit4] 2> 8 /solr/collections/tmp_collection/terms/shard1
[junit4] 2> 7 /solr/collections/source_collection/leaders/shard1/leader
[junit4] 2> 7 /solr/collections/tmp_collection/state.json
[junit4] 2> 5 /solr/configs/conf1
[junit4] 2> 3 /solr/packages.json
[junit4] 2> 3 /solr/security.json
[junit4] 2> 3 /solr/collections/tmp_collection/leaders/shard1/leader
[junit4] 2>
[junit4] 2> Maximum concurrent data watches above limit:
[junit4] 2>
[junit4] 2> 49 /solr/collections/source_collection/state.json
[junit4] 2> 24 /solr/collections/tmp_collection/state.json
[junit4] 2> 23 /solr/clusterprops.json
[junit4] 2> 23 /solr/clusterstate.json
[junit4] 2> 4 /solr/collections/source_collection/cdcr/state/buffer
[junit4] 2> 4 /solr/collections/source_collection/cdcr/state/process
[junit4] 2> 2 /solr/overseer_elect/election/72194033210228741-127.0.0.1:35369_sp_-n_0000000000
[junit4] 2> 2 /solr/collections/source_collection/leader_elect/shard1/election/72194033210228741-core_node3-n_0000000000
[junit4] 2> 2 /solr/collections/tmp_collection/cdcr/state/buffer
[junit4] 2> 2 /solr/autoscaling.json
[junit4] 2> 2 /solr/collections/tmp_collection/cdcr/state/process
[junit4] 2>
[junit4] 2> Maximum concurrent children watches above limit:
[junit4] 2>
[junit4] 2> 44 /solr/overseer/queue
[junit4] 2> 32 /solr/collections
[junit4] 2> 31 /solr/live_nodes
[junit4] 2> 17 /solr/overseer/collection-queue-work
[junit4] 2> 2 /solr/autoscaling/events/.scheduled_maintenance
[junit4] 2> 2 /solr/autoscaling/events/.auto_add_replicas
[junit4] 2> 2 /solr/overseer/queue-work
[junit4] 2>
[junit4] 2> 3937324 INFO (TEST-CdcrReplicationHandlerTest.testPartialReplicationWithTruncatedTlog-seed#[2769CD055B49FDBE]) [ ] o.a.s.c.ZkTestServer waitForServerDown: 127.0.0.1:45463
[junit4] 2> 3937324 INFO (TEST-CdcrReplicationHandlerTest.testPartialReplicationWithTruncatedTlog-seed#[2769CD055B49FDBE]) [ ] o.a.s.c.ZkTestServer parse host and port list: 127.0.0.1:45463
[junit4] 2> 3937325 INFO (TEST-CdcrReplicationHandlerTest.testPartialReplicationWithTruncatedTlog-seed#[2769CD055B49FDBE]) [ ] o.a.s.c.ZkTestServer connecting to 127.0.0.1 45463
[junit4] 2> NOTE: leaving temporary files on disk at: /home/jenkins/jenkins-slave/workspace/Lucene-Solr-NightlyTests-8.6/checkout/solr/build/solr-core/test/J1/temp/solr.cloud.cdcr.CdcrReplicationHandlerTest_2769CD055B49FDBE-001
[junit4] 2> Jul 24, 2020 11:46:44 PM com.carrotsearch.randomizedtesting.ThreadLeakControl checkThreadLeaks
[junit4] 2> WARNING: Will linger awaiting termination of 1 leaked thread(s).
[junit4] 2> NOTE: test params are: codec=Asserting(Lucene86): {_root_=PostingsFormat(name=Direct), id=BlockTreeOrds(blocksize=128)}, docValues:{_version_=DocValuesFormat(name=Asserting)}, maxPointsInLeafNode=706, maxMBSortInHeap=5.827440128438619, sim=Asserting(RandomSimilarity(queryNorm=false): {}), locale=en-CA, timezone=Europe/Monaco
[junit4] 2> NOTE: Linux 4.15.0-108-generic amd64/Oracle Corporation 1.8.0_252 (64-bit)/cpus=4,threads=1,free=289723232,total=526909440
[junit4] 2> NOTE: All tests run in this JVM: [TestPhraseSuggestions, OverseerTaskQueueTest, TestDocTermOrds, XmlInterpolationTest, TestWithCollection, TestCodecSupport, TestRetrieveFieldsOptimizer, TestPKIAuthenticationPlugin, JWTAuthPluginIntegrationTest, TestSystemIdResolver, NumericFieldsTest, CursorMarkTest, DistributedFacetPivotWhiteBoxTest, TestTolerantUpdateProcessorRandomCloud, RestoreTriggerStateTest, CloudExitableDirectoryReaderTest, FieldMutatingUpdateProcessorTest, TestSubQueryTransformerCrossCore, TestBlobHandler, SolrPluginUtilsTest, ScriptEngineTest, RootFieldTest, NoCacheHeaderTest, TestJettySolrRunner, ConcurrentCreateCollectionTest, AdminHandlersProxyTest, JavabinLoaderTest, SolrInfoBeanTest, SimpleMLTQParserTest, TestPushWriter, ClassificationUpdateProcessorTest, NodeLostTriggerIntegrationTest, JavaBinAtomicUpdateMultivalueTest, PathHierarchyTokenizerFactoryTest, TestUseDocValuesAsStored, TestLFUCache, AddBlockUpdateTest, TestReload, BasicAuthIntegrationTest, TestCSVResponseWriter, TestSolrTestCaseJ4, TestLeaderElectionWithEmptyReplica, CacheHeaderTest, HdfsAutoAddReplicasIntegrationTest, TestSolrCloudWithDelegationTokens, TestMiniSolrCloudClusterSSL, TestNumericRangeQuery32, ConjunctionSolrSpellCheckerTest, AnalyticsMergeStrategyTest, JSONWriterTest, TestConfigSetsAPIZkFailure, TestCollationFieldDocValues, TestCloudPhrasesIdentificationComponent, QueryElevationComponentTest, TestNestedDocsSort, TestUpdate, TestReplicationHandler, DocValuesMissingTest, ScheduledTriggerTest, TestDistributedSearch, CoreMergeIndexesAdminHandlerTest, TestDFISimilarityFactory, BlockJoinFacetDistribTest, TestMaxTokenLenTokenizer, HLLSerializationTest, TestContainerPlugin, SolrShardReporterTest, QueryParsingTest, EnumFieldTest, SegmentsInfoRequestHandlerTest, InfixSuggestersTest, TestShardHandlerFactory, TestSolr4Spatial, TestSolrCloudSnapshots, TestPivotHelperCode, TestReqParamsAPI, TestDistribDocBasedVersion, OverseerSolrResponseTest, TestRealTimeGet, 
TestCoreAdminApis, CollectionTooManyReplicasTest, AuthToolTest, BadComponentTest, TolerantUpdateProcessorTest, TestSolrDeletionPolicy1, TestPostingsSolrHighlighter, TestRequestForwarding, TestSafeXMLParsing, IndexBasedSpellCheckerTest, ImplicitSnitchTest, HdfsRecoveryZkTest, TestManagedSchemaAPI, StatelessScriptUpdateProcessorFactoryTest, TestRandomFaceting, SparseHLLTest, TestHighlightDedupGrouping, CategoryRoutedAliasUpdateProcessorTest, TestFieldCollectionResource, DisMaxRequestHandlerTest, ChaosMonkeyNothingIsSafeTest, NodeMutatorTest, AddReplicaTest, SortSpecParsingTest, TestDynamicFieldNamesIndexCorrectly, RangeFacetCloudTest, CrossCollectionJoinQueryTest, HighlighterMaxOffsetTest, SaslZkACLProviderTest, QueryResultKeyTest, SystemLogListenerTest, TestWordDelimiterFilterFactory, MetricsHistoryHandlerTest, JWTVerificationkeyResolverTest, FieldAnalysisRequestHandlerTest, DateMathParserTest, OverseerCollectionConfigSetProcessorTest, TestCaffeineCache, TestCloudJSONFacetSKG, ConnectionManagerTest, TestCloudConsistency, PingRequestHandlerTest, MaxSizeAutoCommitTest, FileUtilsTest, TestPayloadCheckQParserPlugin, SearchHandlerTest, TestPullReplica, TestXmlQParser, SolrIndexConfigTest, HdfsBasicDistributedZk2Test, MinimalSchemaTest, TestUniqueKeyFieldResource, TestNumericTerms32, TestLRUStatsCache, TestStressUserVersions, TestFacetMethods, TestSchemaSimilarityResource, HdfsChaosMonkeySafeLeaderTest, TestSolrCoreParser, ExitableDirectoryReaderTest, SolrLogAuditLoggerPluginTest, MetricsHistoryWithAuthIntegrationTest, ChaosMonkeySafeLeaderWithPullReplicasTest, DistributedQueryComponentOptimizationTest, ZkCLITest, PreAnalyzedFieldManagedSchemaCloudTest, TestFieldTypeResource, TermVectorComponentDistributedTest, LeaderVoteWaitTimeoutTest, TestInPlaceUpdateWithRouteField, HttpPartitionWithTlogReplicasTest, LeaderFailureAfterFreshStartTest, MigrateRouteKeyTest, MoveReplicaTest, MultiSolrCloudTestCaseTest, OutOfBoxZkACLAndCredentialsProvidersTest, PeerSyncReplicationTest, 
ReplaceNodeNoTargetTest, ReplaceNodeTest, RestartWhileUpdatingTest, ShardRoutingTest, SharedFSAutoReplicaFailoverTest, SolrCLIZkUtilsTest, SolrCloudExampleTest, SplitShardTest, SyncSliceTest, TestCloudDeleteByQuery, TestCloudPseudoReturnFields, TestCloudRecovery, TestCloudSearcherWarming, TestClusterProperties, TestConfigSetsAPI, TestCryptoKeys, TestPullReplicaErrorHandling, TestQueryingOnDownCollection, TestRandomFlRTGCloud, TestSegmentSorting, TestShortCircuitedRequests, TestSizeLimitedDistributedMap, TestSolrCloudWithSecureImpersonation, TestStressCloudBlindAtomicUpdates, TestTlogReplayVsRecovery, TestTlogReplica, TriLevelCompositeIdRoutingTest, ZkFailoverTest, ZkNodePropsTest, ZkShardTermsTest, ZkSolrClientTest, HdfsCollectionsAPIDistributedZkTest, TestHdfsCloudBackupRestore, AutoAddReplicasIntegrationTest, HttpTriggerListenerTest, IndexSizeTriggerMixedBoundsTest, MetricTriggerIntegrationTest, NodeLostTriggerTest, SearchRateTriggerIntegrationTest, TriggerEventQueueTest, TriggerSetPropertiesIntegrationTest, TestSimComputePlanAction, TestSimDistribStateManager, TestSimExtremeIndexing, TestSimGenericDistributedQueue, TestSimNodeAddedTrigger, TestSimNodeLostTrigger, TestSimScenario, TestSimTriggerIntegration, TestSimUtils, TestSnapshotCloudManager, BaseCdcrDistributedZkTest, CdcrBidirectionalTest, CdcrBootstrapTest, CdcrReplicationHandlerTest]
[junit4] Completed [590/914 (1!)] on J1 in 132.68s, 5 tests, 1 failure <<< FAILURES!
[...truncated 53182 lines...]