You are viewing a plain text version of this content. The canonical link for it is here.
Posted to dev@lucene.apache.org by Apache Jenkins Server <je...@builds.apache.org> on 2018/07/02 22:58:44 UTC
[JENKINS] Lucene-Solr-Tests-7.x - Build # 663 - Unstable
Build: https://builds.apache.org/job/Lucene-Solr-Tests-7.x/663/
1 tests failed.
FAILED: org.apache.solr.cloud.SyncSliceTest.test
Error Message:
Could not load collection from ZK: collection1
Stack Trace:
org.apache.solr.common.SolrException: Could not load collection from ZK: collection1
at __randomizedtesting.SeedInfo.seed([D00D26B5F179151C:5859196F5F8578E4]:0)
at org.apache.solr.common.cloud.ZkStateReader.getCollectionLive(ZkStateReader.java:1316)
at org.apache.solr.common.cloud.ZkStateReader$LazyCollectionRef.get(ZkStateReader.java:732)
at org.apache.solr.common.cloud.ClusterState.getCollectionOrNull(ClusterState.java:148)
at org.apache.solr.common.cloud.ClusterState.getCollectionOrNull(ClusterState.java:131)
at org.apache.solr.cloud.AbstractFullDistribZkTestBase.getTotalReplicas(AbstractFullDistribZkTestBase.java:495)
at org.apache.solr.cloud.AbstractFullDistribZkTestBase.createJettys(AbstractFullDistribZkTestBase.java:448)
at org.apache.solr.cloud.AbstractFullDistribZkTestBase.createServers(AbstractFullDistribZkTestBase.java:341)
at org.apache.solr.BaseDistributedSearchTestCase$ShardsRepeatRule$ShardsFixedStatement.callStatement(BaseDistributedSearchTestCase.java:1006)
at org.apache.solr.BaseDistributedSearchTestCase$ShardsRepeatRule$ShardsStatement.evaluate(BaseDistributedSearchTestCase.java:983)
at com.carrotsearch.randomizedtesting.rules.SystemPropertiesRestoreRule$1.evaluate(SystemPropertiesRestoreRule.java:57)
at org.apache.lucene.util.TestRuleSetupTeardownChained$1.evaluate(TestRuleSetupTeardownChained.java:49)
at org.apache.lucene.util.AbstractBeforeAfterRule$1.evaluate(AbstractBeforeAfterRule.java:45)
at org.apache.lucene.util.TestRuleThreadAndTestName$1.evaluate(TestRuleThreadAndTestName.java:48)
at org.apache.lucene.util.TestRuleIgnoreAfterMaxFailures$1.evaluate(TestRuleIgnoreAfterMaxFailures.java:64)
at org.apache.lucene.util.TestRuleMarkFailure$1.evaluate(TestRuleMarkFailure.java:47)
at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
at com.carrotsearch.randomizedtesting.ThreadLeakControl$StatementRunner.run(ThreadLeakControl.java:368)
at com.carrotsearch.randomizedtesting.ThreadLeakControl.forkTimeoutingTask(ThreadLeakControl.java:817)
at com.carrotsearch.randomizedtesting.ThreadLeakControl$3.evaluate(ThreadLeakControl.java:468)
at com.carrotsearch.randomizedtesting.RandomizedRunner.runSingleTest(RandomizedRunner.java:943)
at com.carrotsearch.randomizedtesting.RandomizedRunner$5.evaluate(RandomizedRunner.java:829)
at com.carrotsearch.randomizedtesting.RandomizedRunner$6.evaluate(RandomizedRunner.java:879)
at com.carrotsearch.randomizedtesting.RandomizedRunner$7.evaluate(RandomizedRunner.java:890)
at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
at com.carrotsearch.randomizedtesting.rules.SystemPropertiesRestoreRule$1.evaluate(SystemPropertiesRestoreRule.java:57)
at org.apache.lucene.util.AbstractBeforeAfterRule$1.evaluate(AbstractBeforeAfterRule.java:45)
at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
at org.apache.lucene.util.TestRuleStoreClassName$1.evaluate(TestRuleStoreClassName.java:41)
at com.carrotsearch.randomizedtesting.rules.NoShadowingOrOverridesOnMethodsRule$1.evaluate(NoShadowingOrOverridesOnMethodsRule.java:40)
at com.carrotsearch.randomizedtesting.rules.NoShadowingOrOverridesOnMethodsRule$1.evaluate(NoShadowingOrOverridesOnMethodsRule.java:40)
at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
at org.apache.lucene.util.TestRuleAssertionsRequired$1.evaluate(TestRuleAssertionsRequired.java:53)
at org.apache.lucene.util.TestRuleMarkFailure$1.evaluate(TestRuleMarkFailure.java:47)
at org.apache.lucene.util.TestRuleIgnoreAfterMaxFailures$1.evaluate(TestRuleIgnoreAfterMaxFailures.java:64)
at org.apache.lucene.util.TestRuleIgnoreTestSuites$1.evaluate(TestRuleIgnoreTestSuites.java:54)
at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
at com.carrotsearch.randomizedtesting.ThreadLeakControl$StatementRunner.run(ThreadLeakControl.java:368)
at java.lang.Thread.run(Thread.java:748)
Caused by: org.apache.zookeeper.KeeperException$SessionExpiredException: KeeperErrorCode = Session expired for /collections/collection1/state.json
at org.apache.zookeeper.KeeperException.create(KeeperException.java:130)
at org.apache.zookeeper.KeeperException.create(KeeperException.java:54)
at org.apache.zookeeper.ZooKeeper.getData(ZooKeeper.java:1215)
at org.apache.solr.common.cloud.SolrZkClient.lambda$getData$5(SolrZkClient.java:341)
at org.apache.solr.common.cloud.ZkCmdExecutor.retryOperation(ZkCmdExecutor.java:60)
at org.apache.solr.common.cloud.SolrZkClient.getData(SolrZkClient.java:341)
at org.apache.solr.common.cloud.ZkStateReader.fetchCollectionState(ZkStateReader.java:1328)
at org.apache.solr.common.cloud.ZkStateReader.getCollectionLive(ZkStateReader.java:1314)
... 39 more
Build Log:
[...truncated 13264 lines...]
[junit4] Suite: org.apache.solr.cloud.SyncSliceTest
[junit4] 2> Creating dataDir: /home/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-7.x/solr/build/solr-core/test/J2/temp/solr.cloud.SyncSliceTest_D00D26B5F179151C-001/init-core-data-001
[junit4] 2> 1596056 WARN (SUITE-SyncSliceTest-seed#[D00D26B5F179151C]-worker) [ ] o.a.s.SolrTestCaseJ4 startTrackingSearchers: numOpens=49 numCloses=49
[junit4] 2> 1596056 INFO (SUITE-SyncSliceTest-seed#[D00D26B5F179151C]-worker) [ ] o.a.s.SolrTestCaseJ4 Using PointFields (NUMERIC_POINTS_SYSPROP=true) w/NUMERIC_DOCVALUES_SYSPROP=false
[junit4] 2> 1596057 INFO (SUITE-SyncSliceTest-seed#[D00D26B5F179151C]-worker) [ ] o.a.s.SolrTestCaseJ4 Randomized ssl (false) and clientAuth (false) via: @org.apache.solr.util.RandomizeSSL(reason=, ssl=NaN, value=NaN, clientAuth=NaN)
[junit4] 2> 1596057 INFO (SUITE-SyncSliceTest-seed#[D00D26B5F179151C]-worker) [ ] o.a.s.SolrTestCaseJ4 SecureRandom sanity checks: test.solr.allowed.securerandom=null & java.security.egd=file:/dev/./urandom
[junit4] 2> 1596070 INFO (SUITE-SyncSliceTest-seed#[D00D26B5F179151C]-worker) [ ] o.a.s.BaseDistributedSearchTestCase Setting hostContext system property: /
[junit4] 2> 1596072 INFO (TEST-SyncSliceTest.test-seed#[D00D26B5F179151C]) [ ] o.a.s.c.ZkTestServer STARTING ZK TEST SERVER
[junit4] 2> 1596072 INFO (Thread-1726) [ ] o.a.s.c.ZkTestServer client port:0.0.0.0/0.0.0.0:0
[junit4] 2> 1596072 INFO (Thread-1726) [ ] o.a.s.c.ZkTestServer Starting server
[junit4] 2> 1596102 ERROR (Thread-1726) [ ] o.a.z.s.ZooKeeperServer ZKShutdownHandler is not registered, so ZooKeeper server won't take any action on ERROR or SHUTDOWN server state changes
[junit4] 2> 1596179 INFO (TEST-SyncSliceTest.test-seed#[D00D26B5F179151C]) [ ] o.a.s.c.ZkTestServer start zk server on port:38938
[junit4] 2> 1596197 INFO (zkConnectionManagerCallback-5145-thread-1) [ ] o.a.s.c.c.ConnectionManager zkClient has connected
[junit4] 2> 1596213 INFO (zkConnectionManagerCallback-5147-thread-1) [ ] o.a.s.c.c.ConnectionManager zkClient has connected
[junit4] 2> 1596215 INFO (TEST-SyncSliceTest.test-seed#[D00D26B5F179151C]) [ ] o.a.s.c.AbstractZkTestCase put /home/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-7.x/solr/core/src/test-files/solr/collection1/conf/solrconfig-tlog.xml to /configs/conf1/solrconfig.xml
[junit4] 2> 1596230 INFO (TEST-SyncSliceTest.test-seed#[D00D26B5F179151C]) [ ] o.a.s.c.AbstractZkTestCase put /home/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-7.x/solr/core/src/test-files/solr/collection1/conf/schema.xml to /configs/conf1/schema.xml
[junit4] 2> 1596231 INFO (TEST-SyncSliceTest.test-seed#[D00D26B5F179151C]) [ ] o.a.s.c.AbstractZkTestCase put /home/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-7.x/solr/core/src/test-files/solr/collection1/conf/solrconfig.snippet.randomindexconfig.xml to /configs/conf1/solrconfig.snippet.randomindexconfig.xml
[junit4] 2> 1596232 INFO (TEST-SyncSliceTest.test-seed#[D00D26B5F179151C]) [ ] o.a.s.c.AbstractZkTestCase put /home/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-7.x/solr/core/src/test-files/solr/collection1/conf/stopwords.txt to /configs/conf1/stopwords.txt
[junit4] 2> 1596251 INFO (TEST-SyncSliceTest.test-seed#[D00D26B5F179151C]) [ ] o.a.s.c.AbstractZkTestCase put /home/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-7.x/solr/core/src/test-files/solr/collection1/conf/protwords.txt to /configs/conf1/protwords.txt
[junit4] 2> 1596262 INFO (TEST-SyncSliceTest.test-seed#[D00D26B5F179151C]) [ ] o.a.s.c.AbstractZkTestCase put /home/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-7.x/solr/core/src/test-files/solr/collection1/conf/currency.xml to /configs/conf1/currency.xml
[junit4] 2> 1596266 INFO (TEST-SyncSliceTest.test-seed#[D00D26B5F179151C]) [ ] o.a.s.c.AbstractZkTestCase put /home/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-7.x/solr/core/src/test-files/solr/collection1/conf/enumsConfig.xml to /configs/conf1/enumsConfig.xml
[junit4] 2> 1596267 INFO (TEST-SyncSliceTest.test-seed#[D00D26B5F179151C]) [ ] o.a.s.c.AbstractZkTestCase put /home/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-7.x/solr/core/src/test-files/solr/collection1/conf/open-exchange-rates.json to /configs/conf1/open-exchange-rates.json
[junit4] 2> 1596268 INFO (TEST-SyncSliceTest.test-seed#[D00D26B5F179151C]) [ ] o.a.s.c.AbstractZkTestCase put /home/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-7.x/solr/core/src/test-files/solr/collection1/conf/mapping-ISOLatin1Accent.txt to /configs/conf1/mapping-ISOLatin1Accent.txt
[junit4] 2> 1596278 INFO (TEST-SyncSliceTest.test-seed#[D00D26B5F179151C]) [ ] o.a.s.c.AbstractZkTestCase put /home/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-7.x/solr/core/src/test-files/solr/collection1/conf/old_synonyms.txt to /configs/conf1/old_synonyms.txt
[junit4] 2> 1596279 INFO (TEST-SyncSliceTest.test-seed#[D00D26B5F179151C]) [ ] o.a.s.c.AbstractZkTestCase put /home/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-7.x/solr/core/src/test-files/solr/collection1/conf/synonyms.txt to /configs/conf1/synonyms.txt
[junit4] 2> 1596280 INFO (TEST-SyncSliceTest.test-seed#[D00D26B5F179151C]) [ ] o.a.s.c.AbstractFullDistribZkTestBase Will use NRT replicas unless explicitly asked otherwise
[junit4] 2> 1596706 INFO (TEST-SyncSliceTest.test-seed#[D00D26B5F179151C]) [ ] o.e.j.s.Server jetty-9.4.11.v20180605; built: 2018-06-05T18:24:03.829Z; git: d5fc0523cfa96bfebfbda19606cad384d772f04c; jvm 1.8.0_172-b11
[junit4] 2> 1596723 INFO (TEST-SyncSliceTest.test-seed#[D00D26B5F179151C]) [ ] o.e.j.s.session DefaultSessionIdManager workerName=node0
[junit4] 2> 1596723 INFO (TEST-SyncSliceTest.test-seed#[D00D26B5F179151C]) [ ] o.e.j.s.session No SessionScavenger set, using defaults
[junit4] 2> 1596723 INFO (TEST-SyncSliceTest.test-seed#[D00D26B5F179151C]) [ ] o.e.j.s.session node0 Scavenging every 660000ms
[junit4] 2> 1596723 INFO (TEST-SyncSliceTest.test-seed#[D00D26B5F179151C]) [ ] o.e.j.s.h.ContextHandler Started o.e.j.s.ServletContextHandler@572207d0{/,null,AVAILABLE}
[junit4] 2> 1596741 INFO (TEST-SyncSliceTest.test-seed#[D00D26B5F179151C]) [ ] o.e.j.s.AbstractConnector Started ServerConnector@cf2b851{HTTP/1.1,[http/1.1]}{127.0.0.1:45772}
[junit4] 2> 1596741 INFO (TEST-SyncSliceTest.test-seed#[D00D26B5F179151C]) [ ] o.e.j.s.Server Started @1596929ms
[junit4] 2> 1596741 INFO (TEST-SyncSliceTest.test-seed#[D00D26B5F179151C]) [ ] o.a.s.c.s.e.JettySolrRunner Jetty properties: {solr.data.dir=/home/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-7.x/solr/build/solr-core/test/J2/temp/solr.cloud.SyncSliceTest_D00D26B5F179151C-001/tempDir-001/control/data, hostContext=/, hostPort=45772, coreRootDirectory=/home/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-7.x/solr/build/solr-core/test/J2/temp/solr.cloud.SyncSliceTest_D00D26B5F179151C-001/control-001/cores}
[junit4] 2> 1596742 ERROR (TEST-SyncSliceTest.test-seed#[D00D26B5F179151C]) [ ] o.a.s.u.StartupLoggingUtils Missing Java Option solr.log.dir. Logging may be missing or incomplete.
[junit4] 2> 1596742 INFO (TEST-SyncSliceTest.test-seed#[D00D26B5F179151C]) [ ] o.a.s.s.SolrDispatchFilter Using logger factory org.apache.logging.slf4j.Log4jLoggerFactory
[junit4] 2> 1596742 INFO (TEST-SyncSliceTest.test-seed#[D00D26B5F179151C]) [ ] o.a.s.s.SolrDispatchFilter ___ _ Welcome to Apache Solr™ version 7.5.0
[junit4] 2> 1596742 INFO (TEST-SyncSliceTest.test-seed#[D00D26B5F179151C]) [ ] o.a.s.s.SolrDispatchFilter / __| ___| |_ _ Starting in cloud mode on port null
[junit4] 2> 1596742 INFO (TEST-SyncSliceTest.test-seed#[D00D26B5F179151C]) [ ] o.a.s.s.SolrDispatchFilter \__ \/ _ \ | '_| Install dir: null
[junit4] 2> 1596742 INFO (TEST-SyncSliceTest.test-seed#[D00D26B5F179151C]) [ ] o.a.s.s.SolrDispatchFilter |___/\___/_|_| Start time: 2018-07-02T21:24:26.987Z
[junit4] 2> 1596745 INFO (zkConnectionManagerCallback-5149-thread-1) [ ] o.a.s.c.c.ConnectionManager zkClient has connected
[junit4] 2> 1596765 INFO (TEST-SyncSliceTest.test-seed#[D00D26B5F179151C]) [ ] o.a.s.s.SolrDispatchFilter Loading solr.xml from SolrHome (not found in ZooKeeper)
[junit4] 2> 1596765 INFO (TEST-SyncSliceTest.test-seed#[D00D26B5F179151C]) [ ] o.a.s.c.SolrXmlConfig Loading container configuration from /home/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-7.x/solr/build/solr-core/test/J2/temp/solr.cloud.SyncSliceTest_D00D26B5F179151C-001/control-001/solr.xml
[junit4] 2> 1596768 INFO (TEST-SyncSliceTest.test-seed#[D00D26B5F179151C]) [ ] o.a.s.c.SolrXmlConfig Configuration parameter autoReplicaFailoverWorkLoopDelay is ignored
[junit4] 2> 1596768 INFO (TEST-SyncSliceTest.test-seed#[D00D26B5F179151C]) [ ] o.a.s.c.SolrXmlConfig Configuration parameter autoReplicaFailoverBadNodeExpiration is ignored
[junit4] 2> 1596802 INFO (TEST-SyncSliceTest.test-seed#[D00D26B5F179151C]) [ ] o.a.s.c.SolrXmlConfig MBean server found: com.sun.jmx.mbeanserver.JmxMBeanServer@38e5c0b9, but no JMX reporters were configured - adding default JMX reporter.
[junit4] 2> 1597029 INFO (TEST-SyncSliceTest.test-seed#[D00D26B5F179151C]) [ ] o.a.s.c.ZkContainer Zookeeper client=127.0.0.1:38938/solr
[junit4] 2> 1597030 INFO (zkConnectionManagerCallback-5153-thread-1) [ ] o.a.s.c.c.ConnectionManager zkClient has connected
[junit4] 2> 1597032 INFO (zkConnectionManagerCallback-5155-thread-1) [ ] o.a.s.c.c.ConnectionManager zkClient has connected
[junit4] 2> 1597401 INFO (TEST-SyncSliceTest.test-seed#[D00D26B5F179151C]) [n:127.0.0.1:45772_ ] o.a.s.c.Overseer Overseer (id=null) closing
[junit4] 2> 1597460 INFO (TEST-SyncSliceTest.test-seed#[D00D26B5F179151C]) [n:127.0.0.1:45772_ ] o.a.s.c.OverseerElectionContext I am going to be the leader 127.0.0.1:45772_
[junit4] 2> 1597461 INFO (TEST-SyncSliceTest.test-seed#[D00D26B5F179151C]) [n:127.0.0.1:45772_ ] o.a.s.c.Overseer Overseer (id=72903274989551620-127.0.0.1:45772_-n_0000000000) starting
[junit4] 2> 1597649 INFO (zkConnectionManagerCallback-5162-thread-1) [ ] o.a.s.c.c.ConnectionManager zkClient has connected
[junit4] 2> 1597683 INFO (TEST-SyncSliceTest.test-seed#[D00D26B5F179151C]) [n:127.0.0.1:45772_ ] o.a.s.c.s.i.ZkClientClusterStateProvider Cluster at 127.0.0.1:38938/solr ready
[junit4] 2> 1597686 INFO (TEST-SyncSliceTest.test-seed#[D00D26B5F179151C]) [n:127.0.0.1:45772_ ] o.a.s.c.ZkController Register node as live in ZooKeeper:/live_nodes/127.0.0.1:45772_
[junit4] 2> 1597707 INFO (OverseerStateUpdate-72903274989551620-127.0.0.1:45772_-n_0000000000) [ ] o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (0) -> (1)
[junit4] 2> 1597725 INFO (zkCallback-5161-thread-1) [ ] o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (0) -> (1)
[junit4] 2> 1597841 INFO (TEST-SyncSliceTest.test-seed#[D00D26B5F179151C]) [n:127.0.0.1:45772_ ] o.a.s.h.a.MetricsHistoryHandler No .system collection, keeping metrics history in memory.
[junit4] 2> 1597905 INFO (TEST-SyncSliceTest.test-seed#[D00D26B5F179151C]) [n:127.0.0.1:45772_ ] o.a.s.m.r.SolrJmxReporter JMX monitoring for 'solr.node' (registry 'solr.node') enabled at server: com.sun.jmx.mbeanserver.JmxMBeanServer@38e5c0b9
[junit4] 2> 1598001 INFO (TEST-SyncSliceTest.test-seed#[D00D26B5F179151C]) [n:127.0.0.1:45772_ ] o.a.s.m.r.SolrJmxReporter JMX monitoring for 'solr.jvm' (registry 'solr.jvm') enabled at server: com.sun.jmx.mbeanserver.JmxMBeanServer@38e5c0b9
[junit4] 2> 1598002 INFO (TEST-SyncSliceTest.test-seed#[D00D26B5F179151C]) [n:127.0.0.1:45772_ ] o.a.s.m.r.SolrJmxReporter JMX monitoring for 'solr.jetty' (registry 'solr.jetty') enabled at server: com.sun.jmx.mbeanserver.JmxMBeanServer@38e5c0b9
[junit4] 2> 1598003 INFO (TEST-SyncSliceTest.test-seed#[D00D26B5F179151C]) [n:127.0.0.1:45772_ ] o.a.s.c.CorePropertiesLocator Found 0 core definitions underneath /home/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-7.x/solr/build/solr-core/test/J2/temp/solr.cloud.SyncSliceTest_D00D26B5F179151C-001/control-001/cores
[junit4] 2> 1598109 INFO (zkConnectionManagerCallback-5168-thread-1) [ ] o.a.s.c.c.ConnectionManager zkClient has connected
[junit4] 2> 1598109 INFO (TEST-SyncSliceTest.test-seed#[D00D26B5F179151C]) [ ] o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (0) -> (1)
[junit4] 2> 1598128 INFO (TEST-SyncSliceTest.test-seed#[D00D26B5F179151C]) [ ] o.a.s.c.s.i.ZkClientClusterStateProvider Cluster at 127.0.0.1:38938/solr ready
[junit4] 2> 1598129 INFO (qtp1604160038-14093) [n:127.0.0.1:45772_ ] o.a.s.h.a.CollectionsHandler Invoked Collection Action :create with params collection.configName=conf1&name=control_collection&nrtReplicas=1&action=CREATE&numShards=1&createNodeSet=127.0.0.1:45772_&wt=javabin&version=2 and sendToOCPQueue=true
[junit4] 2> 1598182 INFO (OverseerThreadFactory-3918-thread-1) [ ] o.a.s.c.a.c.CreateCollectionCmd Create collection control_collection
[junit4] 2> 1598297 INFO (qtp1604160038-14088) [n:127.0.0.1:45772_ x:control_collection_shard1_replica_n1] o.a.s.h.a.CoreAdminOperation core create command qt=/admin/cores&collection.configName=conf1&newCollection=true&name=control_collection_shard1_replica_n1&action=CREATE&numShards=1&collection=control_collection&shard=shard1&wt=javabin&version=2&replicaType=NRT
[junit4] 2> 1598297 INFO (qtp1604160038-14088) [n:127.0.0.1:45772_ x:control_collection_shard1_replica_n1] o.a.s.c.TransientSolrCoreCacheDefault Allocating transient cache for 4 transient cores
[junit4] 2> 1599344 INFO (qtp1604160038-14088) [n:127.0.0.1:45772_ c:control_collection s:shard1 x:control_collection_shard1_replica_n1] o.a.s.c.SolrConfig Using Lucene MatchVersion: 7.5.0
[junit4] 2> 1599511 INFO (qtp1604160038-14088) [n:127.0.0.1:45772_ c:control_collection s:shard1 x:control_collection_shard1_replica_n1] o.a.s.s.IndexSchema [control_collection_shard1_replica_n1] Schema name=test
[junit4] 2> 1600041 INFO (qtp1604160038-14088) [n:127.0.0.1:45772_ c:control_collection s:shard1 x:control_collection_shard1_replica_n1] o.a.s.s.IndexSchema Loaded schema test/1.0 with uniqueid field id
[junit4] 2> 1600182 INFO (qtp1604160038-14088) [n:127.0.0.1:45772_ c:control_collection s:shard1 x:control_collection_shard1_replica_n1] o.a.s.c.CoreContainer Creating SolrCore 'control_collection_shard1_replica_n1' using configuration from collection control_collection, trusted=true
[junit4] 2> 1600183 INFO (qtp1604160038-14088) [n:127.0.0.1:45772_ c:control_collection s:shard1 x:control_collection_shard1_replica_n1] o.a.s.m.r.SolrJmxReporter JMX monitoring for 'solr.core.control_collection.shard1.replica_n1' (registry 'solr.core.control_collection.shard1.replica_n1') enabled at server: com.sun.jmx.mbeanserver.JmxMBeanServer@38e5c0b9
[junit4] 2> 1600183 INFO (qtp1604160038-14088) [n:127.0.0.1:45772_ c:control_collection s:shard1 x:control_collection_shard1_replica_n1] o.a.s.c.SolrCore solr.RecoveryStrategy.Builder
[junit4] 2> 1600183 INFO (qtp1604160038-14088) [n:127.0.0.1:45772_ c:control_collection s:shard1 x:control_collection_shard1_replica_n1] o.a.s.c.SolrCore [[control_collection_shard1_replica_n1] ] Opening new SolrCore at [/home/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-7.x/solr/build/solr-core/test/J2/temp/solr.cloud.SyncSliceTest_D00D26B5F179151C-001/control-001/cores/control_collection_shard1_replica_n1], dataDir=[/home/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-7.x/solr/build/solr-core/test/J2/temp/solr.cloud.SyncSliceTest_D00D26B5F179151C-001/control-001/cores/control_collection_shard1_replica_n1/data/]
[junit4] 2> 1600198 INFO (qtp1604160038-14088) [n:127.0.0.1:45772_ c:control_collection s:shard1 x:control_collection_shard1_replica_n1] o.a.s.u.RandomMergePolicy RandomMergePolicy wrapping class org.apache.lucene.index.TieredMergePolicy: [TieredMergePolicy: maxMergeAtOnce=43, maxMergeAtOnceExplicit=38, maxMergedSegmentMB=82.88671875, floorSegmentMB=1.0556640625, forceMergeDeletesPctAllowed=26.204506614826144, segmentsPerTier=44.0, maxCFSSegmentSizeMB=8.796093022207999E12, noCFSRatio=1.0, reclaimDeletesWeight=3.541249386275915
[junit4] 2> 1600200 WARN (qtp1604160038-14088) [n:127.0.0.1:45772_ c:control_collection s:shard1 x:control_collection_shard1_replica_n1] o.a.s.c.RequestHandlers INVALID paramSet a in requestHandler {type = requestHandler,name = /dump,class = DumpRequestHandler,attributes = {initParams=a, name=/dump, class=DumpRequestHandler},args = {defaults={a=A,b=B}}}
[junit4] 2> 1600391 INFO (qtp1604160038-14088) [n:127.0.0.1:45772_ c:control_collection s:shard1 x:control_collection_shard1_replica_n1] o.a.s.u.UpdateHandler Using UpdateLog implementation: org.apache.solr.update.UpdateLog
[junit4] 2> 1600391 INFO (qtp1604160038-14088) [n:127.0.0.1:45772_ c:control_collection s:shard1 x:control_collection_shard1_replica_n1] o.a.s.u.UpdateLog Initializing UpdateLog: dataDir= defaultSyncLevel=FLUSH numRecordsToKeep=100 maxNumLogsToKeep=10 numVersionBuckets=65536
[junit4] 2> 1600393 INFO (qtp1604160038-14088) [n:127.0.0.1:45772_ c:control_collection s:shard1 x:control_collection_shard1_replica_n1] o.a.s.u.CommitTracker Hard AutoCommit: disabled
[junit4] 2> 1600405 INFO (qtp1604160038-14088) [n:127.0.0.1:45772_ c:control_collection s:shard1 x:control_collection_shard1_replica_n1] o.a.s.u.CommitTracker Soft AutoCommit: disabled
[junit4] 2> 1600406 INFO (qtp1604160038-14088) [n:127.0.0.1:45772_ c:control_collection s:shard1 x:control_collection_shard1_replica_n1] o.a.s.u.RandomMergePolicy RandomMergePolicy wrapping class org.apache.lucene.index.TieredMergePolicy: [TieredMergePolicy: maxMergeAtOnce=24, maxMergeAtOnceExplicit=14, maxMergedSegmentMB=62.384765625, floorSegmentMB=1.6826171875, forceMergeDeletesPctAllowed=19.8397720320704, segmentsPerTier=47.0, maxCFSSegmentSizeMB=8.796093022207999E12, noCFSRatio=0.15356301063868846, reclaimDeletesWeight=2.4595704375828533
[junit4] 2> 1600407 INFO (qtp1604160038-14088) [n:127.0.0.1:45772_ c:control_collection s:shard1 x:control_collection_shard1_replica_n1] o.a.s.s.SolrIndexSearcher Opening [Searcher@4be9bb43[control_collection_shard1_replica_n1] main]
[junit4] 2> 1600408 INFO (qtp1604160038-14088) [n:127.0.0.1:45772_ c:control_collection s:shard1 x:control_collection_shard1_replica_n1] o.a.s.r.ManagedResourceStorage Configured ZooKeeperStorageIO with znodeBase: /configs/conf1
[junit4] 2> 1600409 INFO (qtp1604160038-14088) [n:127.0.0.1:45772_ c:control_collection s:shard1 x:control_collection_shard1_replica_n1] o.a.s.r.ManagedResourceStorage Loaded null at path _rest_managed.json using ZooKeeperStorageIO:path=/configs/conf1
[junit4] 2> 1600422 INFO (qtp1604160038-14088) [n:127.0.0.1:45772_ c:control_collection s:shard1 x:control_collection_shard1_replica_n1] o.a.s.h.ReplicationHandler Commits will be reserved for 10000ms.
[junit4] 2> 1600423 INFO (searcherExecutor-3923-thread-1-processing-n:127.0.0.1:45772_ x:control_collection_shard1_replica_n1 c:control_collection s:shard1) [n:127.0.0.1:45772_ c:control_collection s:shard1 x:control_collection_shard1_replica_n1] o.a.s.c.SolrCore [control_collection_shard1_replica_n1] Registered new searcher Searcher@4be9bb43[control_collection_shard1_replica_n1] main{ExitableDirectoryReader(UninvertingDirectoryReader())}
[junit4] 2> 1600423 INFO (qtp1604160038-14088) [n:127.0.0.1:45772_ c:control_collection s:shard1 x:control_collection_shard1_replica_n1] o.a.s.u.UpdateLog Could not find max version in index or recent updates, using new clock 1604915477262368768
[junit4] 2> 1600455 INFO (qtp1604160038-14088) [n:127.0.0.1:45772_ c:control_collection s:shard1 x:control_collection_shard1_replica_n1] o.a.s.c.ZkShardTerms Successful update of terms at /collections/control_collection/terms/shard1 to Terms{values={core_node2=0}, version=0}
[junit4] 2> 1600457 INFO (qtp1604160038-14088) [n:127.0.0.1:45772_ c:control_collection s:shard1 x:control_collection_shard1_replica_n1] o.a.s.c.ShardLeaderElectionContext Enough replicas found to continue.
[junit4] 2> 1600457 INFO (qtp1604160038-14088) [n:127.0.0.1:45772_ c:control_collection s:shard1 x:control_collection_shard1_replica_n1] o.a.s.c.ShardLeaderElectionContext I may be the new leader - try and sync
[junit4] 2> 1600457 INFO (qtp1604160038-14088) [n:127.0.0.1:45772_ c:control_collection s:shard1 x:control_collection_shard1_replica_n1] o.a.s.c.SyncStrategy Sync replicas to http://127.0.0.1:45772/control_collection_shard1_replica_n1/
[junit4] 2> 1600458 INFO (qtp1604160038-14088) [n:127.0.0.1:45772_ c:control_collection s:shard1 x:control_collection_shard1_replica_n1] o.a.s.c.SyncStrategy Sync Success - now sync replicas to me
[junit4] 2> 1600458 INFO (qtp1604160038-14088) [n:127.0.0.1:45772_ c:control_collection s:shard1 x:control_collection_shard1_replica_n1] o.a.s.c.SyncStrategy http://127.0.0.1:45772/control_collection_shard1_replica_n1/ has no replicas
[junit4] 2> 1600458 INFO (qtp1604160038-14088) [n:127.0.0.1:45772_ c:control_collection s:shard1 x:control_collection_shard1_replica_n1] o.a.s.c.ShardLeaderElectionContext Found all replicas participating in election, clear LIR
[junit4] 2> 1600473 INFO (qtp1604160038-14088) [n:127.0.0.1:45772_ c:control_collection s:shard1 x:control_collection_shard1_replica_n1] o.a.s.c.ShardLeaderElectionContext I am the new leader: http://127.0.0.1:45772/control_collection_shard1_replica_n1/ shard1
[junit4] 2> 1600532 INFO (qtp1604160038-14088) [n:127.0.0.1:45772_ c:control_collection s:shard1 x:control_collection_shard1_replica_n1] o.a.s.c.ZkController I am the leader, no recovery necessary
[junit4] 2> 1600533 INFO (qtp1604160038-14088) [n:127.0.0.1:45772_ c:control_collection s:shard1 x:control_collection_shard1_replica_n1] o.a.s.s.HttpSolrCall [admin] webapp=null path=/admin/cores params={qt=/admin/cores&collection.configName=conf1&newCollection=true&name=control_collection_shard1_replica_n1&action=CREATE&numShards=1&collection=control_collection&shard=shard1&wt=javabin&version=2&replicaType=NRT} status=0 QTime=2236
[junit4] 2> 1600561 INFO (qtp1604160038-14093) [n:127.0.0.1:45772_ ] o.a.s.h.a.CollectionsHandler Wait for new collection to be active for at most 30 seconds. Check all shard replicas
[junit4] 2> 1600562 INFO (OverseerCollectionConfigSetProcessor-72903274989551620-127.0.0.1:45772_-n_0000000000) [ ] o.a.s.c.OverseerTaskQueue Response ZK path: /overseer/collection-queue-work/qnr-0000000000 doesn't exist. Requestor may have disconnected from ZooKeeper
[junit4] 2> 1600641 INFO (zkCallback-5154-thread-1) [ ] o.a.s.c.c.ZkStateReader A cluster state change: [WatchedEvent state:SyncConnected type:NodeDataChanged path:/collections/control_collection/state.json] for collection [control_collection] has occurred - updating... (live nodes size: [1])
[junit4] 2> 1601564 INFO (qtp1604160038-14093) [n:127.0.0.1:45772_ ] o.a.s.s.HttpSolrCall [admin] webapp=null path=/admin/collections params={collection.configName=conf1&name=control_collection&nrtReplicas=1&action=CREATE&numShards=1&createNodeSet=127.0.0.1:45772_&wt=javabin&version=2} status=0 QTime=3434
[junit4] 2> 1601581 INFO (zkConnectionManagerCallback-5173-thread-1) [ ] o.a.s.c.c.ConnectionManager zkClient has connected
[junit4] 2> 1601581 INFO (TEST-SyncSliceTest.test-seed#[D00D26B5F179151C]) [ ] o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (0) -> (1)
[junit4] 2> 1601582 INFO (TEST-SyncSliceTest.test-seed#[D00D26B5F179151C]) [ ] o.a.s.c.s.i.ZkClientClusterStateProvider Cluster at 127.0.0.1:38938/solr ready
[junit4] 2> 1601582 INFO (TEST-SyncSliceTest.test-seed#[D00D26B5F179151C]) [ ] o.a.s.c.ChaosMonkey monkey: init - expire sessions:false cause connection loss:false
[junit4] 2> 1601651 INFO (qtp1604160038-14089) [n:127.0.0.1:45772_ ] o.a.s.h.a.CollectionsHandler Invoked Collection Action :create with params collection.configName=conf1&name=collection1&nrtReplicas=1&action=CREATE&numShards=1&createNodeSet=&stateFormat=2&wt=javabin&version=2 and sendToOCPQueue=true
[junit4] 2> 1601675 INFO (OverseerThreadFactory-3918-thread-2) [ ] o.a.s.c.a.c.CreateCollectionCmd Create collection collection1
[junit4] 2> 1601676 WARN (OverseerThreadFactory-3918-thread-2) [ ] o.a.s.c.a.c.CreateCollectionCmd It is unusual to create a collection (collection1) without cores.
[junit4] 2> 1601931 INFO (qtp1604160038-14089) [n:127.0.0.1:45772_ ] o.a.s.h.a.CollectionsHandler Wait for new collection to be active for at most 30 seconds. Check all shard replicas
[junit4] 2> 1601958 INFO (qtp1604160038-14089) [n:127.0.0.1:45772_ ] o.a.s.s.HttpSolrCall [admin] webapp=null path=/admin/collections params={collection.configName=conf1&name=collection1&nrtReplicas=1&action=CREATE&numShards=1&createNodeSet=&stateFormat=2&wt=javabin&version=2} status=0 QTime=308
[junit4] 2> 1602380 INFO (TEST-SyncSliceTest.test-seed#[D00D26B5F179151C]) [ ] o.a.s.c.AbstractFullDistribZkTestBase create jetty 1 in directory /home/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-7.x/solr/build/solr-core/test/J2/temp/solr.cloud.SyncSliceTest_D00D26B5F179151C-001/shard-1-001 of type NRT
[junit4] 2> 1602380 INFO (TEST-SyncSliceTest.test-seed#[D00D26B5F179151C]) [ ] o.e.j.s.Server jetty-9.4.11.v20180605; built: 2018-06-05T18:24:03.829Z; git: d5fc0523cfa96bfebfbda19606cad384d772f04c; jvm 1.8.0_172-b11
[junit4] 2> 1602397 INFO (TEST-SyncSliceTest.test-seed#[D00D26B5F179151C]) [ ] o.e.j.s.session DefaultSessionIdManager workerName=node0
[junit4] 2> 1602397 INFO (TEST-SyncSliceTest.test-seed#[D00D26B5F179151C]) [ ] o.e.j.s.session No SessionScavenger set, using defaults
[junit4] 2> 1602397 INFO (TEST-SyncSliceTest.test-seed#[D00D26B5F179151C]) [ ] o.e.j.s.session node0 Scavenging every 660000ms
[junit4] 2> 1602397 INFO (TEST-SyncSliceTest.test-seed#[D00D26B5F179151C]) [ ] o.e.j.s.h.ContextHandler Started o.e.j.s.ServletContextHandler@8128822{/,null,AVAILABLE}
[junit4] 2> 1602398 INFO (TEST-SyncSliceTest.test-seed#[D00D26B5F179151C]) [ ] o.e.j.s.AbstractConnector Started ServerConnector@1be2efb5{HTTP/1.1,[http/1.1]}{127.0.0.1:34972}
[junit4] 2> 1602398 INFO (TEST-SyncSliceTest.test-seed#[D00D26B5F179151C]) [ ] o.e.j.s.Server Started @1602586ms
[junit4] 2> 1602399 INFO (TEST-SyncSliceTest.test-seed#[D00D26B5F179151C]) [ ] o.a.s.c.s.e.JettySolrRunner Jetty properties: {solr.data.dir=/home/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-7.x/solr/build/solr-core/test/J2/temp/solr.cloud.SyncSliceTest_D00D26B5F179151C-001/tempDir-001/jetty1, replicaType=NRT, solrconfig=solrconfig.xml, hostContext=/, hostPort=34972, coreRootDirectory=/home/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-7.x/solr/build/solr-core/test/J2/temp/solr.cloud.SyncSliceTest_D00D26B5F179151C-001/shard-1-001/cores}
[junit4] 2> 1602399 ERROR (TEST-SyncSliceTest.test-seed#[D00D26B5F179151C]) [ ] o.a.s.u.StartupLoggingUtils Missing Java Option solr.log.dir. Logging may be missing or incomplete.
[junit4] 2> 1602399 INFO (TEST-SyncSliceTest.test-seed#[D00D26B5F179151C]) [ ] o.a.s.s.SolrDispatchFilter Using logger factory org.apache.logging.slf4j.Log4jLoggerFactory
[junit4] 2> 1602399 INFO (TEST-SyncSliceTest.test-seed#[D00D26B5F179151C]) [ ] o.a.s.s.SolrDispatchFilter ___ _ Welcome to Apache Solr™ version 7.5.0
[junit4] 2> 1602399 INFO (TEST-SyncSliceTest.test-seed#[D00D26B5F179151C]) [ ] o.a.s.s.SolrDispatchFilter / __| ___| |_ _ Starting in cloud mode on port null
[junit4] 2> 1602399 INFO (TEST-SyncSliceTest.test-seed#[D00D26B5F179151C]) [ ] o.a.s.s.SolrDispatchFilter \__ \/ _ \ | '_| Install dir: null
[junit4] 2> 1602399 INFO (TEST-SyncSliceTest.test-seed#[D00D26B5F179151C]) [ ] o.a.s.s.SolrDispatchFilter |___/\___/_|_| Start time: 2018-07-02T21:24:32.644Z
[junit4] 2> 1602401 INFO (zkConnectionManagerCallback-5175-thread-1) [ ] o.a.s.c.c.ConnectionManager zkClient has connected
[junit4] 2> 1602421 INFO (TEST-SyncSliceTest.test-seed#[D00D26B5F179151C]) [ ] o.a.s.s.SolrDispatchFilter Loading solr.xml from SolrHome (not found in ZooKeeper)
[junit4] 2> 1602421 INFO (TEST-SyncSliceTest.test-seed#[D00D26B5F179151C]) [ ] o.a.s.c.SolrXmlConfig Loading container configuration from /home/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-7.x/solr/build/solr-core/test/J2/temp/solr.cloud.SyncSliceTest_D00D26B5F179151C-001/shard-1-001/solr.xml
[junit4] 2> 1602437 INFO (TEST-SyncSliceTest.test-seed#[D00D26B5F179151C]) [ ] o.a.s.c.SolrXmlConfig Configuration parameter autoReplicaFailoverWorkLoopDelay is ignored
[junit4] 2> 1602437 INFO (TEST-SyncSliceTest.test-seed#[D00D26B5F179151C]) [ ] o.a.s.c.SolrXmlConfig Configuration parameter autoReplicaFailoverBadNodeExpiration is ignored
[junit4] 2> 1602455 INFO (TEST-SyncSliceTest.test-seed#[D00D26B5F179151C]) [ ] o.a.s.c.SolrXmlConfig MBean server found: com.sun.jmx.mbeanserver.JmxMBeanServer@38e5c0b9, but no JMX reporters were configured - adding default JMX reporter.
[junit4] 2> 1602958 INFO (TEST-SyncSliceTest.test-seed#[D00D26B5F179151C]) [ ] o.a.s.c.ZkContainer Zookeeper client=127.0.0.1:38938/solr
[junit4] 2> 1602960 INFO (zkConnectionManagerCallback-5179-thread-1) [ ] o.a.s.c.c.ConnectionManager zkClient has connected
[junit4] 2> 1602961 INFO (zkConnectionManagerCallback-5181-thread-1) [ ] o.a.s.c.c.ConnectionManager zkClient has connected
[junit4] 2> 1603003 INFO (TEST-SyncSliceTest.test-seed#[D00D26B5F179151C]) [n:127.0.0.1:34972_ ] o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (0) -> (1)
[junit4] 2> 1603005 INFO (TEST-SyncSliceTest.test-seed#[D00D26B5F179151C]) [n:127.0.0.1:34972_ ] o.a.s.c.Overseer Overseer (id=null) closing
[junit4] 2> 1603015 INFO (TEST-SyncSliceTest.test-seed#[D00D26B5F179151C]) [n:127.0.0.1:34972_ ] o.a.s.c.TransientSolrCoreCacheDefault Allocating transient cache for 4 transient cores
[junit4] 2> 1603015 INFO (TEST-SyncSliceTest.test-seed#[D00D26B5F179151C]) [n:127.0.0.1:34972_ ] o.a.s.c.ZkController Register node as live in ZooKeeper:/live_nodes/127.0.0.1:34972_
[junit4] 2> 1603016 INFO (zkCallback-5154-thread-1) [ ] o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (1) -> (2)
[junit4] 2> 1603017 INFO (zkCallback-5172-thread-1) [ ] o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (1) -> (2)
[junit4] 2> 1603017 INFO (zkCallback-5161-thread-1) [ ] o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (1) -> (2)
[junit4] 2> 1603052 INFO (zkCallback-5180-thread-1) [ ] o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (1) -> (2)
[junit4] 2> 1603092 INFO (zkConnectionManagerCallback-5188-thread-1) [ ] o.a.s.c.c.ConnectionManager zkClient has connected
[junit4] 2> 1603105 INFO (TEST-SyncSliceTest.test-seed#[D00D26B5F179151C]) [n:127.0.0.1:34972_ ] o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (0) -> (2)
[junit4] 2> 1603106 INFO (TEST-SyncSliceTest.test-seed#[D00D26B5F179151C]) [n:127.0.0.1:34972_ ] o.a.s.c.s.i.ZkClientClusterStateProvider Cluster at 127.0.0.1:38938/solr ready
[junit4] 2> 1603107 INFO (TEST-SyncSliceTest.test-seed#[D00D26B5F179151C]) [n:127.0.0.1:34972_ ] o.a.s.h.a.MetricsHistoryHandler No .system collection, keeping metrics history in memory.
[junit4] 2> 1603163 INFO (TEST-SyncSliceTest.test-seed#[D00D26B5F179151C]) [n:127.0.0.1:34972_ ] o.a.s.m.r.SolrJmxReporter JMX monitoring for 'solr.node' (registry 'solr.node') enabled at server: com.sun.jmx.mbeanserver.JmxMBeanServer@38e5c0b9
[junit4] 2> 1603180 INFO (TEST-SyncSliceTest.test-seed#[D00D26B5F179151C]) [n:127.0.0.1:34972_ ] o.a.s.m.r.SolrJmxReporter JMX monitoring for 'solr.jvm' (registry 'solr.jvm') enabled at server: com.sun.jmx.mbeanserver.JmxMBeanServer@38e5c0b9
[junit4] 2> 1603180 INFO (TEST-SyncSliceTest.test-seed#[D00D26B5F179151C]) [n:127.0.0.1:34972_ ] o.a.s.m.r.SolrJmxReporter JMX monitoring for 'solr.jetty' (registry 'solr.jetty') enabled at server: com.sun.jmx.mbeanserver.JmxMBeanServer@38e5c0b9
[junit4] 2> 1603182 INFO (TEST-SyncSliceTest.test-seed#[D00D26B5F179151C]) [n:127.0.0.1:34972_ ] o.a.s.c.CorePropertiesLocator Found 0 core definitions underneath /home/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-7.x/solr/build/solr-core/test/J2/temp/solr.cloud.SyncSliceTest_D00D26B5F179151C-001/shard-1-001/cores
[junit4] 2> 1603234 INFO (qtp1951284671-14148) [n:127.0.0.1:34972_ ] o.a.s.h.a.CollectionsHandler Invoked Collection Action :addreplica with params node=127.0.0.1:34972_&action=ADDREPLICA&collection=collection1&shard=shard1&type=NRT&wt=javabin&version=2 and sendToOCPQueue=true
[junit4] 2> 1603237 INFO (OverseerCollectionConfigSetProcessor-72903274989551620-127.0.0.1:45772_-n_0000000000) [ ] o.a.s.c.OverseerTaskQueue Response ZK path: /overseer/collection-queue-work/qnr-0000000002 doesn't exist. Requestor may have disconnected from ZooKeeper
[junit4] 2> 1603237 INFO (OverseerThreadFactory-3918-thread-3) [ c:collection1 s:shard1 ] o.a.s.c.a.c.AddReplicaCmd Node Identified 127.0.0.1:34972_ for creating new replica
[junit4] 2> 1603258 INFO (qtp1951284671-14143) [n:127.0.0.1:34972_ x:collection1_shard1_replica_n21] o.a.s.h.a.CoreAdminOperation core create command qt=/admin/cores&collection.configName=conf1&name=collection1_shard1_replica_n21&action=CREATE&collection=collection1&shard=shard1&wt=javabin&version=2&replicaType=NRT
[junit4] 2> 1604301 INFO (qtp1951284671-14143) [n:127.0.0.1:34972_ c:collection1 s:shard1 x:collection1_shard1_replica_n21] o.a.s.c.SolrConfig Using Lucene MatchVersion: 7.5.0
[junit4] 2> 1604341 INFO (qtp1951284671-14143) [n:127.0.0.1:34972_ c:collection1 s:shard1 x:collection1_shard1_replica_n21] o.a.s.s.IndexSchema [collection1_shard1_replica_n21] Schema name=test
[junit4] 2> 1604741 INFO (qtp1951284671-14143) [n:127.0.0.1:34972_ c:collection1 s:shard1 x:collection1_shard1_replica_n21] o.a.s.s.IndexSchema Loaded schema test/1.0 with uniqueid field id
[junit4] 2> 1604797 INFO (qtp1951284671-14143) [n:127.0.0.1:34972_ c:collection1 s:shard1 x:collection1_shard1_replica_n21] o.a.s.c.CoreContainer Creating SolrCore 'collection1_shard1_replica_n21' using configuration from collection collection1, trusted=true
[junit4] 2> 1604798 INFO (qtp1951284671-14143) [n:127.0.0.1:34972_ c:collection1 s:shard1 x:collection1_shard1_replica_n21] o.a.s.m.r.SolrJmxReporter JMX monitoring for 'solr.core.collection1.shard1.replica_n21' (registry 'solr.core.collection1.shard1.replica_n21') enabled at server: com.sun.jmx.mbeanserver.JmxMBeanServer@38e5c0b9
[junit4] 2> 1604798 INFO (qtp1951284671-14143) [n:127.0.0.1:34972_ c:collection1 s:shard1 x:collection1_shard1_replica_n21] o.a.s.c.SolrCore solr.RecoveryStrategy.Builder
[junit4] 2> 1604798 INFO (qtp1951284671-14143) [n:127.0.0.1:34972_ c:collection1 s:shard1 x:collection1_shard1_replica_n21] o.a.s.c.SolrCore [[collection1_shard1_replica_n21] ] Opening new SolrCore at [/home/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-7.x/solr/build/solr-core/test/J2/temp/solr.cloud.SyncSliceTest_D00D26B5F179151C-001/shard-1-001/cores/collection1_shard1_replica_n21], dataDir=[/home/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-7.x/solr/build/solr-core/test/J2/temp/solr.cloud.SyncSliceTest_D00D26B5F179151C-001/shard-1-001/cores/collection1_shard1_replica_n21/data/]
[junit4] 2> 1604813 INFO (qtp1951284671-14143) [n:127.0.0.1:34972_ c:collection1 s:shard1 x:collection1_shard1_replica_n21] o.a.s.u.RandomMergePolicy RandomMergePolicy wrapping class org.apache.lucene.index.TieredMergePolicy: [TieredMergePolicy: maxMergeAtOnce=43, maxMergeAtOnceExplicit=38, maxMergedSegmentMB=82.88671875, floorSegmentMB=1.0556640625, forceMergeDeletesPctAllowed=26.204506614826144, segmentsPerTier=44.0, maxCFSSegmentSizeMB=8.796093022207999E12, noCFSRatio=1.0, reclaimDeletesWeight=3.541249386275915
[junit4] 2> 1604815 WARN (qtp1951284671-14143) [n:127.0.0.1:34972_ c:collection1 s:shard1 x:collection1_shard1_replica_n21] o.a.s.c.RequestHandlers INVALID paramSet a in requestHandler {type = requestHandler,name = /dump,class = DumpRequestHandler,attributes = {initParams=a, name=/dump, class=DumpRequestHandler},args = {defaults={a=A,b=B}}}
[junit4] 2> 1604953 INFO (qtp1951284671-14143) [n:127.0.0.1:34972_ c:collection1 s:shard1 x:collection1_shard1_replica_n21] o.a.s.u.UpdateHandler Using UpdateLog implementation: org.apache.solr.update.UpdateLog
[junit4] 2> 1604953 INFO (qtp1951284671-14143) [n:127.0.0.1:34972_ c:collection1 s:shard1 x:collection1_shard1_replica_n21] o.a.s.u.UpdateLog Initializing UpdateLog: dataDir= defaultSyncLevel=FLUSH numRecordsToKeep=100 maxNumLogsToKeep=10 numVersionBuckets=65536
[junit4] 2> 1605029 INFO (qtp1951284671-14143) [n:127.0.0.1:34972_ c:collection1 s:shard1 x:collection1_shard1_replica_n21] o.a.s.u.CommitTracker Hard AutoCommit: disabled
[junit4] 2> 1605029 INFO (qtp1951284671-14143) [n:127.0.0.1:34972_ c:collection1 s:shard1 x:collection1_shard1_replica_n21] o.a.s.u.CommitTracker Soft AutoCommit: disabled
[junit4] 2> 1605030 INFO (qtp1951284671-14143) [n:127.0.0.1:34972_ c:collection1 s:shard1 x:collection1_shard1_replica_n21] o.a.s.u.RandomMergePolicy RandomMergePolicy wrapping class org.apache.lucene.index.TieredMergePolicy: [TieredMergePolicy: maxMergeAtOnce=24, maxMergeAtOnceExplicit=14, maxMergedSegmentMB=62.384765625, floorSegmentMB=1.6826171875, forceMergeDeletesPctAllowed=19.8397720320704, segmentsPerTier=47.0, maxCFSSegmentSizeMB=8.796093022207999E12, noCFSRatio=0.15356301063868846, reclaimDeletesWeight=2.4595704375828533
[junit4] 2> 1605031 INFO (qtp1951284671-14143) [n:127.0.0.1:34972_ c:collection1 s:shard1 x:collection1_shard1_replica_n21] o.a.s.s.SolrIndexSearcher Opening [Searcher@62c1e9ec[collection1_shard1_replica_n21] main]
[junit4] 2> 1605032 INFO (qtp1951284671-14143) [n:127.0.0.1:34972_ c:collection1 s:shard1 x:collection1_shard1_replica_n21] o.a.s.r.ManagedResourceStorage Configured ZooKeeperStorageIO with znodeBase: /configs/conf1
[junit4] 2> 1605032 INFO (qtp1951284671-14143) [n:127.0.0.1:34972_ c:collection1 s:shard1 x:collection1_shard1_replica_n21] o.a.s.r.ManagedResourceStorage Loaded null at path _rest_managed.json using ZooKeeperStorageIO:path=/configs/conf1
[junit4] 2> 1605033 INFO (qtp1951284671-14143) [n:127.0.0.1:34972_ c:collection1 s:shard1 x:collection1_shard1_replica_n21] o.a.s.h.ReplicationHandler Commits will be reserved for 10000ms.
[junit4] 2> 1605033 INFO (qtp1951284671-14143) [n:127.0.0.1:34972_ c:collection1 s:shard1 x:collection1_shard1_replica_n21] o.a.s.u.UpdateLog Could not find max version in index or recent updates, using new clock 1604915482096304128
[junit4] 2> 1605034 INFO (searcherExecutor-3937-thread-1-processing-n:127.0.0.1:34972_ x:collection1_shard1_replica_n21 c:collection1 s:shard1) [n:127.0.0.1:34972_ c:collection1 s:shard1 x:collection1_shard1_replica_n21] o.a.s.c.SolrCore [collection1_shard1_replica_n21] Registered new searcher Searcher@62c1e9ec[collection1_shard1_replica_n21] main{ExitableDirectoryReader(UninvertingDirectoryReader())}
[junit4] 2> 1605039 INFO (qtp1951284671-14143) [n:127.0.0.1:34972_ c:collection1 s:shard1 x:collection1_shard1_replica_n21] o.a.s.c.ZkShardTerms Successful update of terms at /collections/collection1/terms/shard1 to Terms{values={core_node22=0}, version=0}
[junit4] 2> 1605050 INFO (qtp1951284671-14143) [n:127.0.0.1:34972_ c:collection1 s:shard1 x:collection1_shard1_replica_n21] o.a.s.c.ShardLeaderElectionContext Enough replicas found to continue.
[junit4] 2> 1605050 INFO (qtp1951284671-14143) [n:127.0.0.1:34972_ c:collection1 s:shard1 x:collection1_shard1_replica_n21] o.a.s.c.ShardLeaderElectionContext I may be the new leader - try and sync
[junit4] 2> 1605050 INFO (qtp1951284671-14143) [n:127.0.0.1:34972_ c:collection1 s:shard1 x:collection1_shard1_replica_n21] o.a.s.c.SyncStrategy Sync replicas to http://127.0.0.1:34972/collection1_shard1_replica_n21/
[junit4] 2> 1605050 INFO (qtp1951284671-14143) [n:127.0.0.1:34972_ c:collection1 s:shard1 x:collection1_shard1_replica_n21] o.a.s.c.SyncStrategy Sync Success - now sync replicas to me
[junit4] 2> 1605050 INFO (qtp1951284671-14143) [n:127.0.0.1:34972_ c:collection1 s:shard1 x:collection1_shard1_replica_n21] o.a.s.c.SyncStrategy http://127.0.0.1:34972/collection1_shard1_replica_n21/ has no replicas
[junit4] 2> 1605050 INFO (qtp1951284671-14143) [n:127.0.0.1:34972_ c:collection1 s:shard1 x:collection1_shard1_replica_n21] o.a.s.c.ShardLeaderElectionContext Found all replicas participating in election, clear LIR
[junit4] 2> 1605061 INFO (qtp1951284671-14143) [n:127.0.0.1:34972_ c:collection1 s:shard1 x:collection1_shard1_replica_n21] o.a.s.c.ShardLeaderElectionContext I am the new leader: http://127.0.0.1:34972/collection1_shard1_replica_n21/ shard1
[junit4] 2> 1605164 INFO (qtp1951284671-14143) [n:127.0.0.1:34972_ c:collection1 s:shard1 x:collection1_shard1_replica_n21] o.a.s.c.ZkController I am the leader, no recovery necessary
[junit4] 2> 1605166 INFO (qtp1951284671-14143) [n:127.0.0.1:34972_ c:collection1 s:shard1 x:collection1_shard1_replica_n21] o.a.s.s.HttpSolrCall [admin] webapp=null path=/admin/cores params={qt=/admin/cores&collection.configName=conf1&name=collection1_shard1_replica_n21&action=CREATE&collection=collection1&shard=shard1&wt=javabin&version=2&replicaType=NRT} status=0 QTime=1909
[junit4] 2> 1605168 INFO (qtp1951284671-14148) [n:127.0.0.1:34972_ c:collection1 ] o.a.s.s.HttpSolrCall [admin] webapp=null path=/admin/collections params={node=127.0.0.1:34972_&action=ADDREPLICA&collection=collection1&shard=shard1&type=NRT&wt=javabin&version=2} status=0 QTime=1934
[junit4] 2> 1605239 INFO (OverseerCollectionConfigSetProcessor-72903274989551620-127.0.0.1:45772_-n_0000000000) [ ] o.a.s.c.OverseerTaskQueue Response ZK path: /overseer/collection-queue-work/qnr-0000000004 doesn't exist. Requestor may have disconnected from ZooKeeper
[junit4] 2> 1605267 INFO (zkCallback-5180-thread-1) [ ] o.a.s.c.c.ZkStateReader A cluster state change: [WatchedEvent state:SyncConnected type:NodeDataChanged path:/collections/collection1/state.json] for collection [collection1] has occurred - updating... (live nodes size: [2])
[junit4] 2> 1605496 INFO (TEST-SyncSliceTest.test-seed#[D00D26B5F179151C]) [ ] o.a.s.c.AbstractFullDistribZkTestBase create jetty 2 in directory /home/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-7.x/solr/build/solr-core/test/J2/temp/solr.cloud.SyncSliceTest_D00D26B5F179151C-001/shard-2-001 of type NRT
[junit4] 2> 1605497 INFO (TEST-SyncSliceTest.test-seed#[D00D26B5F179151C]) [ ] o.e.j.s.Server jetty-9.4.11.v20180605; built: 2018-06-05T18:24:03.829Z; git: d5fc0523cfa96bfebfbda19606cad384d772f04c; jvm 1.8.0_172-b11
[junit4] 2> 1605497 INFO (TEST-SyncSliceTest.test-seed#[D00D26B5F179151C]) [ ] o.e.j.s.session DefaultSessionIdManager workerName=node0
[junit4] 2> 1605498 INFO (TEST-SyncSliceTest.test-seed#[D00D26B5F179151C]) [ ] o.e.j.s.session No SessionScavenger set, using defaults
[junit4] 2> 1605498 INFO (TEST-SyncSliceTest.test-seed#[D00D26B5F179151C]) [ ] o.e.j.s.session node0 Scavenging every 600000ms
[junit4] 2> 1605498 INFO (TEST-SyncSliceTest.test-seed#[D00D26B5F179151C]) [ ] o.e.j.s.h.ContextHandler Started o.e.j.s.ServletContextHandler@148d18aa{/,null,AVAILABLE}
[junit4] 2> 1605498 INFO (TEST-SyncSliceTest.test-seed#[D00D26B5F179151C]) [ ] o.e.j.s.AbstractConnector Started ServerConnector@1b49f9e4{HTTP/1.1,[http/1.1]}{127.0.0.1:33972}
[junit4] 2> 1605498 INFO (TEST-SyncSliceTest.test-seed#[D00D26B5F179151C]) [ ] o.e.j.s.Server Started @1605686ms
[junit4] 2> 1605502 INFO (TEST-SyncSliceTest.test-seed#[D00D26B5F179151C]) [ ] o.a.s.c.s.e.JettySolrRunner Jetty properties: {solr.data.dir=/home/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-7.x/solr/build/solr-core/test/J2/temp/solr.cloud.SyncSliceTest_D00D26B5F179151C-001/tempDir-001/jetty2, solrconfig=solrconfig.xml, hostContext=/, hostPort=33972, coreRootDirectory=/home/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-7.x/solr/build/solr-core/test/J2/../../../../../../../../../../home/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-7.x/solr/build/solr-core/test/J2/temp/solr.cloud.SyncSliceTest_D00D26B5F179151C-001/shard-2-001/cores}
[junit4] 2> 1605502 ERROR (TEST-SyncSliceTest.test-seed#[D00D26B5F179151C]) [ ] o.a.s.u.StartupLoggingUtils Missing Java Option solr.log.dir. Logging may be missing or incomplete.
[junit4] 2> 1605502 INFO (TEST-SyncSliceTest.test-seed#[D00D26B5F179151C]) [ ] o.a.s.s.SolrDispatchFilter Using logger factory org.apache.logging.slf4j.Log4jLoggerFactory
[junit4] 2> 1605502 INFO (TEST-SyncSliceTest.test-seed#[D00D26B5F179151C]) [ ] o.a.s.s.SolrDispatchFilter ___ _ Welcome to Apache Solr™ version 7.5.0
[junit4] 2> 1605502 INFO (TEST-SyncSliceTest.test-seed#[D00D26B5F179151C]) [ ] o.a.s.s.SolrDispatchFilter / __| ___| |_ _ Starting in cloud mode on port null
[junit4] 2> 1605502 INFO (TEST-SyncSliceTest.test-seed#[D00D26B5F179151C]) [ ] o.a.s.s.SolrDispatchFilter \__ \/ _ \ | '_| Install dir: null
[junit4] 2> 1605502 INFO (TEST-SyncSliceTest.test-seed#[D00D26B5F179151C]) [ ] o.a.s.s.SolrDispatchFilter |___/\___/_|_| Start time: 2018-07-02T21:24:35.747Z
[junit4] 2> 1605504 INFO (zkConnectionManagerCallback-5191-thread-1) [ ] o.a.s.c.c.ConnectionManager zkClient has connected
[junit4] 2> 1605505 INFO (TEST-SyncSliceTest.test-seed#[D00D26B5F179151C]) [ ] o.a.s.s.SolrDispatchFilter Loading solr.xml from SolrHome (not found in ZooKeeper)
[junit4] 2> 1605505 INFO (TEST-SyncSliceTest.test-seed#[D00D26B5F179151C]) [ ] o.a.s.c.SolrXmlConfig Loading container configuration from /home/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-7.x/solr/build/solr-core/test/J2/temp/solr.cloud.SyncSliceTest_D00D26B5F179151C-001/shard-2-001/solr.xml
[junit4] 2> 1605508 INFO (TEST-SyncSliceTest.test-seed#[D00D26B5F179151C]) [ ] o.a.s.c.SolrXmlConfig Configuration parameter autoReplicaFailoverWorkLoopDelay is ignored
[junit4] 2> 1605508 INFO (TEST-SyncSliceTest.test-seed#[D00D26B5F179151C]) [ ] o.a.s.c.SolrXmlConfig Configuration parameter autoReplicaFailoverBadNodeExpiration is ignored
[junit4] 2> 1605509 INFO (TEST-SyncSliceTest.test-seed#[D00D26B5F179151C]) [ ] o.a.s.c.SolrXmlConfig MBean server found: com.sun.jmx.mbeanserver.JmxMBeanServer@38e5c0b9, but no JMX reporters were configured - adding default JMX reporter.
[junit4] 2> 1605805 INFO (TEST-SyncSliceTest.test-seed#[D00D26B5F179151C]) [ ] o.a.s.c.ZkContainer Zookeeper client=127.0.0.1:38938/solr
[junit4] 2> 1605807 INFO (zkConnectionManagerCallback-5195-thread-1) [ ] o.a.s.c.c.ConnectionManager zkClient has connected
[junit4] 2> 1605816 INFO (zkConnectionManagerCallback-5197-thread-1) [ ] o.a.s.c.c.ConnectionManager zkClient has connected
[junit4] 2> 1605831 INFO (TEST-SyncSliceTest.test-seed#[D00D26B5F179151C]) [n:127.0.0.1:33972_ ] o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (0) -> (2)
[junit4] 2> 1605842 INFO (TEST-SyncSliceTest.test-seed#[D00D26B5F179151C]) [n:127.0.0.1:33972_ ] o.a.s.c.Overseer Overseer (id=null) closing
[junit4] 2> 1605857 INFO (TEST-SyncSliceTest.test-seed#[D00D26B5F179151C]) [n:127.0.0.1:33972_ ] o.a.s.c.TransientSolrCoreCacheDefault Allocating transient cache for 4 transient cores
[junit4] 2> 1605858 INFO (TEST-SyncSliceTest.test-seed#[D00D26B5F179151C]) [n:127.0.0.1:33972_ ] o.a.s.c.ZkController Register node as live in ZooKeeper:/live_nodes/127.0.0.1:33972_
[junit4] 2> 1605860 INFO (zkCallback-5180-thread-1) [ ] o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (2) -> (3)
[junit4] 2> 1605860 INFO (zkCallback-5172-thread-1) [ ] o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (2) -> (3)
[junit4] 2> 1605860 INFO (zkCallback-5154-thread-1) [ ] o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (2) -> (3)
[junit4] 2> 1605873 INFO (zkCallback-5161-thread-1) [ ] o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (2) -> (3)
[junit4] 2> 1605873 INFO (zkCallback-5187-thread-1) [ ] o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (2) -> (3)
[junit4] 2> 1605885 INFO (zkCallback-5196-thread-1) [ ] o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (2) -> (3)
[junit4] 2> 1605935 INFO (zkConnectionManagerCallback-5204-thread-1) [ ] o.a.s.c.c.ConnectionManager zkClient has connected
[junit4] 2> 1605962 INFO (TEST-SyncSliceTest.test-seed#[D00D26B5F179151C]) [n:127.0.0.1:33972_ ] o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (0) -> (3)
[junit4] 2> 1605963 INFO (TEST-SyncSliceTest.test-seed#[D00D26B5F179151C]) [n:127.0.0.1:33972_ ] o.a.s.c.s.i.ZkClientClusterStateProvider Cluster at 127.0.0.1:38938/solr ready
[junit4] 2> 1605964 INFO (TEST-SyncSliceTest.test-seed#[D00D26B5F179151C]) [n:127.0.0.1:33972_ ] o.a.s.h.a.MetricsHistoryHandler No .system collection, keeping metrics history in memory.
[junit4] 2> 1606012 INFO (TEST-SyncSliceTest.test-seed#[D00D26B5F179151C]) [n:127.0.0.1:33972_ ] o.a.s.m.r.SolrJmxReporter JMX monitoring for 'solr.node' (registry 'solr.node') enabled at server: com.sun.jmx.mbeanserver.JmxMBeanServer@38e5c0b9
[junit4] 2> 1606050 INFO (TEST-SyncSliceTest.test-seed#[D00D26B5F179151C]) [n:127.0.0.1:33972_ ] o.a.s.m.r.SolrJmxReporter JMX monitoring for 'solr.jvm' (registry 'solr.jvm') enabled at server: com.sun.jmx.mbeanserver.JmxMBeanServer@38e5c0b9
[junit4] 2> 1606050 INFO (TEST-SyncSliceTest.test-seed#[D00D26B5F179151C]) [n:127.0.0.1:33972_ ] o.a.s.m.r.SolrJmxReporter JMX monitoring for 'solr.jetty' (registry 'solr.jetty') enabled at server: com.sun.jmx.mbeanserver.JmxMBeanServer@38e5c0b9
[junit4] 2> 1606051 INFO (TEST-SyncSliceTest.test-seed#[D00D26B5F179151C]) [n:127.0.0.1:33972_ ] o.a.s.c.CorePropertiesLocator Found 0 core definitions underneath /home/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-7.x/solr/build/solr-core/test/J2/../../../../../../../../../../home/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-7.x/solr/build/solr-core/test/J2/temp/solr.cloud.SyncSliceTest_D00D26B5F179151C-001/shard-2-001/cores
[junit4] 2> 1606177 INFO (qtp1254129664-14186) [n:127.0.0.1:33972_ ] o.a.s.h.a.CollectionsHandler Invoked Collection Action :addreplica with params node=127.0.0.1:33972_&action=ADDREPLICA&collection=collection1&shard=shard1&type=NRT&wt=javabin&version=2 and sendToOCPQueue=true
[junit4] 2> 1606188 INFO (OverseerThreadFactory-3918-thread-4) [ c:collection1 s:shard1 ] o.a.s.c.a.c.AddReplicaCmd Node Identified 127.0.0.1:33972_ for creating new replica
[junit4] 2> 1606199 INFO (qtp1254129664-14181) [n:127.0.0.1:33972_ x:collection1_shard1_replica_n23] o.a.s.h.a.CoreAdminOperation core create command qt=/admin/cores&collection.configName=conf1&name=collection1_shard1_replica_n23&action=CREATE&collection=collection1&shard=shard1&wt=javabin&version=2&replicaType=NRT
[junit4] 2> 1606303 INFO (zkCallback-5180-thread-1) [ ] o.a.s.c.c.ZkStateReader A cluster state change: [WatchedEvent state:SyncConnected type:NodeDataChanged path:/collections/collection1/state.json] for collection [collection1] has occurred - updating... (live nodes size: [3])
[junit4] 2> 1607226 INFO (qtp1254129664-14181) [n:127.0.0.1:33972_ c:collection1 s:shard1 x:collection1_shard1_replica_n23] o.a.s.c.SolrConfig Using Lucene MatchVersion: 7.5.0
[junit4] 2> 1607246 INFO (qtp1254129664-14181) [n:127.0.0.1:33972_ c:collection1 s:shard1 x:collection1_shard1_replica_n23] o.a.s.s.IndexSchema [collection1_shard1_replica_n23] Schema name=test
[junit4] 2> 1607510 INFO (qtp1254129664-14181) [n:127.0.0.1:33972_ c:collection1 s:shard1 x:collection1_shard1_replica_n23] o.a.s.s.IndexSchema Loaded schema test/1.0 with uniqueid field id
[junit4] 2> 1607565 INFO (qtp1254129664-14181) [n:127.0.0.1:33972_ c:collection1 s:shard1 x:collection1_shard1_replica_n23] o.a.s.c.CoreContainer Creating SolrCore 'collection1_shard1_replica_n23' using configuration from collection collection1, trusted=true
[junit4] 2> 1607565 INFO (qtp1254129664-14181) [n:127.0.0.1:33972_ c:collection1 s:shard1 x:collection1_shard1_replica_n23] o.a.s.m.r.SolrJmxReporter JMX monitoring for 'solr.core.collection1.shard1.replica_n23' (registry 'solr.core.collection1.shard1.replica_n23') enabled at server: com.sun.jmx.mbeanserver.JmxMBeanServer@38e5c0b9
[junit4] 2> 1607565 INFO (qtp1254129664-14181) [n:127.0.0.1:33972_ c:collection1 s:shard1 x:collection1_shard1_replica_n23] o.a.s.c.SolrCore solr.RecoveryStrategy.Builder
[junit4] 2> 1607566 INFO (qtp1254129664-14181) [n:127.0.0.1:33972_ c:collection1 s:shard1 x:collection1_shard1_replica_n23] o.a.s.c.SolrCore [[collection1_shard1_replica_n23] ] Opening new SolrCore at [/home/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-7.x/solr/build/solr-core/test/J2/temp/solr.cloud.SyncSliceTest_D00D26B5F179151C-001/shard-2-001/cores/collection1_shard1_replica_n23], dataDir=[/home/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-7.x/solr/build/solr-core/test/J2/../../../../../../../../../../home/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-7.x/solr/build/solr-core/test/J2/temp/solr.cloud.SyncSliceTest_D00D26B5F179151C-001/shard-2-001/cores/collection1_shard1_replica_n23/data/]
[junit4] 2> 1607576 INFO (qtp1254129664-14181) [n:127.0.0.1:33972_ c:collection1 s:shard1 x:collection1_shard1_replica_n23] o.a.s.u.RandomMergePolicy RandomMergePolicy wrapping class org.apache.lucene.index.TieredMergePolicy: [TieredMergePolicy: maxMergeAtOnce=43, maxMergeAtOnceExplicit=38, maxMergedSegmentMB=82.88671875, floorSegmentMB=1.0556640625, forceMergeDeletesPctAllowed=26.204506614826144, segmentsPerTier=44.0, maxCFSSegmentSizeMB=8.796093022207999E12, noCFSRatio=1.0, reclaimDeletesWeight=3.541249386275915
[junit4] 2> 1607578 WARN (qtp1254129664-14181) [n:127.0.0.1:33972_ c:collection1 s:shard1 x:collection1_shard1_replica_n23] o.a.s.c.RequestHandlers INVALID paramSet a in requestHandler {type = requestHandler,name = /dump,class = DumpRequestHandler,attributes = {initParams=a, name=/dump, class=DumpRequestHandler},args = {defaults={a=A,b=B}}}
[junit4] 2> 1607795 INFO (qtp1254129664-14181) [n:127.0.0.1:33972_ c:collection1 s:shard1 x:collection1_shard1_replica_n23] o.a.s.u.UpdateHandler Using UpdateLog implementation: org.apache.solr.update.UpdateLog
[junit4] 2> 1607795 INFO (qtp1254129664-14181) [n:127.0.0.1:33972_ c:collection1 s:shard1 x:collection1_shard1_replica_n23] o.a.s.u.UpdateLog Initializing UpdateLog: dataDir= defaultSyncLevel=FLUSH numRecordsToKeep=100 maxNumLogsToKeep=10 numVersionBuckets=65536
[junit4] 2> 1607797 INFO (qtp1254129664-14181) [n:127.0.0.1:33972_ c:collection1 s:shard1 x:collection1_shard1_replica_n23] o.a.s.u.CommitTracker Hard AutoCommit: disabled
[junit4] 2> 1607797 INFO (qtp1254129664-14181) [n:127.0.0.1:33972_ c:collection1 s:shard1 x:collection1_shard1_replica_n23] o.a.s.u.CommitTracker Soft AutoCommit: disabled
[junit4] 2> 1607798 INFO (qtp1254129664-14181) [n:127.0.0.1:33972_ c:collection1 s:shard1 x:collection1_shard1_replica_n23] o.a.s.u.RandomMergePolicy RandomMergePolicy wrapping class org.apache.lucene.index.TieredMergePolicy: [TieredMergePolicy: maxMergeAtOnce=24, maxMergeAtOnceExplicit=14, maxMergedSegmentMB=62.384765625, floorSegmentMB=1.6826171875, forceMergeDeletesPctAllowed=19.8397720320704, segmentsPerTier=47.0, maxCFSSegmentSizeMB=8.796093022207999E12, noCFSRatio=0.15356301063868846, reclaimDeletesWeight=2.4595704375828533
[junit4] 2> 1607798 INFO (qtp1254129664-14181) [n:127.0.0.1:33972_ c:collection1 s:shard1 x:collection1_shard1_replica_n23] o.a.s.s.SolrIndexSearcher Opening [Searcher@6bc821ec[collection1_shard1_replica_n23] main]
[junit4] 2> 1607816 INFO (qtp1254129664-14181) [n:127.0.0.1:33972_ c:collection1 s:shard1 x:collection1_shard1_replica_n23] o.a.s.r.ManagedResourceStorage Configured ZooKeeperStorageIO with znodeBase: /configs/conf1
[junit4] 2> 1607816 INFO (qtp1254129664-14181) [n:127.0.0.1:33972_ c:collection1 s:shard1 x:collection1_shard1_replica_n23] o.a.s.r.ManagedResourceStorage Loaded null at path _rest_managed.json using ZooKeeperStorageIO:path=/configs/conf1
[junit4] 2> 1607817 INFO (qtp1254129664-14181) [n:127.0.0.1:33972_ c:collection1 s:shard1 x:collection1_shard1_replica_n23] o.a.s.h.ReplicationHandler Commits will be reserved for 10000ms.
[junit4] 2> 1607818 INFO (searcherExecutor-3951-thread-1-processing-n:127.0.0.1:33972_ x:collection1_shard1_replica_n23 c:collection1 s:shard1) [n:127.0.0.1:33972_ c:collection1 s:shard1 x:collection1_shard1_replica_n23] o.a.s.c.SolrCore [collection1_shard1_replica_n23] Registered new searcher Searcher@6bc821ec[collection1_shard1_replica_n23] main{ExitableDirectoryReader(UninvertingDirectoryReader())}
[junit4] 2> 1607818 INFO (qtp1254129664-14181) [n:127.0.0.1:33972_ c:collection1 s:shard1 x:collection1_shard1_replica_n23] o.a.s.u.UpdateLog Could not find max version in index or recent updates, using new clock 1604915485016588288
[junit4] 2> 1607834 INFO (qtp1254129664-14181) [n:127.0.0.1:33972_ c:collection1 s:shard1 x:collection1_shard1_replica_n23] o.a.s.c.ZkShardTerms Successful update of terms at /collections/collection1/terms/shard1 to Terms{values={core_node24=0, core_node22=0}, version=1}
[junit4] 2> 1607848 INFO (qtp1254129664-14181) [n:127.0.0.1:33972_ c:collection1 s:shard1 x:collection1_shard1_replica_n23] o.a.s.c.ZkController Core needs to recover:collection1_shard1_replica_n23
[junit4] 2> 1607849 INFO (updateExecutor-5192-thread-1-processing-n:127.0.0.1:33972_ x:collection1_shard1_replica_n23 c:collection1 s:shard1) [n:127.0.0.1:33972_ c:collection1 s:shard1 r:core_node24 x:collection1_shard1_replica_n23] o.a.s.u.DefaultSolrCoreState Running recovery
[junit4] 2> 1607868 INFO (qtp1254129664-14181) [n:127.0.0.1:33972_ c:collection1 s:shard1 x:collection1_shard1_replica_n23] o.a.s.s.HttpSolrCall [admin] webapp=null path=/admin/cores params={qt=/admin/cores&collection.configName=conf1&name=collection1_shard1_replica_n23&action=CREATE&collection=collection1&shard=shard1&wt=javabin&version=2&replicaType=NRT} status=0 QTime=1669
[junit4] 2> 1607884 INFO (recoveryExecutor-5193-thread-1-processing-n:127.0.0.1:33972_ x:collection1_shard1_replica_n23 c:collection1 s:shard1 r:core_node24) [n:127.0.0.1:33972_ c:collection1 s:shard1 r:core_node24 x:collection1_shard1_replica_n23] o.a.s.c.RecoveryStrategy Starting recovery process. recoveringAfterStartup=true
[junit4] 2> 1607885 INFO (recoveryExecutor-5193-thread-1-processing-n:127.0.0.1:33972_ x:collection1_shard1_replica_n23 c:collection1 s:shard1 r:core_node24) [n:127.0.0.1:33972_ c:collection1 s:shard1 r:core_node24 x:collection1_shard1_replica_n23] o.a.s.c.RecoveryStrategy ###### startupVersions=[[]]
[junit4] 2> 1607909 INFO (qtp1951284671-14144) [n:127.0.0.1:34972_ c:collection1 s:shard1 r:core_node22 x:collection1_shard1_replica_n21] o.a.s.c.S.Request [collection1_shard1_replica_n21] webapp= path=/admin/ping params={wt=javabin&version=2} hits=0 status=0 QTime=0
[junit4] 2> 1607909 INFO (qtp1951284671-14144) [n:127.0.0.1:34972_ c:collection1 s:shard1 r:core_node22 x:collection1_shard1_replica_n21] o.a.s.c.S.Request [collection1_shard1_replica_n21] webapp= path=/admin/ping params={wt=javabin&version=2} status=0 QTime=0
[junit4] 2> 1607921 INFO (recoveryExecutor-5193-thread-1-processing-n:127.0.0.1:33972_ x:collection1_shard1_replica_n23 c:collection1 s:shard1 r:core_node24) [n:127.0.0.1:33972_ c:collection1 s:shard1 r:core_node24 x:collection1_shard1_replica_n23] o.a.s.c.RecoveryStrategy Begin buffering updates. core=[collection1_shard1_replica_n23]
[junit4] 2> 1607921 INFO (recoveryExecutor-5193-thread-1-processing-n:127.0.0.1:33972_ x:collection1_shard1_replica_n23 c:collection1 s:shard1 r:core_node24) [n:127.0.0.1:33972_ c:collection1 s:shard1 r:core_node24 x:collection1_shard1_replica_n23] o.a.s.u.UpdateLog Starting to buffer updates. FSUpdateLog{state=ACTIVE, tlog=null}
[junit4] 2> 1607921 INFO (recoveryExecutor-5193-thread-1-processing-n:127.0.0.1:33972_ x:collection1_shard1_replica_n23 c:collection1 s:shard1 r:core_node24) [n:127.0.0.1:33972_ c:collection1 s:shard1 r:core_node24 x:collection1_shard1_replica_n23] o.a.s.c.RecoveryStrategy Publishing state of core [collection1_shard1_replica_n23] as recovering, leader is [http://127.0.0.1:34972/collection1_shard1_replica_n21/] and I am [http://127.0.0.1:33972/collection1_shard1_replica_n23/]
[junit4] 2> 1607973 INFO (recoveryExecutor-5193-thread-1-processing-n:127.0.0.1:33972_ x:collection1_shard1_replica_n23 c:collection1 s:shard1 r:core_node24) [n:127.0.0.1:33972_ c:collection1 s:shard1 r:core_node24 x:collection1_shard1_replica_n23] o.a.s.c.ZkShardTerms Successful update of terms at /collections/collection1/terms/shard1 to Terms{values={core_node24_recovering=0, core_node24=0, core_node22=0}, version=2}
[junit4] 2> 1607994 INFO (qtp1254129664-14186) [n:127.0.0.1:33972_ c:collection1 ] o.a.s.s.HttpSolrCall [admin] webapp=null path=/admin/collections params={node=127.0.0.1:33972_&action=ADDREPLICA&collection=collection1&shard=shard1&type=NRT&wt=javabin&version=2} status=0 QTime=1817
[junit4] 2> 1607995 INFO (recoveryExecutor-5193-thread-1-processing-n:127.0.0.1:33972_ x:collection1_shard1_replica_n23 c:collection1 s:shard1 r:core_node24) [n:127.0.0.1:33972_ c:collection1 s:shard1 r:core_node24 x:collection1_shard1_replica_n23] o.a.s.c.RecoveryStrategy Sending prep recovery command to [http://127.0.0.1:34972]; [WaitForState: action=PREPRECOVERY&core=collection1_shard1_replica_n21&nodeName=127.0.0.1:33972_&coreNodeName=core_node24&state=recovering&checkLive=true&onlyIfLeader=true&onlyIfLeaderActive=true]
[junit4] 2> 1607999 INFO (qtp1951284671-14147) [n:127.0.0.1:34972_ x:collection1_shard1_replica_n21] o.a.s.h.a.PrepRecoveryOp Going to wait for coreNodeName: core_node24, state: recovering, checkLive: true, onlyIfLeader: true, onlyIfLeaderActive: true, maxTime: 183 s
[junit4] 2> 1608108 INFO (qtp1951284671-14147) [n:127.0.0.1:34972_ x:collection1_shard1_replica_n21] o.a.s.h.a.PrepRecoveryOp In WaitForState(recovering): collection=collection1, shard=shard1, thisCore=collection1_shard1_replica_n21, leaderDoesNotNeedRecovery=false, isLeader? true, live=true, checkLive=true, currentState=down, localState=active, nodeName=127.0.0.1:33972_, coreNodeName=core_node24, onlyIfActiveCheckResult=false, nodeProps: core_node24:{"core":"collection1_shard1_replica_n23","base_url":"http://127.0.0.1:33972","node_name":"127.0.0.1:33972_","state":"down","type":"NRT"}
[junit4] 2> 1608109 INFO (zkCallback-5196-thread-1) [ ] o.a.s.c.c.ZkStateReader A cluster state change: [WatchedEvent state:SyncConnected type:NodeDataChanged path:/collections/collection1/state.json] for collection [collection1] has occurred - updating... (live nodes size: [3])
[junit4] 2> 1608109 INFO (zkCallback-5196-thread-2) [ ] o.a.s.c.c.ZkStateReader A cluster state change: [WatchedEvent state:SyncConnected type:NodeDataChanged path:/collections/collection1/state.json] for collection [collection1] has occurred - updating... (live nodes size: [3])
[junit4] 2> 1608124 INFO (zkCallback-5180-thread-1) [ ] o.a.s.c.c.ZkStateReader A cluster state change: [WatchedEvent state:SyncConnected type:NodeDataChanged path:/collections/collection1/state.json] for collection [collection1] has occurred - updating... (live nodes size: [3])
[junit4] 2> 1608197 INFO (OverseerCollectionConfigSetProcessor-72903274989551620-127.0.0.1:45772_-n_0000000000) [ ] o.a.s.c.OverseerTaskQueue Response ZK path: /overseer/collection-queue-work/qnr-0000000006 doesn't exist. Requestor may have disconnected from ZooKeeper
[junit4] 2> 1608463 INFO (TEST-SyncSliceTest.test-seed#[D00D26B5F179151C]) [ ] o.a.s.c.AbstractFullDistribZkTestBase create jetty 3 in directory /home/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-7.x/solr/build/solr-core/test/J2/temp/solr.cloud.SyncSliceTest_D00D26B5F179151C-001/shard-3-001 of type NRT
[junit4] 2> 1608464 INFO (TEST-SyncSliceTest.test-seed#[D00D26B5F179151C]) [ ] o.e.j.s.Server jetty-9.4.11.v20180605; built: 2018-06-05T18:24:03.829Z; git: d5fc0523cfa96bfebfbda19606cad384d772f04c; jvm 1.8.0_172-b11
[junit4] 2> 1608465 INFO (TEST-SyncSliceTest.test-seed#[D00D26B5F179151C]) [ ] o.e.j.s.session DefaultSessionIdManager workerName=node0
[junit4] 2> 1608465 INFO (TEST-SyncSliceTest.test-seed#[D00D26B5F179151C]) [ ] o.e.j.s.session No SessionScavenger set, using defaults
[junit4] 2> 1608465 INFO (TEST-SyncSliceTest.test-seed#[D00D26B5F179151C]) [ ] o.e.j.s.session node0 Scavenging every 660000ms
[junit4] 2> 1608466 INFO (TEST-SyncSliceTest.test-seed#[D00D26B5F179151C]) [ ] o.e.j.s.h.ContextHandler Started o.e.j.s.ServletContextHandler@5ddbc677{/,null,AVAILABLE}
[junit4] 2> 1608466 INFO (TEST-SyncSliceTest.test-seed#[D00D26B5F179151C]) [ ] o.e.j.s.AbstractConnector Started ServerConnector@4fd6aeb8{HTTP/1.1,[http/1.1]}{127.0.0.1:43452}
[junit4] 2> 1608467 INFO (TEST-SyncSliceTest.test-seed#[D00D26B5F179151C]) [ ] o.e.j.s.Server Started @1608655ms
[junit4] 2> 1608467 INFO (TEST-SyncSliceTest.test-seed#[D00D26B5F179151C]) [ ] o.a.s.c.s.e.JettySolrRunner Jetty properties: {solr.data.dir=/home/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-7.x/solr/build/solr-core/test/J2/temp/solr.cloud.SyncSliceTest_D00D26B5F179151C-001/tempDir-001/jetty3, solrconfig=solrconfig.xml, hostContext=/, hostPort=43452, coreRootDirectory=/home/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-7.x/solr/build/solr-core/test/J2/../../../../../../../../../../home/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-7.x/solr/build/solr-core/test/J2/temp/solr.cloud.SyncSliceTest_D00D26B5F179151C-001/shard-3-001/cores}
[junit4] 2> 1608468 ERROR (TEST-SyncSliceTest.test-seed#[D00D26B5F179151C]) [ ] o.a.s.u.StartupLoggingUtils Missing Java Option solr.log.dir. Logging may be missing or incomplete.
[junit4] 2> 1608468 INFO (TEST-SyncSliceTest.test-seed#[D00D26B5F179151C]) [ ] o.a.s.s.SolrDispatchFilter Using logger factory org.apache.logging.slf4j.Log4jLoggerFactory
[junit4] 2> 1608468 INFO (TEST-SyncSliceTest.test-seed#[D00D26B5F179151C]) [ ] o.a.s.s.SolrDispatchFilter ___ _ Welcome to Apache Solr™ version 7.5.0
[junit4] 2> 1608468 INFO (TEST-SyncSliceTest.test-seed#[D00D26B5F179151C]) [ ] o.a.s.s.SolrDispatchFilter / __| ___| |_ _ Starting in cloud mode on port null
[junit4] 2> 1608468 INFO (TEST-SyncSliceTest.test-seed#[D00D26B5F179151C]) [ ] o.a.s.s.SolrDispatchFilter \__ \/ _ \ | '_| Install dir: null
[junit4] 2> 1608468 INFO (TEST-SyncSliceTest.test-seed#[D00D26B5F179151C]) [ ] o.a.s.s.SolrDispatchFilter |___/\___/_|_| Start time: 2018-07-02T21:24:38.713Z
[junit4] 2> 1608469 INFO (zkConnectionManagerCallback-5208-thread-1) [ ] o.a.s.c.c.ConnectionManager zkClient has connected
[junit4] 2> 1608470 INFO (TEST-SyncSliceTest.test-seed#[D00D26B5F179151C]) [ ] o.a.s.s.SolrDispatchFilter Loading solr.xml from SolrHome (not found in ZooKeeper)
[junit4] 2> 1608470 INFO (TEST-SyncSliceTest.test-seed#[D00D26B5F179151C]) [ ] o.a.s.c.SolrXmlConfig Loading container configuration from /home/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-7.x/solr/build/solr-core/test/J2/temp/solr.cloud.SyncSliceTest_D00D26B5F179151C-001/shard-3-001/solr.xml
[junit4] 2> 1608473 INFO (TEST-SyncSliceTest.test-seed#[D00D26B5F179151C]) [ ] o.a.s.c.SolrXmlConfig Configuration parameter autoReplicaFailoverWorkLoopDelay is ignored
[junit4] 2> 1608494 INFO (TEST-SyncSliceTest.test-seed#[D00D26B5F179151C]) [ ] o.a.s.c.SolrXmlConfig Configuration parameter autoReplicaFailoverBadNodeExpiration is ignored
[junit4] 2> 1608495 INFO (TEST-SyncSliceTest.test-seed#[D00D26B5F179151C]) [ ] o.a.s.c.SolrXmlConfig MBean server found: com.sun.jmx.mbeanserver.JmxMBeanServer@38e5c0b9, but no JMX reporters were configured - adding default JMX reporter.
[junit4] 2> 1609076 INFO (TEST-SyncSliceTest.test-seed#[D00D26B5F179151C]) [ ] o.a.s.c.ZkContainer Zookeeper client=127.0.0.1:38938/solr
[junit4] 2> 1609114 INFO (qtp1951284671-14147) [n:127.0.0.1:34972_ x:collection1_shard1_replica_n21] o.a.s.h.a.PrepRecoveryOp In WaitForState(recovering): collection=collection1, shard=shard1, thisCore=collection1_shard1_replica_n21, leaderDoesNotNeedRecovery=false, isLeader? true, live=true, checkLive=true, currentState=recovering, localState=active, nodeName=127.0.0.1:33972_, coreNodeName=core_node24, onlyIfActiveCheckResult=false, nodeProps: core_node24:{"core":"collection1_shard1_replica_n23","base_url":"http://127.0.0.1:33972","node_name":"127.0.0.1:33972_","state":"recovering","type":"NRT"}
[junit4] 2> 1609115 INFO (qtp1951284671-14147) [n:127.0.0.1:34972_ x:collection1_shard1_replica_n21] o.a.s.h.a.PrepRecoveryOp Waited coreNodeName: core_node24, state: recovering, checkLive: true, onlyIfLeader: true for: 1 seconds.
[junit4] 2> 1609115 INFO (qtp1951284671-14147) [n:127.0.0.1:34972_ x:collection1_shard1_replica_n21] o.a.s.s.HttpSolrCall [admin] webapp=null path=/admin/cores params={nodeName=127.0.0.1:33972_&onlyIfLeaderActive=true&core=collection1_shard1_replica_n21&coreNodeName=core_node24&action=PREPRECOVERY&checkLive=true&state=recovering&onlyIfLeader=true&wt=javabin&version=2} status=0 QTime=1116
[junit4] 2> 1609116 INFO (zkConnectionManagerCallback-5212-thread-1) [ ] o.a.s.c.c.ConnectionManager zkClient has connected
[junit4] 2> 1609134 INFO (zkConnectionManagerCallback-5214-thread-1) [ ] o.a.s.c.c.ConnectionManager zkClient has connected
[junit4] 2> 1609148 INFO (TEST-SyncSliceTest.test-seed#[D00D26B5F179151C]) [n:127.0.0.1:43452_ ] o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (0) -> (3)
[junit4] 2> 1609150 INFO (TEST-SyncSliceTest.test-seed#[D00D26B5F179151C]) [n:127.0.0.1:43452_ ] o.a.s.c.Overseer Overseer (id=null) closing
[junit4] 2> 1609152 INFO (TEST-SyncSliceTest.test-seed#[D00D26B5F179151C]) [n:127.0.0.1:43452_ ] o.a.s.c.TransientSolrCoreCacheDefault Allocating transient cache for 4 transient cores
[junit4] 2> 1609152 INFO (TEST-SyncSliceTest.test-seed#[D00D26B5F179151C]) [n:127.0.0.1:43452_ ] o.a.s.c.ZkController Register node as live in ZooKeeper:/live_nodes/127.0.0.1:43452_
[junit4] 2> 1609153 INFO (zkCallback-5154-thread-2) [ ] o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (3) -> (4)
[junit4] 2> 1609153 INFO (zkCallback-5196-thread-2) [ ] o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (3) -> (4)
[junit4] 2> 1609154 INFO (zkCallback-5180-thread-1) [ ] o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (3) -> (4)
[junit4] 2> 1609154 INFO (zkCallback-5203-thread-1) [ ] o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (3) -> (4)
[junit4] 2> 1609157 INFO (zkCallback-5161-thread-1) [ ] o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (3) -> (4)
[junit4] 2> 1609157 INFO (zkCallback-5172-thread-1) [ ] o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (3) -> (4)
[junit4] 2> 1609157 INFO (zkCallback-5187-thread-1) [ ] o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (3) -> (4)
[junit4] 2> 1609173 INFO (zkCallback-5213-thread-1) [ ] o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (3) -> (4)
[junit4] 2> 1609207 INFO (zkConnectionManagerCallback-5221-thread-1) [ ] o.a.s.c.c.ConnectionManager zkClient has connected
[junit4] 2> 1609208 INFO (TEST-SyncSliceTest.test-seed#[D00D26B5F179151C]) [n:127.0.0.1:43452_ ] o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (0) -> (4)
[junit4] 2> 1609217 INFO (TEST-SyncSliceTest.test-seed#[D00D26B5F179151C]) [n:127.0.0.1:43452_ ] o.a.s.c.s.i.ZkClientClusterStateProvider Cluster at 127.0.0.1:38938/solr ready
[junit4] 2> 1609218 INFO (TEST-SyncSliceTest.test-seed#[D00D26B5F179151C]) [n:127.0.0.1:43452_ ] o.a.s.h.a.MetricsHistoryHandler No .system collection, keeping metrics history in memory.
[junit4] 2> 1609300 INFO (TEST-SyncSliceTest.test-seed#[D00D26B5F179151C]) [n:127.0.0.1:43452_ ] o.a.s.m.r.SolrJmxReporter JMX monitoring for 'solr.node' (registry 'solr.node') enabled at server: com.sun.jmx.mbeanserver.JmxMBeanServer@38e5c0b9
[junit4] 2> 1609334 INFO (TEST-SyncSliceTest.test-seed#[D00D26B5F179151C]) [n:127.0.0.1:43452_ ] o.a.s.m.r.SolrJmxReporter JMX monitoring for 'solr.jvm' (registry 'solr.jvm') enabled at server: com.sun.jmx.mbeanserver.JmxMBeanServer@38e5c0b9
[junit4] 2> 1609334 INFO (TEST-SyncSliceTest.test-seed#[D00D26B5F179151C]) [n:127.0.0.1:43452_ ] o.a.s.m.r.SolrJmxReporter JMX monitoring for 'solr.jetty' (registry 'solr.jetty') enabled at server: com.sun.jmx.mbeanserver.JmxMBeanServer@38e5c0b9
[junit4] 2> 1609347 INFO (TEST-SyncSliceTest.test-seed#[D00D26B5F179151C]) [n:127.0.0.1:43452_ ] o.a.s.c.CorePropertiesLocator Found 0 core definitions underneath /home/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-7.x/solr/build/solr-core/test/J2/../../../../../../../../../../home/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-7.x/solr/build/solr-core/test/J2/temp/solr.cloud.SyncSliceTest_D00D26B5F179151C-001/shard-3-001/cores
[junit4] 2> 1609445 INFO (qtp1254129664-14182) [n:127.0.0.1:33972_ ] o.a.s.h.a.CollectionsHandler Invoked Collection Action :addreplica with params node=127.0.0.1:43452_&action=ADDREPLICA&collection=collection1&shard=shard1&type=NRT&wt=javabin&version=2 and sendToOCPQueue=true
[junit4] 2> 1609459 INFO (OverseerThreadFactory-3918-thread-5) [ c:collection1 s:shard1 ] o.a.s.c.a.c.AddReplicaCmd Node Identified 127.0.0.1:43452_ for creating new replica
[junit4] 2> 1609462 INFO (qtp639282180-14231) [n:127.0.0.1:43452_ x:collection1_shard1_replica_n25] o.a.s.h.a.CoreAdminOperation core create command qt=/admin/cores&collection.configName=conf1&name=collection1_shard1_replica_n25&action=CREATE&collection=collection1&shard=shard1&wt=javabin&version=2&replicaType=NRT
[junit4] 2> 1609578 INFO (zkCallback-5196-thread-2) [ ] o.a.s.c.c.ZkStateReader A cluster state change: [WatchedEvent state:SyncConnected type:NodeDataChanged path:/collections/collection1/state.json] for collection [collection1] has occurred - updating... (live nodes size: [4])
[junit4] 2> 1609578 INFO (zkCallback-5196-thread-1) [ ] o.a.s.c.c.ZkStateReader A cluster state change: [WatchedEvent state:SyncConnected type:NodeDataChanged path:/collections/collection1/state.json] for collection [collection1] has occurred - updating... (live nodes size: [4])
[junit4] 2> 1609578 INFO (zkCallback-5180-thread-1) [ ] o.a.s.c.c.ZkStateReader A cluster state change: [WatchedEvent state:SyncConnected type:NodeDataChanged path:/collections/collection1/state.json] for collection [collection1] has occurred - updating... (live nodes size: [4])
[junit4] 2> 1609615 INFO (recoveryExecutor-5193-thread-1-processing-n:127.0.0.1:33972_ x:collection1_shard1_replica_n23 c:collection1 s:shard1 r:core_node24) [n:127.0.0.1:33972_ c:collection1 s:shard1 r:core_node24 x:collection1_shard1_replica_n23] o.a.s.c.RecoveryStrategy Attempting to PeerSync from [http://127.0.0.1:34972/collection1_shard1_replica_n21/] - recoveringAfterStartup=[true]
[junit4] 2> 1609615 WARN (recoveryExecutor-5193-thread-1-processing-n:127.0.0.1:33972_ x:collection1_shard1_replica_n23 c:collection1 s:shard1 r:core_node24) [n:127.0.0.1:33972_ c:collection1 s:shard1 r:core_node24 x:collection1_shard1_replica_n23] o.a.s.u.PeerSyncWithLeader no frame of reference to tell if we've missed updates
[junit4] 2> 1609615 INFO (recoveryExecutor-5193-thread-1-processing-n:127.0.0.1:33972_ x:collection1_shard1_replica_n23 c:collection1 s:shard1 r:core_node24) [n:127.0.0.1:33972_ c:collection1 s:shard1 r:core_node24 x:collection1_shard1_replica_n23] o.a.s.c.RecoveryStrategy PeerSync Recovery was not successful - trying replication.
[junit4] 2> 1609616 INFO (recoveryExecutor-5193-thread-1-processing-n:127.0.0.1:33972_ x:collection1_shard1_replica_n23 c:collection1 s:shard1 r:core_node24) [n:127.0.0.1:33972_ c:collection1 s:shard1 r:core_node24 x:collection1_shard1_replica_n23] o.a.s.c.RecoveryStrategy Starting Replication Recovery.
[junit4] 2> 1609616 INFO (recoveryExecutor-5193-thread-1-processing-n:127.0.0.1:33972_ x:collection1_shard1_replica_n23 c:collection1 s:shard1 r:core_node24) [n:127.0.0.1:33972_ c:collection1 s:shard1 r:core_node24 x:collection1_shard1_replica_n23] o.a.s.c.RecoveryStrategy Attempting to replicate from [http://127.0.0.1:34972/collection1_shard1_replica_n21/].
[junit4] 2> 1609619 INFO (qtp1951284671-14144) [n:127.0.0.1:34972_ c:collection1 s:shard1 r:core_node22 x:collection1_shard1_replica_n21] o.a.s.u.DirectUpdateHandler2 start commit{_version_=1604915486905073664,optimize=false,openSearcher=false,waitSearcher=true,expungeDeletes=false,softCommit=false,prepareCommit=false}
[junit4] 2> 1609619 INFO (qtp1951284671-14144) [n:127.0.0.1:34972_ c:collection1 s:shard1 r:core_node22 x:collection1_shard1_replica_n21] o.a.s.u.DirectUpdateHandler2 No uncommitted changes. Skipping IW.commit.
[junit4] 2> 1609620 INFO (qtp1951284671-14144) [n:127.0.0.1:34972_ c:collection1 s:shard1 r:core_node22 x:collection1_shard1_replica_n21] o.a.s.u.DirectUpdateHandler2 end_commit_flush
[junit4] 2> 1609620 INFO (qtp1951284671-14144) [n:127.0.0.1:34972_ c:collection1 s:shard1 r:core_node22 x:collection1_shard1_replica_n21] o.a.s.u.p.LogUpdateProcessorFactory [collection1_shard1_replica_n21] webapp= path=/update params={waitSearcher=true&openSearcher=false&commit=true&softCommit=false&commit_end_point=true&wt=javabin&version=2}{commit=} 0 2
[junit4] 2> 1609622 INFO (qtp1951284671-14150) [n:127.0.0.1:34972_ c:collection1 s:shard1 r:core_node22 x:collection1_shard1_replica_n21] o.a.s.c.S.Request [collection1_shard1_replica_n21] webapp= path=/replication params={qt=/replication&wt=javabin&version=2&command=indexversion} status=0 QTime=0
[junit4] 2> 1609622 INFO (recoveryExecutor-5193-thread-1-processing-n:127.0.0.1:33972_ x:collection1_shard1_replica_n23 c:collection1 s:shard1 r:core_node24) [n:127.0.0.1:33972_ c:collection1 s:shard1 r:core_node24 x:collection1_shard1_replica_n23] o.a.s.h.IndexFetcher Master's generation: 1
[junit4] 2> 1609622 INFO (recoveryExecutor-5193-thread-1-processing-n:127.0.0.1:33972_ x:collection1_shard1_replica_n23 c:collection1 s:shard1 r:core_node24) [n:127.0.0.1:33972_ c:collection1 s:shard1 r:core_node24 x:collection1_shard1_replica_n23] o.a.s.h.IndexFetcher Master's version: 0
[junit4] 2> 1609622 INFO (recoveryExecutor-5193-thread-1-processing-n:127.0.0.1:33972_ x:collection1_shard1_replica_n23 c:collection1 s:shard1 r:core_node24) [n:127.0.0.1:33972_ c:collection1 s:shard1 r:core_node24 x:collection1_shard1_replica_n23] o.a.s.h.IndexFetcher Slave's generation: 1
[junit4] 2> 1609622 INFO (recoveryExecutor-5193-thread-1-processing-n:127.0.0.1:33972_ x:collection1_shard1_replica_n23 c:collection1 s:shard1 r:core_node24) [n:127.0.0.1:33972_ c:collection1 s:shard1 r:core_node24 x:collection1_shard1_replica_n23] o.a.s.h.IndexFetcher Slave's version: 0
[junit4] 2> 1609622 INFO (recoveryExecutor-5193-thread-1-processing-n:127.0.0.1:33972_ x:collection1_shard1_replica_n23 c:collection1 s:shard1 r:core_node24) [n:127.0.0.1:33972_ c:collection1 s:shard1 r:core_node24 x:collection1_shard1_replica_n23] o.a.s.h.IndexFetcher New index in Master. Deleting mine...
[junit4] 2> 1609622 INFO (recoveryExecutor-5193-thread-1-processing-n:127.0.0.1:33972_ x:collection1_shard1_replica_n23 c:collection1 s:shard1 r:core_node24) [n:127.0.0.1:33972_ c:collection1 s:shard1 r:core_node24 x:collection1_shard1_replica_n23] o.a.s.s.SolrIndexSearcher Opening [Searcher@4fd6a1e[collection1_shard1_replica_n23] main]
[junit4] 2> 1609624 INFO (searcherExecutor-3951-thread-1-processing-n:127.0.0.1:33972_ x:collection1_shard1_replica_n23 c:collection1 s:shard1 r:core_node24) [n:127.0.0.1:33972_ c:collection1 s:shard1 r:core_node24 x:collection1_shard1_replica_n23] o.a.s.c.SolrCore [collection1_shard1_replica_n23] Registered new searcher Searcher@4fd6a1e[collection1_shard1_replica_n23] main{ExitableDirectoryReader(UninvertingDirectoryReader())}
[junit4] 2> 1609624 INFO (recoveryExecutor-5193-thread-1-processing-n:127.0.0.1:33972_ x:collection1_shard1_replica_n23 c:collection1 s:shard1 r:core_node24) [n:127.0.0.1:33972_ c:collection1 s:shard1 r:core_node24 x:collection1_shard1_replica_n23] o.a.s.c.RecoveryStrategy No replay needed.
[junit4] 2> 1609624 INFO (recoveryExecutor-5193-thread-1-processing-n:127.0.0.1:33972_ x:collection1_shard1_replica_n23 c:collection1 s:shard1 r:core_node24) [n:127.0.0.1:33972_ c:collection1 s:shard1 r:core_node24 x:collection1_shard1_replica_n23] o.a.s.c.RecoveryStrategy Replication Recovery was successful.
[junit4] 2> 1609624 INFO (recoveryExecutor-5193-thread-1-processing-n:127.0.0.1:33972_ x:collection1_shard1_replica_n23 c:collection1 s:shard1 r:core_node24) [n:127.0.0.1:33972_ c:collection1 s:shard1 r:core_node24 x:collection1_shard1_replica_n23] o.a.s.c.RecoveryStrategy Registering as Active after recovery.
[junit4] 2> 1609625 INFO (recoveryExecutor-5193-thread-1-processing-n:127.0.0.1:33972_ x:collection1_shard1_replica_n23 c:collection1 s:shard1 r:core_node24) [n:127.0.0.1:33972_ c:collection1 s:shard1 r:core_node24 x:collection1_shard1_replica_n23] o.a.s.c.ZkShardTerms Successful update of terms at /collections/collection1/terms/shard1 to Terms{values={core_node22=0, core_node24=0}, version=3}
[junit4] 2> 1609625 INFO (recoveryExecutor-5193-thread-1-processing-n:127.0.0.1:33972_ x:collection1_shard1_replica_n23 c:collection1 s:shard1 r:core_node24) [n:127.0.0.1:33972_ c:collection1 s:shard1 r:core_node24 x:collection1_shard1_replica_n23] o.a.s.c.RecoveryStrategy Updating version bucket highest from index after successful recovery.
[junit4] 2> 1609625 INFO (recoveryExecutor-5193-thread-1-processing-n:127.0.0.1:33972_ x:collection1_shard1_replica_n23 c:collection1 s:shard1 r:core_node24) [n:127.0.0.1:33972_ c:collection1 s:shard1 r:core_node24 x:collection1_shard1_replica_n23] o.a.s.u.UpdateLog Could not find max version in index or recent updates, using new clock 1604915486911365120
[junit4] 2> 1609627 INFO (recoveryExecutor-5193-thread-1-processing-n:127.0.0.1:33972_ x:collection1_shard1_replica_n23 c:collection1 s:shard1 r:core_node24) [n:127.0.0.1:33972_ c:collection1 s:shard1 r:core_node24 x:collection1_shard1_replica_n23] o.a.s.c.RecoveryStrategy Finished recovery process, successful=[true]
[junit4] 2> 1609727 INFO (zkCallback-5196-thread-1) [ ] o.a.s.c.c.ZkStateReader A cluster state change: [WatchedEvent state:SyncConnected type:NodeDataChanged path:/collections/collection1/state.json] for collection [collection1] has occurred - updating... (live nodes size: [4])
[junit4] 2> 1609727 INFO (zkCallback-518
[...truncated too long message...]
perty disallowed.ivy.jars.list as its length is 0.
-ivy-fail-disallowed-ivy-version:
ivy-fail:
ivy-configure:
[ivy:configure] :: loading settings :: file = /home/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-7.x/lucene/top-level-ivy-settings.xml
resolve:
ivy-availability-check:
[loadresource] Do not set property disallowed.ivy.jars.list as its length is 0.
-ivy-fail-disallowed-ivy-version:
ivy-fail:
ivy-fail:
ivy-configure:
[ivy:configure] :: loading settings :: file = /home/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-7.x/lucene/top-level-ivy-settings.xml
resolve:
ivy-availability-check:
[loadresource] Do not set property disallowed.ivy.jars.list as its length is 0.
-ivy-fail-disallowed-ivy-version:
ivy-fail:
ivy-configure:
[ivy:configure] :: loading settings :: file = /home/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-7.x/lucene/top-level-ivy-settings.xml
resolve:
ivy-availability-check:
[loadresource] Do not set property disallowed.ivy.jars.list as its length is 0.
-ivy-fail-disallowed-ivy-version:
ivy-fail:
ivy-fail:
ivy-configure:
[ivy:configure] :: loading settings :: file = /home/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-7.x/lucene/top-level-ivy-settings.xml
resolve:
ivy-availability-check:
[loadresource] Do not set property disallowed.ivy.jars.list as its length is 0.
-ivy-fail-disallowed-ivy-version:
ivy-fail:
ivy-fail:
ivy-configure:
[ivy:configure] :: loading settings :: file = /home/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-7.x/lucene/top-level-ivy-settings.xml
resolve:
ivy-availability-check:
[loadresource] Do not set property disallowed.ivy.jars.list as its length is 0.
-ivy-fail-disallowed-ivy-version:
ivy-fail:
ivy-fail:
ivy-configure:
[ivy:configure] :: loading settings :: file = /home/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-7.x/lucene/top-level-ivy-settings.xml
resolve:
ivy-availability-check:
[loadresource] Do not set property disallowed.ivy.jars.list as its length is 0.
-ivy-fail-disallowed-ivy-version:
ivy-fail:
ivy-fail:
ivy-configure:
[ivy:configure] :: loading settings :: file = /home/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-7.x/lucene/top-level-ivy-settings.xml
resolve:
ivy-availability-check:
[loadresource] Do not set property disallowed.ivy.jars.list as its length is 0.
-ivy-fail-disallowed-ivy-version:
ivy-fail:
ivy-fail:
ivy-configure:
[ivy:configure] :: loading settings :: file = /home/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-7.x/lucene/top-level-ivy-settings.xml
resolve:
ivy-availability-check:
[loadresource] Do not set property disallowed.ivy.jars.list as its length is 0.
-ivy-fail-disallowed-ivy-version:
ivy-fail:
ivy-fail:
ivy-configure:
[ivy:configure] :: loading settings :: file = /home/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-7.x/lucene/top-level-ivy-settings.xml
resolve:
ivy-availability-check:
[loadresource] Do not set property disallowed.ivy.jars.list as its length is 0.
-ivy-fail-disallowed-ivy-version:
ivy-fail:
ivy-fail:
ivy-configure:
[ivy:configure] :: loading settings :: file = /home/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-7.x/lucene/top-level-ivy-settings.xml
resolve:
ivy-availability-check:
[loadresource] Do not set property disallowed.ivy.jars.list as its length is 0.
-ivy-fail-disallowed-ivy-version:
ivy-fail:
ivy-fail:
ivy-configure:
[ivy:configure] :: loading settings :: file = /home/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-7.x/lucene/top-level-ivy-settings.xml
resolve:
ivy-availability-check:
[loadresource] Do not set property disallowed.ivy.jars.list as its length is 0.
-ivy-fail-disallowed-ivy-version:
ivy-fail:
ivy-fail:
ivy-configure:
[ivy:configure] :: loading settings :: file = /home/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-7.x/lucene/top-level-ivy-settings.xml
resolve:
ivy-availability-check:
[loadresource] Do not set property disallowed.ivy.jars.list as its length is 0.
-ivy-fail-disallowed-ivy-version:
ivy-fail:
ivy-fail:
ivy-configure:
[ivy:configure] :: loading settings :: file = /home/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-7.x/lucene/top-level-ivy-settings.xml
resolve:
ivy-availability-check:
[loadresource] Do not set property disallowed.ivy.jars.list as its length is 0.
-ivy-fail-disallowed-ivy-version:
ivy-fail:
ivy-fail:
ivy-configure:
[ivy:configure] :: loading settings :: file = /home/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-7.x/lucene/top-level-ivy-settings.xml
resolve:
jar-checksums:
[mkdir] Created dir: /home/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-7.x/solr/null605395226
[copy] Copying 247 files to /home/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-7.x/solr/null605395226
[delete] Deleting directory /home/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-7.x/solr/null605395226
check-working-copy:
[ivy:cachepath] :: resolving dependencies :: org.eclipse.jgit#org.eclipse.jgit-caller;working
[ivy:cachepath] confs: [default]
[ivy:cachepath] found org.eclipse.jgit#org.eclipse.jgit;4.6.0.201612231935-r in public
[ivy:cachepath] found com.jcraft#jsch;0.1.53 in public
[ivy:cachepath] found com.googlecode.javaewah#JavaEWAH;1.1.6 in public
[ivy:cachepath] found org.apache.httpcomponents#httpclient;4.3.6 in public
[ivy:cachepath] found org.apache.httpcomponents#httpcore;4.3.3 in public
[ivy:cachepath] found commons-logging#commons-logging;1.1.3 in public
[ivy:cachepath] found commons-codec#commons-codec;1.6 in public
[ivy:cachepath] found org.slf4j#slf4j-api;1.7.2 in public
[ivy:cachepath] :: resolution report :: resolve 52ms :: artifacts dl 12ms
---------------------------------------------------------------------
| | modules || artifacts |
| conf | number| search|dwnlded|evicted|| number|dwnlded|
---------------------------------------------------------------------
| default | 8 | 0 | 0 | 0 || 8 | 0 |
---------------------------------------------------------------------
[wc-checker] Initializing working copy...
[wc-checker] SLF4J: Failed to load class "org.slf4j.impl.StaticLoggerBinder".
[wc-checker] SLF4J: Defaulting to no-operation (NOP) logger implementation
[wc-checker] SLF4J: See http://www.slf4j.org/codes.html#StaticLoggerBinder for further details.
[wc-checker] Checking working copy status...
-jenkins-base:
BUILD SUCCESSFUL
Total time: 190 minutes 25 seconds
Archiving artifacts
java.lang.InterruptedException: no matches found within 10000
at hudson.FilePath$34.hasMatch(FilePath.java:2678)
at hudson.FilePath$34.invoke(FilePath.java:2557)
at hudson.FilePath$34.invoke(FilePath.java:2547)
at hudson.FilePath$FileCallableWrapper.call(FilePath.java:2918)
Also: hudson.remoting.Channel$CallSiteStackTrace: Remote call to lucene2
at hudson.remoting.Channel.attachCallSiteStackTrace(Channel.java:1741)
at hudson.remoting.UserRequest$ExceptionResponse.retrieve(UserRequest.java:357)
at hudson.remoting.Channel.call(Channel.java:955)
at hudson.FilePath.act(FilePath.java:1036)
at hudson.FilePath.act(FilePath.java:1025)
at hudson.FilePath.validateAntFileMask(FilePath.java:2547)
at hudson.tasks.ArtifactArchiver.perform(ArtifactArchiver.java:243)
at hudson.tasks.BuildStepCompatibilityLayer.perform(BuildStepCompatibilityLayer.java:81)
at hudson.tasks.BuildStepMonitor$1.perform(BuildStepMonitor.java:20)
at hudson.model.AbstractBuild$AbstractBuildExecution.perform(AbstractBuild.java:744)
at hudson.model.AbstractBuild$AbstractBuildExecution.performAllBuildSteps(AbstractBuild.java:690)
at hudson.model.Build$BuildExecution.post2(Build.java:186)
at hudson.model.AbstractBuild$AbstractBuildExecution.post(AbstractBuild.java:635)
at hudson.model.Run.execute(Run.java:1819)
at hudson.model.FreeStyleBuild.run(FreeStyleBuild.java:43)
at hudson.model.ResourceController.execute(ResourceController.java:97)
at hudson.model.Executor.run(Executor.java:429)
Caused: hudson.FilePath$TunneledInterruptedException
at hudson.FilePath$FileCallableWrapper.call(FilePath.java:2920)
at hudson.remoting.UserRequest.perform(UserRequest.java:212)
at hudson.remoting.UserRequest.perform(UserRequest.java:54)
at hudson.remoting.Request$2.run(Request.java:369)
at hudson.remoting.InterceptingExecutorService$1.call(InterceptingExecutorService.java:72)
at java.util.concurrent.FutureTask.run(FutureTask.java:266)
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142)
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617)
at java.lang.Thread.run(Thread.java:748)
Caused: java.lang.InterruptedException: java.lang.InterruptedException: no matches found within 10000
at hudson.FilePath.act(FilePath.java:1038)
at hudson.FilePath.act(FilePath.java:1025)
at hudson.FilePath.validateAntFileMask(FilePath.java:2547)
at hudson.tasks.ArtifactArchiver.perform(ArtifactArchiver.java:243)
at hudson.tasks.BuildStepCompatibilityLayer.perform(BuildStepCompatibilityLayer.java:81)
at hudson.tasks.BuildStepMonitor$1.perform(BuildStepMonitor.java:20)
at hudson.model.AbstractBuild$AbstractBuildExecution.perform(AbstractBuild.java:744)
at hudson.model.AbstractBuild$AbstractBuildExecution.performAllBuildSteps(AbstractBuild.java:690)
at hudson.model.Build$BuildExecution.post2(Build.java:186)
at hudson.model.AbstractBuild$AbstractBuildExecution.post(AbstractBuild.java:635)
at hudson.model.Run.execute(Run.java:1819)
at hudson.model.FreeStyleBuild.run(FreeStyleBuild.java:43)
at hudson.model.ResourceController.execute(ResourceController.java:97)
at hudson.model.Executor.run(Executor.java:429)
No artifacts found that match the file pattern "**/*.events,heapdumps/**,**/hs_err_pid*". Configuration error?
Recording test results
Build step 'Publish JUnit test result report' changed build result to UNSTABLE
Email was triggered for: Unstable (Test Failures)
Sending email for trigger: Unstable (Test Failures)
[JENKINS] Lucene-Solr-Tests-7.x - Build # 665 - Still Unstable
Posted by Apache Jenkins Server <je...@builds.apache.org>.
Build: https://builds.apache.org/job/Lucene-Solr-Tests-7.x/665/
1 tests failed.
FAILED: org.apache.solr.cloud.cdcr.CdcrBidirectionalTest.testBiDir
Error Message:
Captured an uncaught exception in thread: Thread[id=7101, name=cdcr-replicator-2754-thread-1, state=RUNNABLE, group=TGRP-CdcrBidirectionalTest]
Stack Trace:
com.carrotsearch.randomizedtesting.UncaughtExceptionError: Captured an uncaught exception in thread: Thread[id=7101, name=cdcr-replicator-2754-thread-1, state=RUNNABLE, group=TGRP-CdcrBidirectionalTest]
Caused by: java.lang.AssertionError: 1605067526385958912 != 1605067526184632320
at __randomizedtesting.SeedInfo.seed([FBD20284F364263D]:0)
at org.apache.solr.update.CdcrUpdateLog$CdcrLogReader.forwardSeek(CdcrUpdateLog.java:611)
at org.apache.solr.handler.CdcrReplicator.run(CdcrReplicator.java:125)
at org.apache.solr.handler.CdcrReplicatorScheduler.lambda$null$0(CdcrReplicatorScheduler.java:81)
at org.apache.solr.common.util.ExecutorUtil$MDCAwareThreadPoolExecutor.lambda$execute$0(ExecutorUtil.java:209)
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
at java.lang.Thread.run(Thread.java:748)
Build Log:
[...truncated 13176 lines...]
[junit4] Suite: org.apache.solr.cloud.cdcr.CdcrBidirectionalTest
[junit4] 2> 1007736 INFO (SUITE-CdcrBidirectionalTest-seed#[FBD20284F364263D]-worker) [ ] o.a.s.SolrTestCaseJ4 SecureRandom sanity checks: test.solr.allowed.securerandom=null & java.security.egd=file:/dev/./urandom
[junit4] 2> Creating dataDir: /x1/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-7.x/solr/build/solr-core/test/J0/temp/solr.cloud.cdcr.CdcrBidirectionalTest_FBD20284F364263D-001/init-core-data-001
[junit4] 2> 1007737 WARN (SUITE-CdcrBidirectionalTest-seed#[FBD20284F364263D]-worker) [ ] o.a.s.SolrTestCaseJ4 startTrackingSearchers: numOpens=1 numCloses=1
[junit4] 2> 1007737 INFO (SUITE-CdcrBidirectionalTest-seed#[FBD20284F364263D]-worker) [ ] o.a.s.SolrTestCaseJ4 Using TrieFields (NUMERIC_POINTS_SYSPROP=false) w/NUMERIC_DOCVALUES_SYSPROP=true
[junit4] 2> 1007739 INFO (SUITE-CdcrBidirectionalTest-seed#[FBD20284F364263D]-worker) [ ] o.a.s.SolrTestCaseJ4 Randomized ssl (true) and clientAuth (true) via: @org.apache.solr.util.RandomizeSSL(reason=, ssl=NaN, value=NaN, clientAuth=NaN)
[junit4] 2> 1007769 INFO (TEST-CdcrBidirectionalTest.testBiDir-seed#[FBD20284F364263D]) [ ] o.a.s.SolrTestCaseJ4 ###Starting testBiDir
[junit4] 2> 1007770 INFO (TEST-CdcrBidirectionalTest.testBiDir-seed#[FBD20284F364263D]) [ ] o.a.s.c.MiniSolrCloudCluster Starting cluster of 1 servers in /x1/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-7.x/solr/build/solr-core/test/J0/temp/solr.cloud.cdcr.CdcrBidirectionalTest_FBD20284F364263D-001/cdcr-cluster2-001
[junit4] 2> 1007770 INFO (TEST-CdcrBidirectionalTest.testBiDir-seed#[FBD20284F364263D]) [ ] o.a.s.c.ZkTestServer STARTING ZK TEST SERVER
[junit4] 2> 1007791 INFO (Thread-1734) [ ] o.a.s.c.ZkTestServer client port:0.0.0.0/0.0.0.0:0
[junit4] 2> 1007791 INFO (Thread-1734) [ ] o.a.s.c.ZkTestServer Starting server
[junit4] 2> 1007831 ERROR (Thread-1734) [ ] o.a.z.s.ZooKeeperServer ZKShutdownHandler is not registered, so ZooKeeper server won't take any action on ERROR or SHUTDOWN server state changes
[junit4] 2> 1007889 INFO (TEST-CdcrBidirectionalTest.testBiDir-seed#[FBD20284F364263D]) [ ] o.a.s.c.ZkTestServer start zk server on port:45672
[junit4] 2> 1007926 INFO (zkConnectionManagerCallback-1664-thread-1) [ ] o.a.s.c.c.ConnectionManager zkClient has connected
[junit4] 2> 1007950 INFO (jetty-launcher-1661-thread-1) [ ] o.e.j.s.Server jetty-9.4.11.v20180605; built: 2018-06-05T18:24:03.829Z; git: d5fc0523cfa96bfebfbda19606cad384d772f04c; jvm 1.8.0_172-b11
[junit4] 2> 1008235 INFO (jetty-launcher-1661-thread-1) [ ] o.e.j.s.session DefaultSessionIdManager workerName=node0
[junit4] 2> 1008235 INFO (jetty-launcher-1661-thread-1) [ ] o.e.j.s.session No SessionScavenger set, using defaults
[junit4] 2> 1008236 INFO (jetty-launcher-1661-thread-1) [ ] o.e.j.s.session node0 Scavenging every 660000ms
[junit4] 2> 1008243 INFO (jetty-launcher-1661-thread-1) [ ] o.e.j.s.h.ContextHandler Started o.e.j.s.ServletContextHandler@54abb302{/solr,null,AVAILABLE}
[junit4] 2> 1008244 INFO (jetty-launcher-1661-thread-1) [ ] o.e.j.s.AbstractConnector Started ServerConnector@7d9be45f{SSL,[ssl, http/1.1]}{127.0.0.1:50963}
[junit4] 2> 1008244 INFO (jetty-launcher-1661-thread-1) [ ] o.e.j.s.Server Started @1008383ms
[junit4] 2> 1008244 INFO (jetty-launcher-1661-thread-1) [ ] o.a.s.c.s.e.JettySolrRunner Jetty properties: {hostContext=/solr, hostPort=50963}
[junit4] 2> 1008244 ERROR (jetty-launcher-1661-thread-1) [ ] o.a.s.u.StartupLoggingUtils Missing Java Option solr.log.dir. Logging may be missing or incomplete.
[junit4] 2> 1008244 INFO (jetty-launcher-1661-thread-1) [ ] o.a.s.s.SolrDispatchFilter Using logger factory org.apache.logging.slf4j.Log4jLoggerFactory
[junit4] 2> 1008244 INFO (jetty-launcher-1661-thread-1) [ ] o.a.s.s.SolrDispatchFilter ___ _ Welcome to Apache Solr™ version 7.5.0
[junit4] 2> 1008244 INFO (jetty-launcher-1661-thread-1) [ ] o.a.s.s.SolrDispatchFilter / __| ___| |_ _ Starting in cloud mode on port null
[junit4] 2> 1008244 INFO (jetty-launcher-1661-thread-1) [ ] o.a.s.s.SolrDispatchFilter \__ \/ _ \ | '_| Install dir: null
[junit4] 2> 1008245 INFO (jetty-launcher-1661-thread-1) [ ] o.a.s.s.SolrDispatchFilter |___/\___/_|_| Start time: 2018-07-04T13:40:44.624Z
[junit4] 2> 1008273 INFO (zkConnectionManagerCallback-1666-thread-1) [ ] o.a.s.c.c.ConnectionManager zkClient has connected
[junit4] 2> 1008274 INFO (jetty-launcher-1661-thread-1) [ ] o.a.s.s.SolrDispatchFilter solr.xml found in ZooKeeper. Loading...
[junit4] 2> 1008599 INFO (jetty-launcher-1661-thread-1) [ ] o.a.s.c.ZkContainer Zookeeper client=127.0.0.1:45672/solr
[junit4] 2> 1008635 INFO (zkConnectionManagerCallback-1670-thread-1) [ ] o.a.s.c.c.ConnectionManager zkClient has connected
[junit4] 2> 1008679 INFO (zkConnectionManagerCallback-1672-thread-1) [ ] o.a.s.c.c.ConnectionManager zkClient has connected
[junit4] 2> 1008914 INFO (jetty-launcher-1661-thread-1) [n:127.0.0.1:50963_solr ] o.a.s.c.Overseer Overseer (id=null) closing
[junit4] 2> 1008916 INFO (jetty-launcher-1661-thread-1) [n:127.0.0.1:50963_solr ] o.a.s.c.OverseerElectionContext I am going to be the leader 127.0.0.1:50963_solr
[junit4] 2> 1008917 INFO (jetty-launcher-1661-thread-1) [n:127.0.0.1:50963_solr ] o.a.s.c.Overseer Overseer (id=74086820675584003-127.0.0.1:50963_solr-n_0000000000) starting
[junit4] 2> 1008987 INFO (zkConnectionManagerCallback-1679-thread-1) [ ] o.a.s.c.c.ConnectionManager zkClient has connected
[junit4] 2> 1008989 INFO (jetty-launcher-1661-thread-1) [n:127.0.0.1:50963_solr ] o.a.s.c.s.i.ZkClientClusterStateProvider Cluster at 127.0.0.1:45672/solr ready
[junit4] 2> 1009026 INFO (jetty-launcher-1661-thread-1) [n:127.0.0.1:50963_solr ] o.a.s.c.ZkController Register node as live in ZooKeeper:/live_nodes/127.0.0.1:50963_solr
[junit4] 2> 1009043 INFO (OverseerStateUpdate-74086820675584003-127.0.0.1:50963_solr-n_0000000000) [ ] o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (0) -> (1)
[junit4] 2> 1009291 INFO (jetty-launcher-1661-thread-1) [n:127.0.0.1:50963_solr ] o.a.s.h.a.MetricsHistoryHandler No .system collection, keeping metrics history in memory.
[junit4] 2> 1009419 INFO (jetty-launcher-1661-thread-1) [n:127.0.0.1:50963_solr ] o.a.s.m.r.SolrJmxReporter JMX monitoring for 'solr_50963.solr.node' (registry 'solr.node') enabled at server: com.sun.jmx.mbeanserver.JmxMBeanServer@558d7889
[junit4] 2> 1009487 INFO (jetty-launcher-1661-thread-1) [n:127.0.0.1:50963_solr ] o.a.s.m.r.SolrJmxReporter JMX monitoring for 'solr_50963.solr.jvm' (registry 'solr.jvm') enabled at server: com.sun.jmx.mbeanserver.JmxMBeanServer@558d7889
[junit4] 2> 1009487 INFO (jetty-launcher-1661-thread-1) [n:127.0.0.1:50963_solr ] o.a.s.m.r.SolrJmxReporter JMX monitoring for 'solr_50963.solr.jetty' (registry 'solr.jetty') enabled at server: com.sun.jmx.mbeanserver.JmxMBeanServer@558d7889
[junit4] 2> 1009489 INFO (jetty-launcher-1661-thread-1) [n:127.0.0.1:50963_solr ] o.a.s.c.CorePropertiesLocator Found 0 core definitions underneath /x1/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-7.x/solr/build/solr-core/test/J0/temp/solr.cloud.cdcr.CdcrBidirectionalTest_FBD20284F364263D-001/cdcr-cluster2-001/node1/.
[junit4] 2> 1009868 INFO (zkCallback-1678-thread-1) [ ] o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (0) -> (1)
[junit4] 2> 1010043 INFO (zkConnectionManagerCallback-1682-thread-1) [ ] o.a.s.c.c.ConnectionManager zkClient has connected
[junit4] 2> 1010072 INFO (zkConnectionManagerCallback-1685-thread-1) [ ] o.a.s.c.c.ConnectionManager zkClient has connected
[junit4] 2> 1010074 INFO (TEST-CdcrBidirectionalTest.testBiDir-seed#[FBD20284F364263D]) [ ] o.a.s.c.MiniSolrCloudCluster Starting cluster of 1 servers in /x1/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-7.x/solr/build/solr-core/test/J0/temp/solr.cloud.cdcr.CdcrBidirectionalTest_FBD20284F364263D-001/cdcr-cluster1-001
[junit4] 2> 1010074 INFO (TEST-CdcrBidirectionalTest.testBiDir-seed#[FBD20284F364263D]) [ ] o.a.s.c.ZkTestServer STARTING ZK TEST SERVER
[junit4] 2> 1010083 INFO (Thread-1744) [ ] o.a.s.c.ZkTestServer client port:0.0.0.0/0.0.0.0:0
[junit4] 2> 1010083 INFO (Thread-1744) [ ] o.a.s.c.ZkTestServer Starting server
[junit4] 2> 1010106 ERROR (Thread-1744) [ ] o.a.z.s.ZooKeeperServer ZKShutdownHandler is not registered, so ZooKeeper server won't take any action on ERROR or SHUTDOWN server state changes
[junit4] 2> 1010183 INFO (TEST-CdcrBidirectionalTest.testBiDir-seed#[FBD20284F364263D]) [ ] o.a.s.c.ZkTestServer start zk server on port:35134
[junit4] 2> 1010211 INFO (zkConnectionManagerCallback-1689-thread-1) [ ] o.a.s.c.c.ConnectionManager zkClient has connected
[junit4] 2> 1010229 INFO (jetty-launcher-1686-thread-1) [ ] o.e.j.s.Server jetty-9.4.11.v20180605; built: 2018-06-05T18:24:03.829Z; git: d5fc0523cfa96bfebfbda19606cad384d772f04c; jvm 1.8.0_172-b11
[junit4] 2> 1010306 INFO (jetty-launcher-1686-thread-1) [ ] o.e.j.s.session DefaultSessionIdManager workerName=node0
[junit4] 2> 1010306 INFO (jetty-launcher-1686-thread-1) [ ] o.e.j.s.session No SessionScavenger set, using defaults
[junit4] 2> 1010306 INFO (jetty-launcher-1686-thread-1) [ ] o.e.j.s.session node0 Scavenging every 660000ms
[junit4] 2> 1010332 INFO (jetty-launcher-1686-thread-1) [ ] o.e.j.s.h.ContextHandler Started o.e.j.s.ServletContextHandler@5a3d836b{/solr,null,AVAILABLE}
[junit4] 2> 1010333 INFO (jetty-launcher-1686-thread-1) [ ] o.e.j.s.AbstractConnector Started ServerConnector@4e4d5bad{SSL,[ssl, http/1.1]}{127.0.0.1:52108}
[junit4] 2> 1010333 INFO (jetty-launcher-1686-thread-1) [ ] o.e.j.s.Server Started @1010473ms
[junit4] 2> 1010333 INFO (jetty-launcher-1686-thread-1) [ ] o.a.s.c.s.e.JettySolrRunner Jetty properties: {hostContext=/solr, hostPort=52108}
[junit4] 2> 1010669 ERROR (jetty-launcher-1686-thread-1) [ ] o.a.s.u.StartupLoggingUtils Missing Java Option solr.log.dir. Logging may be missing or incomplete.
[junit4] 2> 1010675 INFO (jetty-launcher-1686-thread-1) [ ] o.a.s.s.SolrDispatchFilter Using logger factory org.apache.logging.slf4j.Log4jLoggerFactory
[junit4] 2> 1010675 INFO (jetty-launcher-1686-thread-1) [ ] o.a.s.s.SolrDispatchFilter ___ _ Welcome to Apache Solr™ version 7.5.0
[junit4] 2> 1010675 INFO (jetty-launcher-1686-thread-1) [ ] o.a.s.s.SolrDispatchFilter / __| ___| |_ _ Starting in cloud mode on port null
[junit4] 2> 1010675 INFO (jetty-launcher-1686-thread-1) [ ] o.a.s.s.SolrDispatchFilter \__ \/ _ \ | '_| Install dir: null
[junit4] 2> 1010675 INFO (jetty-launcher-1686-thread-1) [ ] o.a.s.s.SolrDispatchFilter |___/\___/_|_| Start time: 2018-07-04T13:40:47.055Z
[junit4] 2> 1010719 INFO (zkConnectionManagerCallback-1691-thread-1) [ ] o.a.s.c.c.ConnectionManager zkClient has connected
[junit4] 2> 1010719 INFO (jetty-launcher-1686-thread-1) [ ] o.a.s.s.SolrDispatchFilter solr.xml found in ZooKeeper. Loading...
[junit4] 2> 1012693 INFO (jetty-launcher-1686-thread-1) [ ] o.a.s.c.ZkContainer Zookeeper client=127.0.0.1:35134/solr
[junit4] 2> 1012694 INFO (zkConnectionManagerCallback-1695-thread-1) [ ] o.a.s.c.c.ConnectionManager zkClient has connected
[junit4] 2> 1012697 INFO (zkConnectionManagerCallback-1697-thread-1) [ ] o.a.s.c.c.ConnectionManager zkClient has connected
[junit4] 2> 1012868 INFO (jetty-launcher-1686-thread-1) [n:127.0.0.1:52108_solr ] o.a.s.c.Overseer Overseer (id=null) closing
[junit4] 2> 1012868 INFO (jetty-launcher-1686-thread-1) [n:127.0.0.1:52108_solr ] o.a.s.c.OverseerElectionContext I am going to be the leader 127.0.0.1:52108_solr
[junit4] 2> 1012869 INFO (jetty-launcher-1686-thread-1) [n:127.0.0.1:52108_solr ] o.a.s.c.Overseer Overseer (id=74086820825530371-127.0.0.1:52108_solr-n_0000000000) starting
[junit4] 2> 1012919 INFO (zkConnectionManagerCallback-1704-thread-1) [ ] o.a.s.c.c.ConnectionManager zkClient has connected
[junit4] 2> 1012924 INFO (jetty-launcher-1686-thread-1) [n:127.0.0.1:52108_solr ] o.a.s.c.s.i.ZkClientClusterStateProvider Cluster at 127.0.0.1:35134/solr ready
[junit4] 2> 1012977 INFO (jetty-launcher-1686-thread-1) [n:127.0.0.1:52108_solr ] o.a.s.c.ZkController Register node as live in ZooKeeper:/live_nodes/127.0.0.1:52108_solr
[junit4] 2> 1013547 INFO (jetty-launcher-1686-thread-1) [n:127.0.0.1:52108_solr ] o.a.s.h.a.MetricsHistoryHandler No .system collection, keeping metrics history in memory.
[junit4] 2> 1013740 INFO (zkCallback-1703-thread-1) [ ] o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (0) -> (1)
[junit4] 2> 1013764 INFO (jetty-launcher-1686-thread-1) [n:127.0.0.1:52108_solr ] o.a.s.m.r.SolrJmxReporter JMX monitoring for 'solr_52108.solr.node' (registry 'solr.node') enabled at server: com.sun.jmx.mbeanserver.JmxMBeanServer@558d7889
[junit4] 2> 1013790 INFO (jetty-launcher-1686-thread-1) [n:127.0.0.1:52108_solr ] o.a.s.m.r.SolrJmxReporter JMX monitoring for 'solr_52108.solr.jvm' (registry 'solr.jvm') enabled at server: com.sun.jmx.mbeanserver.JmxMBeanServer@558d7889
[junit4] 2> 1013790 INFO (jetty-launcher-1686-thread-1) [n:127.0.0.1:52108_solr ] o.a.s.m.r.SolrJmxReporter JMX monitoring for 'solr_52108.solr.jetty' (registry 'solr.jetty') enabled at server: com.sun.jmx.mbeanserver.JmxMBeanServer@558d7889
[junit4] 2> 1013792 INFO (jetty-launcher-1686-thread-1) [n:127.0.0.1:52108_solr ] o.a.s.c.CorePropertiesLocator Found 0 core definitions underneath /x1/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-7.x/solr/build/solr-core/test/J0/temp/solr.cloud.cdcr.CdcrBidirectionalTest_FBD20284F364263D-001/cdcr-cluster1-001/node1/.
[junit4] 2> 1013824 INFO (zkCallback-1696-thread-1) [ ] o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (0) -> (1)
[junit4] 2> 1013931 INFO (zkConnectionManagerCallback-1707-thread-1) [ ] o.a.s.c.c.ConnectionManager zkClient has connected
[junit4] 2> 1013983 INFO (zkConnectionManagerCallback-1710-thread-1) [ ] o.a.s.c.c.ConnectionManager zkClient has connected
[junit4] 2> 1013985 INFO (TEST-CdcrBidirectionalTest.testBiDir-seed#[FBD20284F364263D]) [ ] o.a.s.c.c.CdcrBidirectionalTest cluster2 zkHost = 127.0.0.1:45672/solr
[junit4] 2> 1013985 INFO (TEST-CdcrBidirectionalTest.testBiDir-seed#[FBD20284F364263D]) [ ] o.a.s.c.c.CdcrBidirectionalTest cluster1 zkHost = 127.0.0.1:35134/solr
[junit4] 2> 1014011 INFO (zkConnectionManagerCallback-1712-thread-1) [ ] o.a.s.c.c.ConnectionManager zkClient has connected
[junit4] 2> 1014027 INFO (zkConnectionManagerCallback-1716-thread-1) [ ] o.a.s.c.c.ConnectionManager zkClient has connected
[junit4] 2> 1014028 INFO (TEST-CdcrBidirectionalTest.testBiDir-seed#[FBD20284F364263D]) [ ] o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (0) -> (1)
[junit4] 2> 1014029 INFO (TEST-CdcrBidirectionalTest.testBiDir-seed#[FBD20284F364263D]) [ ] o.a.s.c.s.i.ZkClientClusterStateProvider Cluster at 127.0.0.1:35134/solr ready
[junit4] 2> 1014415 INFO (qtp504181695-6959) [n:127.0.0.1:52108_solr ] o.a.s.h.a.CollectionsHandler Invoked Collection Action :create with params collection.configName=cdcr-cluster1&maxShardsPerNode=2&name=cdcr-cluster1&nrtReplicas=1&action=CREATE&numShards=2&property.solr.directoryFactory=solr.StandardDirectoryFactory&wt=javabin&version=2 and sendToOCPQueue=true
[junit4] 2> 1014423 INFO (OverseerThreadFactory-2715-thread-1) [ ] o.a.s.c.a.c.CreateCollectionCmd Create collection cdcr-cluster1
[junit4] 2> 1014580 INFO (OverseerStateUpdate-74086820825530371-127.0.0.1:52108_solr-n_0000000000) [ ] o.a.s.c.o.SliceMutator createReplica() {
[junit4] 2> "operation":"ADDREPLICA",
[junit4] 2> "collection":"cdcr-cluster1",
[junit4] 2> "shard":"shard1",
[junit4] 2> "core":"cdcr-cluster1_shard1_replica_n1",
[junit4] 2> "state":"down",
[junit4] 2> "base_url":"https://127.0.0.1:52108/solr",
[junit4] 2> "type":"NRT",
[junit4] 2> "waitForFinalState":"false"}
[junit4] 2> 1014683 INFO (OverseerStateUpdate-74086820825530371-127.0.0.1:52108_solr-n_0000000000) [ ] o.a.s.c.o.SliceMutator createReplica() {
[junit4] 2> "operation":"ADDREPLICA",
[junit4] 2> "collection":"cdcr-cluster1",
[junit4] 2> "shard":"shard2",
[junit4] 2> "core":"cdcr-cluster1_shard2_replica_n3",
[junit4] 2> "state":"down",
[junit4] 2> "base_url":"https://127.0.0.1:52108/solr",
[junit4] 2> "type":"NRT",
[junit4] 2> "waitForFinalState":"false"}
[junit4] 2> 1014872 INFO (qtp504181695-6957) [n:127.0.0.1:52108_solr x:cdcr-cluster1_shard2_replica_n3] o.a.s.h.a.CoreAdminOperation core create command qt=/admin/cores&collection.configName=cdcr-cluster1&newCollection=true&collection=cdcr-cluster1&version=2&replicaType=NRT&coreNodeName=core_node4&name=cdcr-cluster1_shard2_replica_n3&action=CREATE&numShards=2&shard=shard2&property.solr.directoryFactory=solr.StandardDirectoryFactory&wt=javabin
[junit4] 2> 1014872 INFO (qtp504181695-6957) [n:127.0.0.1:52108_solr x:cdcr-cluster1_shard2_replica_n3] o.a.s.c.TransientSolrCoreCacheDefault Allocating transient cache for 2147483647 transient cores
[junit4] 2> 1014885 INFO (qtp504181695-6960) [n:127.0.0.1:52108_solr x:cdcr-cluster1_shard1_replica_n1] o.a.s.h.a.CoreAdminOperation core create command qt=/admin/cores&collection.configName=cdcr-cluster1&newCollection=true&collection=cdcr-cluster1&version=2&replicaType=NRT&coreNodeName=core_node2&name=cdcr-cluster1_shard1_replica_n1&action=CREATE&numShards=2&shard=shard1&property.solr.directoryFactory=solr.StandardDirectoryFactory&wt=javabin
[junit4] 2> 1015896 INFO (qtp504181695-6957) [n:127.0.0.1:52108_solr c:cdcr-cluster1 s:shard2 r:core_node4 x:cdcr-cluster1_shard2_replica_n3] o.a.s.c.SolrConfig Using Lucene MatchVersion: 7.5.0
[junit4] 2> 1015896 INFO (qtp504181695-6960) [n:127.0.0.1:52108_solr c:cdcr-cluster1 s:shard1 r:core_node2 x:cdcr-cluster1_shard1_replica_n1] o.a.s.c.SolrConfig Using Lucene MatchVersion: 7.5.0
[junit4] 2> 1015918 INFO (qtp504181695-6960) [n:127.0.0.1:52108_solr c:cdcr-cluster1 s:shard1 r:core_node2 x:cdcr-cluster1_shard1_replica_n1] o.a.s.s.IndexSchema [cdcr-cluster1_shard1_replica_n1] Schema name=minimal
[junit4] 2> 1015918 INFO (qtp504181695-6957) [n:127.0.0.1:52108_solr c:cdcr-cluster1 s:shard2 r:core_node4 x:cdcr-cluster1_shard2_replica_n3] o.a.s.s.IndexSchema [cdcr-cluster1_shard2_replica_n3] Schema name=minimal
[junit4] 2> 1015921 INFO (qtp504181695-6960) [n:127.0.0.1:52108_solr c:cdcr-cluster1 s:shard1 r:core_node2 x:cdcr-cluster1_shard1_replica_n1] o.a.s.s.IndexSchema Loaded schema minimal/1.1 with uniqueid field id
[junit4] 2> 1015921 INFO (qtp504181695-6957) [n:127.0.0.1:52108_solr c:cdcr-cluster1 s:shard2 r:core_node4 x:cdcr-cluster1_shard2_replica_n3] o.a.s.s.IndexSchema Loaded schema minimal/1.1 with uniqueid field id
[junit4] 2> 1015921 INFO (qtp504181695-6960) [n:127.0.0.1:52108_solr c:cdcr-cluster1 s:shard1 r:core_node2 x:cdcr-cluster1_shard1_replica_n1] o.a.s.c.CoreContainer Creating SolrCore 'cdcr-cluster1_shard1_replica_n1' using configuration from collection cdcr-cluster1, trusted=true
[junit4] 2> 1015921 INFO (qtp504181695-6957) [n:127.0.0.1:52108_solr c:cdcr-cluster1 s:shard2 r:core_node4 x:cdcr-cluster1_shard2_replica_n3] o.a.s.c.CoreContainer Creating SolrCore 'cdcr-cluster1_shard2_replica_n3' using configuration from collection cdcr-cluster1, trusted=true
[junit4] 2> 1015937 INFO (qtp504181695-6960) [n:127.0.0.1:52108_solr c:cdcr-cluster1 s:shard1 r:core_node2 x:cdcr-cluster1_shard1_replica_n1] o.a.s.m.r.SolrJmxReporter JMX monitoring for 'solr_52108.solr.core.cdcr-cluster1.shard1.replica_n1' (registry 'solr.core.cdcr-cluster1.shard1.replica_n1') enabled at server: com.sun.jmx.mbeanserver.JmxMBeanServer@558d7889
[junit4] 2> 1015937 INFO (qtp504181695-6960) [n:127.0.0.1:52108_solr c:cdcr-cluster1 s:shard1 r:core_node2 x:cdcr-cluster1_shard1_replica_n1] o.a.s.c.SolrCore solr.RecoveryStrategy.Builder
[junit4] 2> 1015938 INFO (qtp504181695-6960) [n:127.0.0.1:52108_solr c:cdcr-cluster1 s:shard1 r:core_node2 x:cdcr-cluster1_shard1_replica_n1] o.a.s.c.SolrCore [[cdcr-cluster1_shard1_replica_n1] ] Opening new SolrCore at [/x1/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-7.x/solr/build/solr-core/test/J0/temp/solr.cloud.cdcr.CdcrBidirectionalTest_FBD20284F364263D-001/cdcr-cluster1-001/node1/cdcr-cluster1_shard1_replica_n1], dataDir=[/x1/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-7.x/solr/build/solr-core/test/J0/temp/solr.cloud.cdcr.CdcrBidirectionalTest_FBD20284F364263D-001/cdcr-cluster1-001/node1/./cdcr-cluster1_shard1_replica_n1/data/]
[junit4] 2> 1015948 INFO (qtp504181695-6957) [n:127.0.0.1:52108_solr c:cdcr-cluster1 s:shard2 r:core_node4 x:cdcr-cluster1_shard2_replica_n3] o.a.s.m.r.SolrJmxReporter JMX monitoring for 'solr_52108.solr.core.cdcr-cluster1.shard2.replica_n3' (registry 'solr.core.cdcr-cluster1.shard2.replica_n3') enabled at server: com.sun.jmx.mbeanserver.JmxMBeanServer@558d7889
[junit4] 2> 1015948 INFO (qtp504181695-6957) [n:127.0.0.1:52108_solr c:cdcr-cluster1 s:shard2 r:core_node4 x:cdcr-cluster1_shard2_replica_n3] o.a.s.c.SolrCore solr.RecoveryStrategy.Builder
[junit4] 2> 1015948 INFO (qtp504181695-6957) [n:127.0.0.1:52108_solr c:cdcr-cluster1 s:shard2 r:core_node4 x:cdcr-cluster1_shard2_replica_n3] o.a.s.c.SolrCore [[cdcr-cluster1_shard2_replica_n3] ] Opening new SolrCore at [/x1/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-7.x/solr/build/solr-core/test/J0/temp/solr.cloud.cdcr.CdcrBidirectionalTest_FBD20284F364263D-001/cdcr-cluster1-001/node1/cdcr-cluster1_shard2_replica_n3], dataDir=[/x1/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-7.x/solr/build/solr-core/test/J0/temp/solr.cloud.cdcr.CdcrBidirectionalTest_FBD20284F364263D-001/cdcr-cluster1-001/node1/./cdcr-cluster1_shard2_replica_n3/data/]
[junit4] 2> 1016384 INFO (qtp504181695-6960) [n:127.0.0.1:52108_solr c:cdcr-cluster1 s:shard1 r:core_node2 x:cdcr-cluster1_shard1_replica_n1] o.a.s.u.UpdateHandler Using UpdateLog implementation: org.apache.solr.update.CdcrUpdateLog
[junit4] 2> 1016385 INFO (qtp504181695-6960) [n:127.0.0.1:52108_solr c:cdcr-cluster1 s:shard1 r:core_node2 x:cdcr-cluster1_shard1_replica_n1] o.a.s.u.UpdateLog Initializing UpdateLog: dataDir= defaultSyncLevel=FLUSH numRecordsToKeep=100 maxNumLogsToKeep=10 numVersionBuckets=65536
[junit4] 2> 1016424 INFO (qtp504181695-6960) [n:127.0.0.1:52108_solr c:cdcr-cluster1 s:shard1 r:core_node2 x:cdcr-cluster1_shard1_replica_n1] o.a.s.u.CommitTracker Hard AutoCommit: disabled
[junit4] 2> 1016424 INFO (qtp504181695-6960) [n:127.0.0.1:52108_solr c:cdcr-cluster1 s:shard1 r:core_node2 x:cdcr-cluster1_shard1_replica_n1] o.a.s.u.CommitTracker Soft AutoCommit: disabled
[junit4] 2> 1016471 INFO (qtp504181695-6960) [n:127.0.0.1:52108_solr c:cdcr-cluster1 s:shard1 r:core_node2 x:cdcr-cluster1_shard1_replica_n1] o.a.s.s.SolrIndexSearcher Opening [Searcher@7de1e067[cdcr-cluster1_shard1_replica_n1] main]
[junit4] 2> 1016473 INFO (qtp504181695-6960) [n:127.0.0.1:52108_solr c:cdcr-cluster1 s:shard1 r:core_node2 x:cdcr-cluster1_shard1_replica_n1] o.a.s.r.ManagedResourceStorage Configured ZooKeeperStorageIO with znodeBase: /configs/cdcr-cluster1
[junit4] 2> 1016473 INFO (qtp504181695-6960) [n:127.0.0.1:52108_solr c:cdcr-cluster1 s:shard1 r:core_node2 x:cdcr-cluster1_shard1_replica_n1] o.a.s.r.ManagedResourceStorage Loaded null at path _rest_managed.json using ZooKeeperStorageIO:path=/configs/cdcr-cluster1
[junit4] 2> 1016473 INFO (qtp504181695-6960) [n:127.0.0.1:52108_solr c:cdcr-cluster1 s:shard1 r:core_node2 x:cdcr-cluster1_shard1_replica_n1] o.a.s.s.ZkIndexSchemaReader Creating ZooKeeper watch for the managed schema at /configs/cdcr-cluster1/managed-schema
[junit4] 2> 1016474 INFO (qtp504181695-6960) [n:127.0.0.1:52108_solr c:cdcr-cluster1 s:shard1 r:core_node2 x:cdcr-cluster1_shard1_replica_n1] o.a.s.s.ZkIndexSchemaReader Current schema version 0 is already the latest
[junit4] 2> 1016475 INFO (qtp504181695-6960) [n:127.0.0.1:52108_solr c:cdcr-cluster1 s:shard1 r:core_node2 x:cdcr-cluster1_shard1_replica_n1] o.a.s.h.ReplicationHandler Commits will be reserved for 10000ms.
[junit4] 2> 1016516 INFO (qtp504181695-6960) [n:127.0.0.1:52108_solr c:cdcr-cluster1 s:shard1 r:core_node2 x:cdcr-cluster1_shard1_replica_n1] o.a.s.h.CdcrBufferStateManager Created znode /collections/cdcr-cluster1/cdcr/state/buffer
[junit4] 2> 1016556 INFO (qtp504181695-6960) [n:127.0.0.1:52108_solr c:cdcr-cluster1 s:shard1 r:core_node2 x:cdcr-cluster1_shard1_replica_n1] o.a.s.h.CdcrProcessStateManager Created znode /collections/cdcr-cluster1/cdcr/state/process
[junit4] 2> 1016951 INFO (qtp504181695-6957) [n:127.0.0.1:52108_solr c:cdcr-cluster1 s:shard2 r:core_node4 x:cdcr-cluster1_shard2_replica_n3] o.a.s.u.UpdateHandler Using UpdateLog implementation: org.apache.solr.update.CdcrUpdateLog
[junit4] 2> 1016951 INFO (qtp504181695-6957) [n:127.0.0.1:52108_solr c:cdcr-cluster1 s:shard2 r:core_node4 x:cdcr-cluster1_shard2_replica_n3] o.a.s.u.UpdateLog Initializing UpdateLog: dataDir= defaultSyncLevel=FLUSH numRecordsToKeep=100 maxNumLogsToKeep=10 numVersionBuckets=65536
[junit4] 2> 1016952 INFO (qtp504181695-6957) [n:127.0.0.1:52108_solr c:cdcr-cluster1 s:shard2 r:core_node4 x:cdcr-cluster1_shard2_replica_n3] o.a.s.u.CommitTracker Hard AutoCommit: disabled
[junit4] 2> 1016953 INFO (qtp504181695-6957) [n:127.0.0.1:52108_solr c:cdcr-cluster1 s:shard2 r:core_node4 x:cdcr-cluster1_shard2_replica_n3] o.a.s.u.CommitTracker Soft AutoCommit: disabled
[junit4] 2> 1016972 INFO (searcherExecutor-2721-thread-1-processing-n:127.0.0.1:52108_solr x:cdcr-cluster1_shard1_replica_n1 c:cdcr-cluster1 s:shard1 r:core_node2) [n:127.0.0.1:52108_solr c:cdcr-cluster1 s:shard1 r:core_node2 x:cdcr-cluster1_shard1_replica_n1] o.a.s.c.SolrCore [cdcr-cluster1_shard1_replica_n1] Registered new searcher Searcher@7de1e067[cdcr-cluster1_shard1_replica_n1] main{ExitableDirectoryReader(UninvertingDirectoryReader())}
[junit4] 2> 1016974 INFO (qtp504181695-6960) [n:127.0.0.1:52108_solr c:cdcr-cluster1 s:shard1 r:core_node2 x:cdcr-cluster1_shard1_replica_n1] o.a.s.u.UpdateLog Could not find max version in index or recent updates, using new clock 1605067502627323904
[junit4] 2> 1016981 INFO (qtp504181695-6957) [n:127.0.0.1:52108_solr c:cdcr-cluster1 s:shard2 r:core_node4 x:cdcr-cluster1_shard2_replica_n3] o.a.s.s.SolrIndexSearcher Opening [Searcher@38b5bb17[cdcr-cluster1_shard2_replica_n3] main]
[junit4] 2> 1016982 INFO (qtp504181695-6957) [n:127.0.0.1:52108_solr c:cdcr-cluster1 s:shard2 r:core_node4 x:cdcr-cluster1_shard2_replica_n3] o.a.s.r.ManagedResourceStorage Configured ZooKeeperStorageIO with znodeBase: /configs/cdcr-cluster1
[junit4] 2> 1016983 INFO (qtp504181695-6957) [n:127.0.0.1:52108_solr c:cdcr-cluster1 s:shard2 r:core_node4 x:cdcr-cluster1_shard2_replica_n3] o.a.s.r.ManagedResourceStorage Loaded null at path _rest_managed.json using ZooKeeperStorageIO:path=/configs/cdcr-cluster1
[junit4] 2> 1016983 INFO (qtp504181695-6957) [n:127.0.0.1:52108_solr c:cdcr-cluster1 s:shard2 r:core_node4 x:cdcr-cluster1_shard2_replica_n3] o.a.s.s.ZkIndexSchemaReader Creating ZooKeeper watch for the managed schema at /configs/cdcr-cluster1/managed-schema
[junit4] 2> 1016983 INFO (qtp504181695-6957) [n:127.0.0.1:52108_solr c:cdcr-cluster1 s:shard2 r:core_node4 x:cdcr-cluster1_shard2_replica_n3] o.a.s.s.ZkIndexSchemaReader Current schema version 0 is already the latest
[junit4] 2> 1016984 INFO (qtp504181695-6957) [n:127.0.0.1:52108_solr c:cdcr-cluster1 s:shard2 r:core_node4 x:cdcr-cluster1_shard2_replica_n3] o.a.s.h.ReplicationHandler Commits will be reserved for 10000ms.
[junit4] 2> 1016997 INFO (qtp504181695-6960) [n:127.0.0.1:52108_solr c:cdcr-cluster1 s:shard1 r:core_node2 x:cdcr-cluster1_shard1_replica_n1] o.a.s.c.ZkShardTerms Successful update of terms at /collections/cdcr-cluster1/terms/shard1 to Terms{values={core_node2=0}, version=0}
[junit4] 2> 1017001 INFO (qtp504181695-6960) [n:127.0.0.1:52108_solr c:cdcr-cluster1 s:shard1 r:core_node2 x:cdcr-cluster1_shard1_replica_n1] o.a.s.c.ShardLeaderElectionContext Enough replicas found to continue.
[junit4] 2> 1017001 INFO (qtp504181695-6960) [n:127.0.0.1:52108_solr c:cdcr-cluster1 s:shard1 r:core_node2 x:cdcr-cluster1_shard1_replica_n1] o.a.s.c.ShardLeaderElectionContext I may be the new leader - try and sync
[junit4] 2> 1017001 INFO (qtp504181695-6960) [n:127.0.0.1:52108_solr c:cdcr-cluster1 s:shard1 r:core_node2 x:cdcr-cluster1_shard1_replica_n1] o.a.s.c.SyncStrategy Sync replicas to https://127.0.0.1:52108/solr/cdcr-cluster1_shard1_replica_n1/
[junit4] 2> 1017001 INFO (qtp504181695-6960) [n:127.0.0.1:52108_solr c:cdcr-cluster1 s:shard1 r:core_node2 x:cdcr-cluster1_shard1_replica_n1] o.a.s.c.SyncStrategy Sync Success - now sync replicas to me
[junit4] 2> 1017002 INFO (qtp504181695-6960) [n:127.0.0.1:52108_solr c:cdcr-cluster1 s:shard1 r:core_node2 x:cdcr-cluster1_shard1_replica_n1] o.a.s.c.SyncStrategy https://127.0.0.1:52108/solr/cdcr-cluster1_shard1_replica_n1/ has no replicas
[junit4] 2> 1017002 INFO (qtp504181695-6960) [n:127.0.0.1:52108_solr c:cdcr-cluster1 s:shard1 r:core_node2 x:cdcr-cluster1_shard1_replica_n1] o.a.s.c.ShardLeaderElectionContext Found all replicas participating in election, clear LIR
[junit4] 2> 1017009 INFO (qtp504181695-6960) [n:127.0.0.1:52108_solr c:cdcr-cluster1 s:shard1 r:core_node2 x:cdcr-cluster1_shard1_replica_n1] o.a.s.c.ShardLeaderElectionContext I am the new leader: https://127.0.0.1:52108/solr/cdcr-cluster1_shard1_replica_n1/ shard1
[junit4] 2> 1017011 INFO (zkCallback-1696-thread-1) [ ] o.a.s.h.CdcrLeaderStateManager Received new leader state @ cdcr-cluster1:shard1
[junit4] 2> 1017029 INFO (qtp504181695-6957) [n:127.0.0.1:52108_solr c:cdcr-cluster1 s:shard2 r:core_node4 x:cdcr-cluster1_shard2_replica_n3] o.a.s.u.UpdateLog Could not find max version in index or recent updates, using new clock 1605067502684995584
[junit4] 2> 1017043 INFO (searcherExecutor-2720-thread-1-processing-n:127.0.0.1:52108_solr x:cdcr-cluster1_shard2_replica_n3 c:cdcr-cluster1 s:shard2 r:core_node4) [n:127.0.0.1:52108_solr c:cdcr-cluster1 s:shard2 r:core_node4 x:cdcr-cluster1_shard2_replica_n3] o.a.s.c.SolrCore [cdcr-cluster1_shard2_replica_n3] Registered new searcher Searcher@38b5bb17[cdcr-cluster1_shard2_replica_n3] main{ExitableDirectoryReader(UninvertingDirectoryReader())}
[junit4] 2> 1017245 INFO (qtp504181695-6957) [n:127.0.0.1:52108_solr c:cdcr-cluster1 s:shard2 r:core_node4 x:cdcr-cluster1_shard2_replica_n3] o.a.s.c.ZkShardTerms Successful update of terms at /collections/cdcr-cluster1/terms/shard2 to Terms{values={core_node4=0}, version=0}
[junit4] 2> 1017259 INFO (qtp504181695-6960) [n:127.0.0.1:52108_solr c:cdcr-cluster1 s:shard1 r:core_node2 x:cdcr-cluster1_shard1_replica_n1] o.a.s.c.ZkController I am the leader, no recovery necessary
[junit4] 2> 1017261 INFO (qtp504181695-6960) [n:127.0.0.1:52108_solr c:cdcr-cluster1 s:shard1 r:core_node2 x:cdcr-cluster1_shard1_replica_n1] o.a.s.s.HttpSolrCall [admin] webapp=null path=/admin/cores params={qt=/admin/cores&collection.configName=cdcr-cluster1&newCollection=true&collection=cdcr-cluster1&version=2&replicaType=NRT&coreNodeName=core_node2&name=cdcr-cluster1_shard1_replica_n1&action=CREATE&numShards=2&shard=shard1&property.solr.directoryFactory=solr.StandardDirectoryFactory&wt=javabin} status=0 QTime=2376
[junit4] 2> 1017323 INFO (qtp504181695-6957) [n:127.0.0.1:52108_solr c:cdcr-cluster1 s:shard2 r:core_node4 x:cdcr-cluster1_shard2_replica_n3] o.a.s.c.ShardLeaderElectionContext Enough replicas found to continue.
[junit4] 2> 1017323 INFO (qtp504181695-6957) [n:127.0.0.1:52108_solr c:cdcr-cluster1 s:shard2 r:core_node4 x:cdcr-cluster1_shard2_replica_n3] o.a.s.c.ShardLeaderElectionContext I may be the new leader - try and sync
[junit4] 2> 1017323 INFO (qtp504181695-6957) [n:127.0.0.1:52108_solr c:cdcr-cluster1 s:shard2 r:core_node4 x:cdcr-cluster1_shard2_replica_n3] o.a.s.c.SyncStrategy Sync replicas to https://127.0.0.1:52108/solr/cdcr-cluster1_shard2_replica_n3/
[junit4] 2> 1017323 INFO (qtp504181695-6957) [n:127.0.0.1:52108_solr c:cdcr-cluster1 s:shard2 r:core_node4 x:cdcr-cluster1_shard2_replica_n3] o.a.s.c.SyncStrategy Sync Success - now sync replicas to me
[junit4] 2> 1017323 INFO (qtp504181695-6957) [n:127.0.0.1:52108_solr c:cdcr-cluster1 s:shard2 r:core_node4 x:cdcr-cluster1_shard2_replica_n3] o.a.s.c.SyncStrategy https://127.0.0.1:52108/solr/cdcr-cluster1_shard2_replica_n3/ has no replicas
[junit4] 2> 1017323 INFO (qtp504181695-6957) [n:127.0.0.1:52108_solr c:cdcr-cluster1 s:shard2 r:core_node4 x:cdcr-cluster1_shard2_replica_n3] o.a.s.c.ShardLeaderElectionContext Found all replicas participating in election, clear LIR
[junit4] 2> 1017384 INFO (qtp504181695-6957) [n:127.0.0.1:52108_solr c:cdcr-cluster1 s:shard2 r:core_node4 x:cdcr-cluster1_shard2_replica_n3] o.a.s.c.ShardLeaderElectionContext I am the new leader: https://127.0.0.1:52108/solr/cdcr-cluster1_shard2_replica_n3/ shard2
[junit4] 2> 1017387 INFO (zkCallback-1696-thread-1) [ ] o.a.s.h.CdcrLeaderStateManager Received new leader state @ cdcr-cluster1:shard2
[junit4] 2> 1017387 INFO (zkCallback-1696-thread-4) [ ] o.a.s.c.c.ZkStateReader A cluster state change: [WatchedEvent state:SyncConnected type:NodeDataChanged path:/collections/cdcr-cluster1/state.json] for collection [cdcr-cluster1] has occurred - updating... (live nodes size: [1])
[junit4] 2> 1017559 INFO (zkCallback-1696-thread-1) [ ] o.a.s.c.c.ZkStateReader A cluster state change: [WatchedEvent state:SyncConnected type:NodeDataChanged path:/collections/cdcr-cluster1/state.json] for collection [cdcr-cluster1] has occurred - updating... (live nodes size: [1])
[junit4] 2> 1017629 INFO (qtp504181695-6957) [n:127.0.0.1:52108_solr c:cdcr-cluster1 s:shard2 r:core_node4 x:cdcr-cluster1_shard2_replica_n3] o.a.s.c.ZkController I am the leader, no recovery necessary
[junit4] 2> 1017631 INFO (qtp504181695-6957) [n:127.0.0.1:52108_solr c:cdcr-cluster1 s:shard2 r:core_node4 x:cdcr-cluster1_shard2_replica_n3] o.a.s.s.HttpSolrCall [admin] webapp=null path=/admin/cores params={qt=/admin/cores&collection.configName=cdcr-cluster1&newCollection=true&collection=cdcr-cluster1&version=2&replicaType=NRT&coreNodeName=core_node4&name=cdcr-cluster1_shard2_replica_n3&action=CREATE&numShards=2&shard=shard2&property.solr.directoryFactory=solr.StandardDirectoryFactory&wt=javabin} status=0 QTime=2759
[junit4] 2> 1017728 INFO (qtp504181695-6959) [n:127.0.0.1:52108_solr ] o.a.s.h.a.CollectionsHandler Wait for new collection to be active for at most 30 seconds. Check all shard replicas
[junit4] 2> 1017799 INFO (zkCallback-1696-thread-4) [ ] o.a.s.c.c.ZkStateReader A cluster state change: [WatchedEvent state:SyncConnected type:NodeDataChanged path:/collections/cdcr-cluster1/state.json] for collection [cdcr-cluster1] has occurred - updating... (live nodes size: [1])
[junit4] 2> 1018455 INFO (OverseerCollectionConfigSetProcessor-74086820825530371-127.0.0.1:52108_solr-n_0000000000) [ ] o.a.s.c.OverseerTaskQueue Response ZK path: /overseer/collection-queue-work/qnr-0000000000 doesn't exist. Requestor may have disconnected from ZooKeeper
[junit4] 2> 1018803 INFO (qtp504181695-6959) [n:127.0.0.1:52108_solr ] o.a.s.s.HttpSolrCall [admin] webapp=null path=/admin/collections params={collection.configName=cdcr-cluster1&maxShardsPerNode=2&name=cdcr-cluster1&nrtReplicas=1&action=CREATE&numShards=2&property.solr.directoryFactory=solr.StandardDirectoryFactory&wt=javabin&version=2} status=0 QTime=4387
[junit4] 2> 1018847 INFO (zkConnectionManagerCallback-1720-thread-1) [ ] o.a.s.c.c.ConnectionManager zkClient has connected
[junit4] 2> 1019028 WARN (NIOServerCxn.Factory:0.0.0.0/0.0.0.0:0) [ ] o.a.z.s.NIOServerCnxn Unable to read additional data from client sessionid 0x107359224e80007, likely client has closed socket
[junit4] 2> 1019059 INFO (zkConnectionManagerCallback-1724-thread-1) [ ] o.a.s.c.c.ConnectionManager zkClient has connected
[junit4] 2> 1019061 INFO (TEST-CdcrBidirectionalTest.testBiDir-seed#[FBD20284F364263D]) [ ] o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (0) -> (1)
[junit4] 2> 1019064 INFO (TEST-CdcrBidirectionalTest.testBiDir-seed#[FBD20284F364263D]) [ ] o.a.s.c.s.i.ZkClientClusterStateProvider Cluster at 127.0.0.1:45672/solr ready
[junit4] 2> 1019188 INFO (qtp1718818795-6903) [n:127.0.0.1:50963_solr ] o.a.s.h.a.CollectionsHandler Invoked Collection Action :create with params collection.configName=cdcr-cluster2&maxShardsPerNode=2&name=cdcr-cluster2&nrtReplicas=1&action=CREATE&numShards=2&property.solr.directoryFactory=solr.StandardDirectoryFactory&wt=javabin&version=2 and sendToOCPQueue=true
[junit4] 2> 1019271 INFO (OverseerThreadFactory-2702-thread-1) [ ] o.a.s.c.a.c.CreateCollectionCmd Create collection cdcr-cluster2
[junit4] 2> 1019528 INFO (OverseerStateUpdate-74086820675584003-127.0.0.1:50963_solr-n_0000000000) [ ] o.a.s.c.o.SliceMutator createReplica() {
[junit4] 2> "operation":"ADDREPLICA",
[junit4] 2> "collection":"cdcr-cluster2",
[junit4] 2> "shard":"shard1",
[junit4] 2> "core":"cdcr-cluster2_shard1_replica_n1",
[junit4] 2> "state":"down",
[junit4] 2> "base_url":"https://127.0.0.1:50963/solr",
[junit4] 2> "type":"NRT",
[junit4] 2> "waitForFinalState":"false"}
[junit4] 2> 1019688 INFO (OverseerStateUpdate-74086820675584003-127.0.0.1:50963_solr-n_0000000000) [ ] o.a.s.c.o.SliceMutator createReplica() {
[junit4] 2> "operation":"ADDREPLICA",
[junit4] 2> "collection":"cdcr-cluster2",
[junit4] 2> "shard":"shard2",
[junit4] 2> "core":"cdcr-cluster2_shard2_replica_n2",
[junit4] 2> "state":"down",
[junit4] 2> "base_url":"https://127.0.0.1:50963/solr",
[junit4] 2> "type":"NRT",
[junit4] 2> "waitForFinalState":"false"}
[junit4] 2> 1020582 INFO (qtp1718818795-6899) [n:127.0.0.1:50963_solr x:cdcr-cluster2_shard1_replica_n1] o.a.s.h.a.CoreAdminOperation core create command qt=/admin/cores&collection.configName=cdcr-cluster2&newCollection=true&collection=cdcr-cluster2&version=2&replicaType=NRT&coreNodeName=core_node3&name=cdcr-cluster2_shard1_replica_n1&action=CREATE&numShards=2&shard=shard1&property.solr.directoryFactory=solr.StandardDirectoryFactory&wt=javabin
[junit4] 2> 1020583 INFO (qtp1718818795-6899) [n:127.0.0.1:50963_solr x:cdcr-cluster2_shard1_replica_n1] o.a.s.c.TransientSolrCoreCacheDefault Allocating transient cache for 2147483647 transient cores
[junit4] 2> 1020635 INFO (qtp1718818795-6905) [n:127.0.0.1:50963_solr x:cdcr-cluster2_shard2_replica_n2] o.a.s.h.a.CoreAdminOperation core create command qt=/admin/cores&collection.configName=cdcr-cluster2&newCollection=true&collection=cdcr-cluster2&version=2&replicaType=NRT&coreNodeName=core_node4&name=cdcr-cluster2_shard2_replica_n2&action=CREATE&numShards=2&shard=shard2&property.solr.directoryFactory=solr.StandardDirectoryFactory&wt=javabin
[junit4] 2> 1021633 INFO (qtp1718818795-6899) [n:127.0.0.1:50963_solr c:cdcr-cluster2 s:shard1 r:core_node3 x:cdcr-cluster2_shard1_replica_n1] o.a.s.c.SolrConfig Using Lucene MatchVersion: 7.5.0
[junit4] 2> 1021656 INFO (qtp1718818795-6905) [n:127.0.0.1:50963_solr c:cdcr-cluster2 s:shard2 r:core_node4 x:cdcr-cluster2_shard2_replica_n2] o.a.s.c.SolrConfig Using Lucene MatchVersion: 7.5.0
[junit4] 2> 1021683 INFO (qtp1718818795-6899) [n:127.0.0.1:50963_solr c:cdcr-cluster2 s:shard1 r:core_node3 x:cdcr-cluster2_shard1_replica_n1] o.a.s.s.IndexSchema [cdcr-cluster2_shard1_replica_n1] Schema name=minimal
[junit4] 2> 1021686 INFO (qtp1718818795-6899) [n:127.0.0.1:50963_solr c:cdcr-cluster2 s:shard1 r:core_node3 x:cdcr-cluster2_shard1_replica_n1] o.a.s.s.IndexSchema Loaded schema minimal/1.1 with uniqueid field id
[junit4] 2> 1021686 INFO (qtp1718818795-6899) [n:127.0.0.1:50963_solr c:cdcr-cluster2 s:shard1 r:core_node3 x:cdcr-cluster2_shard1_replica_n1] o.a.s.c.CoreContainer Creating SolrCore 'cdcr-cluster2_shard1_replica_n1' using configuration from collection cdcr-cluster2, trusted=true
[junit4] 2> 1021694 INFO (qtp1718818795-6905) [n:127.0.0.1:50963_solr c:cdcr-cluster2 s:shard2 r:core_node4 x:cdcr-cluster2_shard2_replica_n2] o.a.s.s.IndexSchema [cdcr-cluster2_shard2_replica_n2] Schema name=minimal
[junit4] 2> 1021705 INFO (qtp1718818795-6905) [n:127.0.0.1:50963_solr c:cdcr-cluster2 s:shard2 r:core_node4 x:cdcr-cluster2_shard2_replica_n2] o.a.s.s.IndexSchema Loaded schema minimal/1.1 with uniqueid field id
[junit4] 2> 1021705 INFO (qtp1718818795-6905) [n:127.0.0.1:50963_solr c:cdcr-cluster2 s:shard2 r:core_node4 x:cdcr-cluster2_shard2_replica_n2] o.a.s.c.CoreContainer Creating SolrCore 'cdcr-cluster2_shard2_replica_n2' using configuration from collection cdcr-cluster2, trusted=true
[junit4] 2> 1021706 INFO (qtp1718818795-6905) [n:127.0.0.1:50963_solr c:cdcr-cluster2 s:shard2 r:core_node4 x:cdcr-cluster2_shard2_replica_n2] o.a.s.m.r.SolrJmxReporter JMX monitoring for 'solr_50963.solr.core.cdcr-cluster2.shard2.replica_n2' (registry 'solr.core.cdcr-cluster2.shard2.replica_n2') enabled at server: com.sun.jmx.mbeanserver.JmxMBeanServer@558d7889
[junit4] 2> 1021706 INFO (qtp1718818795-6905) [n:127.0.0.1:50963_solr c:cdcr-cluster2 s:shard2 r:core_node4 x:cdcr-cluster2_shard2_replica_n2] o.a.s.c.SolrCore solr.RecoveryStrategy.Builder
[junit4] 2> 1021706 INFO (qtp1718818795-6905) [n:127.0.0.1:50963_solr c:cdcr-cluster2 s:shard2 r:core_node4 x:cdcr-cluster2_shard2_replica_n2] o.a.s.c.SolrCore [[cdcr-cluster2_shard2_replica_n2] ] Opening new SolrCore at [/x1/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-7.x/solr/build/solr-core/test/J0/temp/solr.cloud.cdcr.CdcrBidirectionalTest_FBD20284F364263D-001/cdcr-cluster2-001/node1/cdcr-cluster2_shard2_replica_n2], dataDir=[/x1/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-7.x/solr/build/solr-core/test/J0/temp/solr.cloud.cdcr.CdcrBidirectionalTest_FBD20284F364263D-001/cdcr-cluster2-001/node1/./cdcr-cluster2_shard2_replica_n2/data/]
[junit4] 2> 1021736 INFO (qtp1718818795-6899) [n:127.0.0.1:50963_solr c:cdcr-cluster2 s:shard1 r:core_node3 x:cdcr-cluster2_shard1_replica_n1] o.a.s.m.r.SolrJmxReporter JMX monitoring for 'solr_50963.solr.core.cdcr-cluster2.shard1.replica_n1' (registry 'solr.core.cdcr-cluster2.shard1.replica_n1') enabled at server: com.sun.jmx.mbeanserver.JmxMBeanServer@558d7889
[junit4] 2> 1021736 INFO (qtp1718818795-6899) [n:127.0.0.1:50963_solr c:cdcr-cluster2 s:shard1 r:core_node3 x:cdcr-cluster2_shard1_replica_n1] o.a.s.c.SolrCore solr.RecoveryStrategy.Builder
[junit4] 2> 1021736 INFO (qtp1718818795-6899) [n:127.0.0.1:50963_solr c:cdcr-cluster2 s:shard1 r:core_node3 x:cdcr-cluster2_shard1_replica_n1] o.a.s.c.SolrCore [[cdcr-cluster2_shard1_replica_n1] ] Opening new SolrCore at [/x1/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-7.x/solr/build/solr-core/test/J0/temp/solr.cloud.cdcr.CdcrBidirectionalTest_FBD20284F364263D-001/cdcr-cluster2-001/node1/cdcr-cluster2_shard1_replica_n1], dataDir=[/x1/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-7.x/solr/build/solr-core/test/J0/temp/solr.cloud.cdcr.CdcrBidirectionalTest_FBD20284F364263D-001/cdcr-cluster2-001/node1/./cdcr-cluster2_shard1_replica_n1/data/]
[junit4] 2> 1022483 INFO (qtp1718818795-6899) [n:127.0.0.1:50963_solr c:cdcr-cluster2 s:shard1 r:core_node3 x:cdcr-cluster2_shard1_replica_n1] o.a.s.u.UpdateHandler Using UpdateLog implementation: org.apache.solr.update.CdcrUpdateLog
[junit4] 2> 1022483 INFO (qtp1718818795-6899) [n:127.0.0.1:50963_solr c:cdcr-cluster2 s:shard1 r:core_node3 x:cdcr-cluster2_shard1_replica_n1] o.a.s.u.UpdateLog Initializing UpdateLog: dataDir= defaultSyncLevel=FLUSH numRecordsToKeep=100 maxNumLogsToKeep=10 numVersionBuckets=65536
[junit4] 2> 1022489 INFO (qtp1718818795-6899) [n:127.0.0.1:50963_solr c:cdcr-cluster2 s:shard1 r:core_node3 x:cdcr-cluster2_shard1_replica_n1] o.a.s.u.CommitTracker Hard AutoCommit: disabled
[junit4] 2> 1022489 INFO (qtp1718818795-6899) [n:127.0.0.1:50963_solr c:cdcr-cluster2 s:shard1 r:core_node3 x:cdcr-cluster2_shard1_replica_n1] o.a.s.u.CommitTracker Soft AutoCommit: disabled
[junit4] 2> 1022495 INFO (qtp1718818795-6905) [n:127.0.0.1:50963_solr c:cdcr-cluster2 s:shard2 r:core_node4 x:cdcr-cluster2_shard2_replica_n2] o.a.s.u.UpdateHandler Using UpdateLog implementation: org.apache.solr.update.CdcrUpdateLog
[junit4] 2> 1022495 INFO (qtp1718818795-6905) [n:127.0.0.1:50963_solr c:cdcr-cluster2 s:shard2 r:core_node4 x:cdcr-cluster2_shard2_replica_n2] o.a.s.u.UpdateLog Initializing UpdateLog: dataDir= defaultSyncLevel=FLUSH numRecordsToKeep=100 maxNumLogsToKeep=10 numVersionBuckets=65536
[junit4] 2> 1022496 INFO (qtp1718818795-6905) [n:127.0.0.1:50963_solr c:cdcr-cluster2 s:shard2 r:core_node4 x:cdcr-cluster2_shard2_replica_n2] o.a.s.u.CommitTracker Hard AutoCommit: disabled
[junit4] 2> 1022496 INFO (qtp1718818795-6905) [n:127.0.0.1:50963_solr c:cdcr-cluster2 s:shard2 r:core_node4 x:cdcr-cluster2_shard2_replica_n2] o.a.s.u.CommitTracker Soft AutoCommit: disabled
[junit4] 2> 1022531 INFO (qtp1718818795-6905) [n:127.0.0.1:50963_solr c:cdcr-cluster2 s:shard2 r:core_node4 x:cdcr-cluster2_shard2_replica_n2] o.a.s.s.SolrIndexSearcher Opening [Searcher@65bd9a64[cdcr-cluster2_shard2_replica_n2] main]
[junit4] 2> 1022532 INFO (qtp1718818795-6899) [n:127.0.0.1:50963_solr c:cdcr-cluster2 s:shard1 r:core_node3 x:cdcr-cluster2_shard1_replica_n1] o.a.s.s.SolrIndexSearcher Opening [Searcher@684f2885[cdcr-cluster2_shard1_replica_n1] main]
[junit4] 2> 1022540 INFO (qtp1718818795-6905) [n:127.0.0.1:50963_solr c:cdcr-cluster2 s:shard2 r:core_node4 x:cdcr-cluster2_shard2_replica_n2] o.a.s.r.ManagedResourceStorage Configured ZooKeeperStorageIO with znodeBase: /configs/cdcr-cluster2
[junit4] 2> 1022541 INFO (qtp1718818795-6899) [n:127.0.0.1:50963_solr c:cdcr-cluster2 s:shard1 r:core_node3 x:cdcr-cluster2_shard1_replica_n1] o.a.s.r.ManagedResourceStorage Configured ZooKeeperStorageIO with znodeBase: /configs/cdcr-cluster2
[junit4] 2> 1022541 INFO (qtp1718818795-6905) [n:127.0.0.1:50963_solr c:cdcr-cluster2 s:shard2 r:core_node4 x:cdcr-cluster2_shard2_replica_n2] o.a.s.r.ManagedResourceStorage Loaded null at path _rest_managed.json using ZooKeeperStorageIO:path=/configs/cdcr-cluster2
[junit4] 2> 1022541 INFO (qtp1718818795-6905) [n:127.0.0.1:50963_solr c:cdcr-cluster2 s:shard2 r:core_node4 x:cdcr-cluster2_shard2_replica_n2] o.a.s.s.ZkIndexSchemaReader Creating ZooKeeper watch for the managed schema at /configs/cdcr-cluster2/managed-schema
[junit4] 2> 1022541 INFO (qtp1718818795-6899) [n:127.0.0.1:50963_solr c:cdcr-cluster2 s:shard1 r:core_node3 x:cdcr-cluster2_shard1_replica_n1] o.a.s.r.ManagedResourceStorage Loaded null at path _rest_managed.json using ZooKeeperStorageIO:path=/configs/cdcr-cluster2
[junit4] 2> 1022541 INFO (qtp1718818795-6899) [n:127.0.0.1:50963_solr c:cdcr-cluster2 s:shard1 r:core_node3 x:cdcr-cluster2_shard1_replica_n1] o.a.s.s.ZkIndexSchemaReader Creating ZooKeeper watch for the managed schema at /configs/cdcr-cluster2/managed-schema
[junit4] 2> 1022542 INFO (qtp1718818795-6899) [n:127.0.0.1:50963_solr c:cdcr-cluster2 s:shard1 r:core_node3 x:cdcr-cluster2_shard1_replica_n1] o.a.s.s.ZkIndexSchemaReader Current schema version 0 is already the latest
[junit4] 2> 1022543 INFO (qtp1718818795-6899) [n:127.0.0.1:50963_solr c:cdcr-cluster2 s:shard1 r:core_node3 x:cdcr-cluster2_shard1_replica_n1] o.a.s.h.ReplicationHandler Commits will be reserved for 10000ms.
[junit4] 2> 1022544 INFO (qtp1718818795-6905) [n:127.0.0.1:50963_solr c:cdcr-cluster2 s:shard2 r:core_node4 x:cdcr-cluster2_shard2_replica_n2] o.a.s.s.ZkIndexSchemaReader Current schema version 0 is already the latest
[junit4] 2> 1022545 INFO (qtp1718818795-6905) [n:127.0.0.1:50963_solr c:cdcr-cluster2 s:shard2 r:core_node4 x:cdcr-cluster2_shard2_replica_n2] o.a.s.h.ReplicationHandler Commits will be reserved for 10000ms.
[junit4] 2> 1022550 INFO (qtp1718818795-6905) [n:127.0.0.1:50963_solr c:cdcr-cluster2 s:shard2 r:core_node4 x:cdcr-cluster2_shard2_replica_n2] o.a.s.h.CdcrBufferStateManager Created znode /collections/cdcr-cluster2/cdcr/state/buffer
[junit4] 2> 1022553 INFO (qtp1718818795-6905) [n:127.0.0.1:50963_solr c:cdcr-cluster2 s:shard2 r:core_node4 x:cdcr-cluster2_shard2_replica_n2] o.a.s.h.CdcrProcessStateManager Created znode /collections/cdcr-cluster2/cdcr/state/process
[junit4] 2> 1022570 INFO (qtp1718818795-6905) [n:127.0.0.1:50963_solr c:cdcr-cluster2 s:shard2 r:core_node4 x:cdcr-cluster2_shard2_replica_n2] o.a.s.u.UpdateLog Could not find max version in index or recent updates, using new clock 1605067508495155200
[junit4] 2> 1022578 INFO (qtp1718818795-6905) [n:127.0.0.1:50963_solr c:cdcr-cluster2 s:shard2 r:core_node4 x:cdcr-cluster2_shard2_replica_n2] o.a.s.c.ZkShardTerms Successful update of terms at /collections/cdcr-cluster2/terms/shard2 to Terms{values={core_node4=0}, version=0}
[junit4] 2> 1022583 INFO (qtp1718818795-6905) [n:127.0.0.1:50963_solr c:cdcr-cluster2 s:shard2 r:core_node4 x:cdcr-cluster2_shard2_replica_n2] o.a.s.c.ShardLeaderElectionContext Enough replicas found to continue.
[junit4] 2> 1022583 INFO (qtp1718818795-6905) [n:127.0.0.1:50963_solr c:cdcr-cluster2 s:shard2 r:core_node4 x:cdcr-cluster2_shard2_replica_n2] o.a.s.c.ShardLeaderElectionContext I may be the new leader - try and sync
[junit4] 2> 1022583 INFO (qtp1718818795-6905) [n:127.0.0.1:50963_solr c:cdcr-cluster2 s:shard2 r:core_node4 x:cdcr-cluster2_shard2_replica_n2] o.a.s.c.SyncStrategy Sync replicas to https://127.0.0.1:50963/solr/cdcr-cluster2_shard2_replica_n2/
[junit4] 2> 1022584 INFO (qtp1718818795-6905) [n:127.0.0.1:50963_solr c:cdcr-cluster2 s:shard2 r:core_node4 x:cdcr-cluster2_shard2_replica_n2] o.a.s.c.SyncStrategy Sync Success - now sync replicas to me
[junit4] 2> 1022584 INFO (qtp1718818795-6905) [n:127.0.0.1:50963_solr c:cdcr-cluster2 s:shard2 r:core_node4 x:cdcr-cluster2_shard2_replica_n2] o.a.s.c.SyncStrategy https://127.0.0.1:50963/solr/cdcr-cluster2_shard2_replica_n2/ has no replicas
[junit4] 2> 1022584 INFO (qtp1718818795-6905) [n:127.0.0.1:50963_solr c:cdcr-cluster2 s:shard2 r:core_node4 x:cdcr-cluster2_shard2_replica_n2] o.a.s.c.ShardLeaderElectionContext Found all replicas participating in election, clear LIR
[junit4] 2> 1022594 INFO (qtp1718818795-6905) [n:127.0.0.1:50963_solr c:cdcr-cluster2 s:shard2 r:core_node4 x:cdcr-cluster2_shard2_replica_n2] o.a.s.c.ShardLeaderElectionContext I am the new leader: https://127.0.0.1:50963/solr/cdcr-cluster2_shard2_replica_n2/ shard2
[junit4] 2> 1022599 INFO (searcherExecutor-2732-thread-1-processing-n:127.0.0.1:50963_solr x:cdcr-cluster2_shard2_replica_n2 c:cdcr-cluster2 s:shard2 r:core_node4) [n:127.0.0.1:50963_solr c:cdcr-cluster2 s:shard2 r:core_node4 x:cdcr-cluster2_shard2_replica_n2] o.a.s.c.SolrCore [cdcr-cluster2_shard2_replica_n2] Registered new searcher Searcher@65bd9a64[cdcr-cluster2_shard2_replica_n2] main{ExitableDirectoryReader(UninvertingDirectoryReader())}
[junit4] 2> 1022621 INFO (qtp1718818795-6899) [n:127.0.0.1:50963_solr c:cdcr-cluster2 s:shard1 r:core_node3 x:cdcr-cluster2_shard1_replica_n1] o.a.s.u.UpdateLog Could not find max version in index or recent updates, using new clock 1605067508548632576
[junit4] 2> 1022623 INFO (zkCallback-1671-thread-1) [ ] o.a.s.h.CdcrLeaderStateManager Received new leader state @ cdcr-cluster2:shard2
[junit4] 2> 1022631 INFO (searcherExecutor-2733-thread-1-processing-n:127.0.0.1:50963_solr x:cdcr-cluster2_shard1_replica_n1 c:cdcr-cluster2 s:shard1 r:core_node3) [n:127.0.0.1:50963_solr c:cdcr-cluster2 s:shard1 r:core_node3 x:cdcr-cluster2_shard1_replica_n1] o.a.s.c.SolrCore [cdcr-cluster2_shard1_replica_n1] Registered new searcher Searcher@684f2885[cdcr-cluster2_shard1_replica_n1] main{ExitableDirectoryReader(UninvertingDirectoryReader())}
[junit4] 2> 1022657 INFO (qtp1718818795-6899) [n:127.0.0.1:50963_solr c:cdcr-cluster2 s:shard1 r:core_node3 x:cdcr-cluster2_shard1_replica_n1] o.a.s.c.ZkShardTerms Successful update of terms at /collections/cdcr-cluster2/terms/shard1 to Terms{values={core_node3=0}, version=0}
[junit4] 2> 1022661 INFO (qtp1718818795-6899) [n:127.0.0.1:50963_solr c:cdcr-cluster2 s:shard1 r:core_node3 x:cdcr-cluster2_shard1_replica_n1] o.a.s.c.ShardLeaderElectionContext Enough replicas found to continue.
[junit4] 2> 1022661 INFO (qtp1718818795-6899) [n:127.0.0.1:50963_solr c:cdcr-cluster2 s:shard1 r:core_node3 x:cdcr-cluster2_shard1_replica_n1] o.a.s.c.ShardLeaderElectionContext I may be the new leader - try and sync
[junit4] 2> 1022661 INFO (qtp1718818795-6899) [n:127.0.0.1:50963_solr c:cdcr-cluster2 s:shard1 r:core_node3 x:cdcr-cluster2_shard1_replica_n1] o.a.s.c.SyncStrategy Sync replicas to https://127.0.0.1:50963/solr/cdcr-cluster2_shard1_replica_n1/
[junit4] 2> 1022661 INFO (qtp1718818795-6899) [n:127.0.0.1:50963_solr c:cdcr-cluster2 s:shard1 r:core_node3 x:cdcr-cluster2_shard1_replica_n1] o.a.s.c.SyncStrategy Sync Success - now sync replicas to me
[junit4] 2> 1022662 INFO (qtp1718818795-6899) [n:127.0.0.1:50963_solr c:cdcr-cluster2 s:shard1 r:core_node3 x:cdcr-cluster2_shard1_replica_n1] o.a.s.c.SyncStrategy https://127.0.0.1:50963/solr/cdcr-cluster2_shard1_replica_n1/ has no replicas
[junit4] 2> 1022662 INFO (qtp1718818795-6899) [n:127.0.0.1:50963_solr c:cdcr-cluster2 s:shard1 r:core_node3 x:cdcr-cluster2_shard1_replica_n1] o.a.s.c.ShardLeaderElectionContext Found all replicas participating in election, clear LIR
[junit4] 2> 1022666 INFO (zkCallback-1671-thread-1) [ ] o.a.s.h.CdcrLeaderStateManager Received new leader state @ cdcr-cluster2:shard1
[junit4] 2> 1022669 INFO (qtp1718818795-6899) [n:127.0.0.1:50963_solr c:cdcr-cluster2 s:shard1 r:core_node3 x:cdcr-cluster2_shard1_replica_n1] o.a.s.c.ShardLeaderElectionContext I am the new leader: https://127.0.0.1:50963/solr/cdcr-cluster2_shard1_replica_n1/ shard1
[junit4] 2> 1022822 INFO (qtp1718818795-6899) [n:127.0.0.1:50963_solr c:cdcr-cluster2 s:shard1 r:core_node3 x:cdcr-cluster2_shard1_replica_n1] o.a.s.c.ZkController I am the leader, no recovery necessary
[junit4] 2> 1022824 INFO (qtp1718818795-6905) [n:127.0.0.1:50963_solr c:cdcr-cluster2 s:shard2 r:core_node4 x:cdcr-cluster2_shard2_replica_n2] o.a.s.c.ZkController I am the leader, no recovery necessary
[junit4] 2> 1022905 INFO (qtp1718818795-6905) [n:127.0.0.1:50963_solr c:cdcr-cluster2 s:shard2 r:core_node4 x:cdcr-cluster2_shard2_replica_n2] o.a.s.s.HttpSolrCall [admin] webapp=null path=/admin/cores params={qt=/admin/cores&collection.configName=cdcr-cluster2&newCollection=true&collection=cdcr-cluster2&version=2&replicaType=NRT&coreNodeName=core_node4&name=cdcr-cluster2_shard2_replica_n2&action=CREATE&numShards=2&shard=shard2&property.solr.directoryFactory=solr.StandardDirectoryFactory&wt=javabin} status=0 QTime=2270
[junit4] 2> 1023066 INFO (zkCallback-1671-thread-1) [ ] o.a.s.c.c.ZkStateReader A cluster state change: [WatchedEvent state:SyncConnected type:NodeDataChanged path:/collections/cdcr-cluster2/state.json] for collection [cdcr-cluster2] has occurred - updating... (live nodes size: [1])
[junit4] 2> 1023167 INFO (qtp1718818795-6899) [n:127.0.0.1:50963_solr c:cdcr-cluster2 s:shard1 r:core_node3 x:cdcr-cluster2_shard1_replica_n1] o.a.s.s.HttpSolrCall [admin] webapp=null path=/admin/cores params={qt=/admin/cores&collection.configName=cdcr-cluster2&newCollection=true&collection=cdcr-cluster2&version=2&replicaType=NRT&coreNodeName=core_node3&name=cdcr-cluster2_shard1_replica_n1&action=CREATE&numShards=2&shard=shard1&property.solr.directoryFactory=solr.StandardDirectoryFactory&wt=javabin} status=0 QTime=2585
[junit4] 2> 1023171 INFO (qtp1718818795-6903) [n:127.0.0.1:50963_solr ] o.a.s.h.a.CollectionsHandler Wait for new collection to be active for at most 30 seconds. Check all shard replicas
[junit4] 2> 1023171 INFO (qtp1718818795-6903) [n:127.0.0.1:50963_solr ] o.a.s.s.HttpSolrCall [admin] webapp=null path=/admin/collections params={collection.configName=cdcr-cluster2&maxShardsPerNode=2&name=cdcr-cluster2&nrtReplicas=1&action=CREATE&numShards=2&property.solr.directoryFactory=solr.StandardDirectoryFactory&wt=javabin&version=2} status=0 QTime=3983
[junit4] 2> 1023217 INFO (zkConnectionManagerCallback-1731-thread-1) [ ] o.a.s.c.c.ConnectionManager zkClient has connected
[junit4] 2> 1023218 INFO (qtp504181695-6958) [n:127.0.0.1:52108_solr c:cdcr-cluster1 s:shard2 r:core_node4 x:cdcr-cluster1_shard2_replica_n3] o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (0) -> (1)
[junit4] 2> 1023220 INFO (qtp504181695-6958) [n:127.0.0.1:52108_solr c:cdcr-cluster1 s:shard2 r:core_node4 x:cdcr-cluster1_shard2_replica_n3] o.a.s.c.s.i.ZkClientClusterStateProvider Cluster at 127.0.0.1:45672/solr ready
[junit4] 2> 1023284 INFO (OverseerCollectionConfigSetProcessor-74086820675584003-127.0.0.1:50963_solr-n_0000000000) [ ] o.a.s.c.OverseerTaskQueue Response ZK path: /overseer/collection-queue-work/qnr-0000000000 doesn't exist. Requestor may have disconnected from ZooKeeper
[junit4] 2> 1023410 INFO (qtp1718818795-6902) [n:127.0.0.1:50963_solr c:cdcr-cluster2 s:shard2 r:core_node4 x:cdcr-cluster2_shard2_replica_n2] o.a.s.h.CdcrRequestHandler Found maxVersionFromRecent 0 maxVersionFromIndex 0
[junit4] 2> 1023411 INFO (qtp1718818795-6902) [n:127.0.0.1:50963_solr c:cdcr-cluster2 s:shard2 r:core_node4 x:cdcr-cluster2_shard2_replica_n2] o.a.s.c.S.Request [cdcr-cluster2_shard2_replica_n2] webapp=/solr path=/cdcr params={action=SHARDCHECKPOINT&wt=javabin&version=2} status=0 QTime=112
[junit4] 2> 1023788 INFO (qtp1718818795-6903) [n:127.0.0.1:50963_solr c:cdcr-cluster2 s:shard1 r:core_node3 x:cdcr-cluster2_shard1_replica_n1] o.a.s.h.CdcrRequestHandler Found maxVersionFromRecent 0 maxVersionFromIndex 0
[junit4] 2> 1023788 INFO (qtp1718818795-6903) [n:127.0.0.1:50963_solr c:cdcr-cluster2 s:shard1 r:core_node3 x:cdcr-cluster2_shard1_replica_n1] o.a.s.c.S.Request [cdcr-cluster2_shard1_replica_n1] webapp=/solr path=/cdcr params={action=SHARDCHECKPOINT&wt=javabin&version=2} status=0 QTime=464
[junit4] 2> 1023831 INFO (qtp1718818795-6906) [n:127.0.0.1:50963_solr c:cdcr-cluster2 s:shard2 r:core_node4 x:cdcr-cluster2_shard2_replica_n2] o.a.s.c.S.Request [cdcr-cluster2_shard2_replica_n2] webapp=/solr path=/cdcr params={_stateVer_=cdcr-cluster2:4&action=COLLECTIONCHECKPOINT&wt=javabin&version=2} status=0 QTime=607
[junit4] 2> 1023832 INFO (qtp504181695-6958) [n:127.0.0.1:52108_solr c:cdcr-cluster1 s:shard2 r:core_node4 x:cdcr-cluster1_shard2_replica_n3] o.a.s.h.CdcrReplicatorManager Create new update log reader for target cdcr-cluster2 with checkpoint -1 @ cdcr-cluster1:shard2
[junit4] 2> 1023832 INFO (qtp504181695-6958) [n:127.0.0.1:52108_solr c:cdcr-cluster1 s:shard2 r:core_node4 x:cdcr-cluster1_shard2_replica_n3] o.a.s.h.CdcrReplicatorManager Attempting to bootstrap target collection: cdcr-cluster2, shard: shard2
[junit4] 2> 1023852 INFO (qtp504181695-6958) [n:127.0.0.1:52108_solr c:cdcr-cluster1 s:shard2 r:core_node4 x:cdcr-cluster1_shard2_replica_n3] o.a.s.h.CdcrReplicatorManager Submitting bootstrap task to executor
[junit4] 2> 1023894 INFO (cdcr-bootstrap-status-1727-thread-1-processing-n:127.0.0.1:52108_solr x:cdcr-cluster1_shard2_replica_n3 c:cdcr-cluster1 s:shard2 r:core_node4) [n:127.0.0.1:52108_solr c:cdcr-cluster1 s:shard2 r:core_node4 x:cdcr-cluster1_shard2_replica_n3] o.a.s.h.CdcrReplicatorManager Attempting to bootstrap target collection: cdcr-cluster2 shard: shard2 leader: https://127.0.0.1:50963/solr/cdcr-cluster2_shard2_replica_n2/
[junit4] 2> 1023895 INFO (zkCallback-1696-thread-4) [ ] o.a.s.h.CdcrProcessStateManager The CDCR process state has changed: WatchedEvent state:SyncConnected type:NodeDataChanged path:/collections/cdcr-cluster1/cdcr/state/process @ cdcr-cluster1:shard1
[junit4] 2> 1023896 INFO (zkCallback-1696-thread-1) [ ] o.a.s.h.CdcrProcessStateManager The CDCR process state has changed: WatchedEvent state:SyncConnected type:NodeDataChanged path:/collections/cdcr-cluster1/cdcr/state/process @ cdcr-cluster1:shard2
[junit4] 2> 1023896 INFO (zkCallback-1696-thread-1) [ ] o.a.s.h.CdcrProcessStateManager Received new CDCR process state from watcher: STARTED @ cdcr-cluster1:shard2
[junit4] 2> 1023896 INFO (zkCallback-1696-thread-4) [ ] o.a.s.h.CdcrProcessStateManager Received new CDCR process state from watcher: STARTED @ cdcr-cluster1:shard1
[junit4] 2> 1023897 INFO (qtp504181695-6958) [n:127.0.0.1:52108_solr c:cdcr-cluster1 s:shard2 r:core_node4 x:cdcr-cluster1_shard2_replica_n3] o.a.s.c.S.Request [cdcr-cluster1_shard2_replica_n3] webapp=/solr path=/cdcr params={qt=/cdcr&_stateVer_=cdcr-cluster1:7&action=start&wt=javabin&version=2} status=0 QTime=719
[junit4] 2> 1024007 INFO (zkConnectionManagerCallback-1736-thread-1) [ ] o.a.s.c.c.ConnectionManager zkClient has connected
[junit4] 2> 1024008 INFO (zkCallback-1696-thread-4) [ ] o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (0) -> (1)
[junit4] 2> 1024009 INFO (zkCallback-1696-thread-4) [ ] o.a.s.c.s.i.ZkClientClusterStateProvider Cluster at 127.0.0.1:45672/solr ready
[junit4] 2> 1024053 INFO (qtp1718818795-6905) [n:127.0.0.1:50963_solr c:cdcr-cluster2 s:shard1 r:core_node3 x:cdcr-cluster2_shard1_replica_n1] o.a.s.h.CdcrRequestHandler Found maxVersionFromRecent 0 maxVersionFromIndex 0
[junit4] 2> 1024053 INFO (qtp1718818795-6905) [n:127.0.0.1:50963_solr c:cdcr-cluster2 s:shard1 r:core_node3 x:cdcr-cluster2_shard1_replica_n1] o.a.s.c.S.Request [cdcr-cluster2_shard1_replica_n1] webapp=/solr path=/cdcr params={action=SHARDCHECKPOINT&wt=javabin&version=2} status=0 QTime=0
[junit4] 2> 1024125 INFO (qtp1718818795-6899) [n:127.0.0.1:50963_solr c:cdcr-cluster2 s:shard2 r:core_node4 x:cdcr-cluster2_shard2_replica_n2] o.a.s.h.CdcrRequestHandler Found maxVersionFromRecent 0 maxVersionFromIndex 0
[junit4] 2> 1024125 INFO (qtp1718818795-6899) [n:127.0.0.1:50963_solr c:cdcr-cluster2 s:shard2 r:core_node4 x:cdcr-cluster2_shard2_replica_n2] o.a.s.c.S.Request [cdcr-cluster2_shard2_replica_n2] webapp=/solr path=/cdcr params={action=SHARDCHECKPOINT&wt=javabin&version=2} status=0 QTime=0
[junit4] 2> 1024143 INFO (qtp1718818795-6902) [n:127.0.0.1:50963_solr c:cdcr-cluster2 s:shard2 r:core_node4 x:cdcr-cluster2_shard2_replica_n2] o.a.s.c.S.Request [cdcr-cluster2_shard2_replica_n2] webapp=/solr path=/cdcr params={_stateVer_=cdcr-cluster2:4&action=COLLECTIONCHECKPOINT&wt=javabin&version=2} status=0 QTime=130
[junit4] 2> 1024183 INFO (zkCallback-1696-thread-4) [ ] o.a.s.h.CdcrReplicatorManager Create new update log reader for target cdcr-cluster2 with checkpoint -1 @ cdcr-cluster1:shard1
[junit4] 2> 1024184 INFO (zkCallback-1696-thread-4) [ ] o.a.s.h.CdcrReplicatorManager Attempting to bootstrap target collection: cdcr-cluster2, shard: shard1
[junit4] 2> 1024184 INFO (zkCallback-1696-thread-4) [ ] o.a.s.h.CdcrReplicatorManager Submitting bootstrap task to executor
[junit4] 2> 1024184 INFO (qtp1718818795-6905) [n:127.0.0.1:50963_solr c:cdcr-cluster2 s:shard2 r:core_node4 x:cdcr-cluster2_shard2_replica_n2] o.a.s.c.S.Request [cdcr-cluster2_shard2_replica_n2] webapp=/solr path=/cdcr params={qt=/cdcr&masterUrl=https://127.0.0.1:52108/solr/cdcr-cluster1_shard2_replica_n3/&action=BOOTSTRAP&wt=javabin&version=2} status=0 QTime=63
[junit4] 2> 1024187 INFO (qtp1718818795-6903) [n:127.0.0.1:50963_solr c:cdcr-cluster2 s:shard2 r:core_node4 x:cdcr-cluster2_shard2_replica_n2] o.a.s.c.S.Request [cdcr-cluster2_shard2_replica_n2] webapp=/solr path=/cdcr params={qt=/cdcr&action=BOOTSTRAP_STATUS&wt=javabin&version=2} status=0 QTime=0
[junit4] 2> 1024187 INFO (cdcr-bootstrap-status-1727-thread-1-processing-n:127.0.0.1:52108_solr x:cdcr-cluster1_shard2_replica_n3 c:cdcr-cluster1 s:shard2 r:core_node4) [n:127.0.0.1:52108_solr c:cdcr-cluster1 s:shard2 r:core_node4 x:cdcr-cluster1_shard2_replica_n3] o.a.s.h.CdcrReplicatorManager CDCR bootstrap running for 1 seconds, sleeping for 2000 ms
[junit4] 2> 1024317 INFO (cdcr-bootstrap-status-1732-thread-1) [ ] o.a.s.h.CdcrReplicatorManager Attempting to bootstrap target collection: cdcr-cluster2 shard: shard1 leader: https://127.0.0.1:50963/solr/cdcr-cluster2_shard1_replica_n1/
[junit4] 2> 1024323 INFO (recoveryExecutor-1668-thread-1-processing-n:127.0.0.1:50963_solr x:cdcr-cluster2_shard2_replica_n2 c:cdcr-cluster2 s:shard2 r:core_node4) [n:127.0.0.1:50963_solr c:cdcr-cluster2 s:shard2 r:core_node4 x:cdcr-cluster2_shard2_replica_n2] o.a.s.u.UpdateLog Starting to buffer updates. FSUpdateLog{state=ACTIVE, tlog=null}
[junit4] 2> 1024339 INFO (qtp1718818795-6899) [n:127.0.0.1:50963_solr c:cdcr-cluster2 s:shard1 r:core_node3 x:cdcr-cluster2_shard1_replica_n1] o.a.s.c.S.Request [cdcr-cluster2_shard1_replica_n1] webapp=/solr path=/cdcr params={qt=/cdcr&masterUrl=https://127.0.0.1:52108/solr/cdcr-cluster1_shard1_replica_n1/&action=BOOTSTRAP&wt=javabin&version=2} status=0 QTime=6
[junit4] 2> 1024340 INFO (qtp1718818795-6906) [n:127.0.0.1:50963_solr c:cdcr-cluster2 s:shard1 r:core_node3 x:cdcr-cluster2_shard1_replica_n1] o.a.s.c.S.Request [cdcr-cluster2_shard1_replica_n1] webapp=/solr path=/cdcr params={qt=/cdcr&action=BOOTSTRAP_STATUS&wt=javabin&version=2} status=0 QTime=0
[junit4] 2> 1024340 INFO (cdcr-bootstrap-status-1732-thread-1) [ ] o.a.s.h.CdcrReplicatorManager CDCR bootstrap running for 1 seconds, sleeping for 2000 ms
[junit4] 2> 1024341 INFO (recoveryExecutor-1668-thread-2-processing-n:127.0.0.1:50963_solr x:cdcr-cluster2_shard1_replica_n1 c:cdcr-cluster2 s:shard1 r:core_node3) [n:127.0.0.1:50963_solr c:cdcr-cluster2 s:shard1 r:core_node3 x:cdcr-cluster2_shard1_replica_n1] o.a.s.u.UpdateLog Starting to buffer updates. FSUpdateLog{state=ACTIVE, tlog=null}
[junit4] 2> 1024497 INFO (qtp504181695-6956) [n:127.0.0.1:52108_solr c:cdcr-cluster1 s:shard2 r:core_node4 x:cdcr-cluster1_shard2_replica_n3] o.a.s.u.DirectUpdateHandler2 start commit{_version_=1605067510515761152,optimize=false,openSearcher=false,waitSearcher=true,expungeDeletes=false,softCommit=false,prepareCommit=false}
[junit4] 2> 1024597 INFO (qtp504181695-6959) [n:127.0.0.1:52108_solr c:cdcr-cluster1 s:shard1 r:core_node2 x:cdcr-cluster1_shard1_replica_n1] o.a.s.u.DirectUpdateHandler2 start commit{_version_=1605067510620618752,optimize=false,openSearcher=false,waitSearcher=true,expungeDeletes=false,softCommit=false,prepareCommit=false}
[junit4] 2> 1024597 INFO (qtp504181695-6956) [n:127.0.0.1:52108_solr c:cdcr-cluster1 s:shard2 r:core_node4 x:cdcr-cluster1_shard2_replica_n3] o.a.s.u.DirectUpdateHandler2 No uncommitted changes. Skipping IW.commit.
[junit4] 2> 1024615 INFO (qtp504181695-6959) [n:127.0.0.1:52108_solr c:cdcr-cluster1 s:shard1 r:core_node2 x:cdcr-cluster1_shard1_replica_n1] o.a.s.u.DirectUpdateHandler2 No uncommitted changes. Skipping IW.commit.
[junit4] 2> 1024616 INFO (qtp504181695-6956) [n:127.0.0.1:52108_solr c:cdcr-cluster1 s:shard2 r:core_node4 x:cdcr-cluster1_shard2_replica_n3] o.a.s.u.DirectUpdateHandler2 end_commit_flush
[junit4] 2> 1024616 INFO (qtp504181695-6956) [n:127.0.0.1:52108_solr c:cdcr-cluster1 s:shard2 r:core_node4 x:cdcr-cluster1_shard2_replica_n3] o.a.s.c.S.Request [cdcr-cluster1_shard2_replica_n3] webapp=/solr path=/update params={waitSearcher=true&openSearcher=false&commit=true&softCommit=false&commit_end_point=true&wt=javabin&version=2} status=0 QTime=120
[junit4] 2> 1024637 INFO (qtp504181695-6956) [n:127.0.0.1:52108_solr c:cdcr-cluster1 s:shard2 r:core_node4 x:cdcr-cluster1_shard2_replica_n3] o.a.s.c.S.Request [cdcr-cluster1_shard2_replica_n3] webapp=/solr path=/replication params={qt=/replication&wt=javabin&version=2&command=indexversion} status=0 QTime=0
[junit4] 2> 1024638 INFO (recoveryExecutor-1668-thread-1-processing-n:127.0.0.1:50963_solr x:cdcr-cluster2_shard2_replica_n2 c:cdcr-cluster2 s:shard2 r:core_node4) [n:127.0.0.1:50963_solr c:cdcr-cluster2 s:shard2 r:core_node4 x:cdcr-cluster2_shard2_replica_n2] o.a.s.h.IndexFetcher Master's generation: 1
[junit4] 2> 1024638 INFO (recoveryExecutor-1668-thread-1-processing-n:127.0.0.1:50963_solr x:cdcr-cluster2_shard2_replica_n2 c:cdcr-cluster2 s:shard2 r:core_node4) [n:127.0.0.1:50963_solr c:cdcr-cluster2 s:shard2 r:core_node4 x:cdcr-cluster2_shard2_replica_n2] o.a.s.h.IndexFetcher Master's version: 0
[junit4] 2> 1024638 INFO (recoveryExecutor-1668-thread-1-processing-n:127.0.0.1:50963_solr x:cdcr-cluster2_shard2_replica_n2 c:cdcr-cluster2 s:shard2 r:core_node4) [n:127.0.0.1:50963_solr c:cdcr-cluster2 s:shard2 r:core_node4 x:cdcr-cluster2_shard2_replica_n2] o.a.s.h.IndexFetcher Slave's generation: 1
[junit4] 2> 1024638 INFO (recoveryExecutor-1668-thread-1-processing-n:127.0.0.1:50963_solr x:cdcr-cluster2_shard2_replica_n2 c:cdcr-cluster2 s:shard2 r:core_node4) [n:127.0.0.1:50963_solr c:cdcr-cluster2 s:shard2 r:core_node4 x:cdcr-cluster2_shard2_replica_n2] o.a.s.h.IndexFetcher Slave's version: 0
[junit4] 2> 1024638 INFO (recoveryExecutor-1668-thread-1-processing-n:127.0.0.1:50963_solr x:cdcr-cluster2_shard2_replica_n2 c:cdcr-cluster2 s:shard2 r:core_node4) [n:127.0.0.1:50963_solr c:cdcr-cluster2 s:shard2 r:core_node4 x:cdcr-cluster2_shard2_replica_n2] o.a.s.h.IndexFetcher New index in Master. Deleting mine...
[junit4] 2> 1024679 INFO (qtp504181695-6959) [n:127.0.0.1:52108_solr c:cdcr-cluster1 s:shard1 r:core_node2 x:cdcr-cluster1_shard1_replica_n1] o.a.s.u.DirectUpdateHandler2 end_commit_flush
[junit4] 2> 1024679 INFO (qtp504181695-6959) [n:127.0.0.1:52108_solr c:cdcr-cluster1 s:shard1 r:core_node2 x:cdcr-cluster1_shard1_replica_n1] o.a.s.c.S.Request [cdcr-cluster1_shard1_replica_n1] webapp=/solr path=/update params={waitSearcher=true&openSearcher=false&commit=true&softCommit=false&commit_end_point=true&wt=javabin&version=2} status=0 QTime=82
[junit4] 2> 1024700 INFO (qtp504181695-6956) [n:127.0.0.1:52108_solr c:cdcr-cluster1 s:shard1 r:core_node2 x:cdcr-cluster1_shard1_replica_n1] o.a.s.c.S.Request [cdcr-cluster1_shard1_replica_n1] webapp=/solr path=/replication params={qt=/replication&wt=javabin&version=2&command=indexversion} status=0 QTime=0
[junit4] 2> 1024719 INFO (recoveryExecutor-1668-thread-2-processing-n:127.0.0.1:50963_solr x:cdcr-cluster2_shard1_replica_n1 c:cdcr-cluster2 s:shard1 r:core_node3) [n:127.0.0.1:50963_solr c:cdcr-cluster2 s:shard1 r:core_node3 x:cdcr-cluster2_shard1_replica_n1] o.a.s.h.IndexFetcher Master's generation: 1
[junit4] 2> 1024719 INFO (recoveryExecutor-1668-thread-2-processing-n:127.0.0.1:50963_solr x:cdcr-cluster2_shard1_replica_n1 c:cdcr-cluster2 s:shard1 r:core_node3) [n:127.0.0.1:50963_solr c:cdcr-cluster2 s:shard1 r:core_node3 x:cdcr-cluster2_shard1_replica_n1] o.a.s.h.IndexFetcher Master's version: 0
[junit4] 2> 1024719 INFO (recoveryExecutor-1668-thread-2-processing-n:127.0.0.1:50963_solr x:cdcr-cluster2_shard1_replica_n1 c:cdcr-cluster2 s:shard1 r:core_node3) [n:127.0.0.1:50963_solr c:cdcr-cluster2 s:shard1 r:core_node3 x:cdcr-cluster2_shard1_replica_n1] o.a.s.h.IndexFetcher Slave's generation: 1
[junit4] 2> 1024719 INFO (recoveryExecutor-1668-thread-2-processing-n:127.0.0.1:50963_solr x:cdcr-cluster2_shard1_replica_n1 c:cdcr-cluster2 s:shard1 r:core_node3) [n:127.0.0.1:50963_solr c:cdcr-cluster2 s:shard1 r:core_node3 x:cdcr-cluster2_shard1_replica_n1] o.a.s.h.IndexFetcher Slave's version: 0
[junit4] 2> 1024719 INFO (recoveryExecutor-1668-thread-2-processing-n:127.0.0.1:50963_solr x:cdcr-cluster2_shard1_replica_n1 c:cdcr-cluster2 s:shard1 r:core_node3) [n:127.0.0.1:50963_solr c:cdcr-cluster2 s:shard1 r:core_node3 x:cdcr-cluster2_shard1_replica_n1] o.a.s.h.IndexFetcher New index in Master. Deleting mine...
[junit4] 2> 1024721 INFO (recoveryExecutor-1668-thread-1-processing-n:127.0.0.1:50963_solr x:cdcr-cluster2_shard2_replica_n2 c:cdcr-cluster2 s:shard2 r:core_node4) [n:127.0.0.1:50963_solr c:cdcr-cluster2 s:shard2 r:core_node4 x:cdcr-cluster2_shard2_replica_n2] o.a.s.s.SolrIndexSearcher Opening [Searcher@35f2d772[cdcr-cluster2_shard2_replica_n2] main]
[junit4] 2> 1024836 INFO (searcherExecutor-2732-thread-1-processing-n:127.0.0.1:50963_solr x:cdcr-cluster2_shard2_replica_n2 c:cdcr-cluster2 s:shard2 r:core_node4) [n:127.0.0.1:50963_solr c:cdcr-cluster2 s:shard2 r:core_node4 x:cdcr-cluster2_shard2_replica_n2] o.a.s.c.SolrCore [cdcr-cluster2_shard2_replica_n2] Registered new searcher Searcher@35f2d772[cdcr-cluster2_shard2_replica_n2] main{ExitableDirectoryReader(UninvertingDirectoryReader())}
[junit4] 2> 1024847 INFO (recoveryExecutor-1668-thread-1-processing-n:127.0.0.1:50963_solr x:cdcr-cluster2_shard2_replica_n2 c:cdcr-cluster2 s:shard2 r:core_node4) [n:127.0.0.1:50963_solr c:cdcr-cluster2 s:shard2 r:core_node4 x:cdcr-cluster2_shard2_replica_n2] o.a.s.h.CdcrRequestHandler No replay needed.
[junit4] 2> 1024854 INFO (recoveryExecutor-1668-thread-2-processing-n:127.0.0.1:50963_solr x:cdcr-cluster2_shard1_replica_n1 c:cdcr-cluster2 s:shard1 r:core_node3) [n:127.0.0.1:50963_solr c:cdcr-cluster2 s:shard1 r:core_node3 x:cdcr-cluster2_shard1_replica_n1] o.a.s.s.SolrIndexSearcher Opening [Searcher@55cae9f4[cdcr-cluster2_shard1_replica_n1] main]
[junit4] 2> 1024856 INFO (searcherExecutor-2733-thread-1-processing-n:127.0.0.1:50963_solr x:cdcr-cluster2_shard1_replica_n1 c:cdcr-cluster2 s:shard1 r:core_node3) [n:127.0.0.1:50963_solr c:cdcr-cluster2 s:shard1 r:core_node3 x:cdcr-cluster2_shard1_replica_n1] o.a.s.c.SolrCore [cdcr-cluster2_shard1_replica_n1] Registered new searcher Searcher@55cae9f4[cdcr-cluster2_shard1_replica_n1] main{ExitableDirectoryReader(UninvertingDirectoryReader())}
[junit4] 2> 1024856 INFO (recoveryExecutor-1668-thread-2-processing-n:127.0.0.1:50963_solr x:cdcr-cluster2_shard1_replica_n1 c:cdcr-cluster2 s:shard1 r:core_node3) [n:127.0.0.1:50963_solr c:cdcr-cluster2 s:shard1 r:core_node3 x:cdcr-cluster2_shard1_replica_n1] o.a.s.h.CdcrRequestHandler No replay needed.
[junit4] 2> 1025939 INFO (TEST-CdcrBidirectionalTest.testBiDir-seed#[FBD20284F364263D]) [ ] o.a.s.c.c.CdcrBidirectionalTest Adding 10 docs with commit=true, numDocs=100
[junit4] 2> 1026028 INFO (qtp504181695-6957) [n:127.0.0.1:52108_solr c:cdcr-cluster1 s:shard1 r:core_node2 x:cdcr-cluster1_shard1_replica_n1] o.a.s.c.ZkShardTerms Successful update of terms at /collections/cdcr-cluster1/terms/shard1 to Terms{values={core_node2=1}, version=1}
[junit4] 2> 1026029 INFO (qtp504181695-6957) [n:127.0.0.1:52108_solr c:cdcr-cluster1 s:shard1 r:core_node2 x:cdcr-cluster1_shard1_replica_n1] o.a.s.c.S.Request [cdcr-cluster1_shard1_replica_n1] webapp=/solr path=/update params={_stateVer_=cdcr-cluster1:7&wt=javabin&version=2} status=0 QTime=61
[junit4] 2> 1026029 INFO (qtp504181695-6958) [n:127.0.0.1:52108_solr c:cdcr-cluster1 s:shard2 r:core_node4 x:cdcr-cluster1_shard2_replica_n3] o.a.s.c.ZkShardTerms Successful update of terms at /collections/cdcr-cluster1/terms/shard2 to Terms{values={core_node4=1}, version=1}
[junit4] 2> 1026029 INFO (qtp504181695-6958) [n:127.0.0.1:52108_solr c:cdcr-cluster1 s:shard2 r:core_node4 x:cdcr-cluster1_shard2_replica_n3] o.a.s.c.S.Request [cdcr-cluster1_shard2_replica_n3] webapp=/solr path=/update params={_stateVer_=cdcr-cluster1:7&wt=javabin&version=2} status=0 QTime=21
[junit4] 2> 1026139 INFO (qtp504181695-6956) [n:127.0.0.1:52108_solr c:cdcr-cluster1 s:shard2 r:core_node4 x:cdcr-cluster1_shard2_replica_n3] o.a.s.u.DirectUpdateHandler2 start commit{_version_=1605067512237522944,optimize=false,openSearcher=true,waitSearcher=true,expungeDeletes=false,softCommit=false,prepareCommit=false}
[junit4] 2> 1026139 INFO (qtp504181695-6956) [n:127.0.0.1:52108_solr c:cdcr-cluster1 s:shard2 r:core_node4 x:cdcr-cluster1_shard2_replica_n3] o.a.s.u.SolrIndexWriter Calling setCommitData with IW:org.apache.solr.update.SolrIndexWriter@6d8e75a4 commitCommandVersion:1605067512237522944
[junit4] 2> 1026150 INFO (qtp504181695-6958) [n:127.0.0.1:52108_solr c:cdcr-cluster1 s:shard1 r:core_node2 x:cdcr-cluster1_shard1_replica_n1] o.a.s.u.DirectUpdateHandler2 start commit{_version_=1605067512249057280,optimize=false,openSearcher=true,waitSearcher=true,expungeDeletes=false,softCommit=false,prepareCommit=false}
[junit4] 2> 1026151 INFO (qtp504181695-6958) [n:127.0.0.1:52108_solr c:cdcr-cluster1 s:shard1 r:core_node2 x:cdcr-cluster1_shard1_replica_n1] o.a.s.u.SolrIndexWriter Calling setCommitData with IW:org.apache.solr.update.SolrIndexWriter@7d06e1cb commitCommandVersion:1605067512249057280
[junit4] 2> 1026189 INFO (qtp1718818795-6903) [n:127.0.0.1:50963_solr c:cdcr-cluster2 s:shard2 r:core_node4 x:cdcr-cluster2_shard2_replica_n2] o.a.s.c.S.Request [cdcr-cluster2_shard2_replica_n2] webapp=/solr path=/cdcr params={qt=/cdcr&action=BOOTSTRAP_STATUS&wt=javabin&version=2} status=0 QTime=0
[junit4] 2> 1026189 INFO (cdcr-bootstrap-status-1727-thread-1-processing-n:127.0.0.1:52108_solr x:cdcr-cluster1_shard2_replica_n3 c:cdcr-cluster1 s:shard2 r:core_node4) [n:127.0.0.1:52108_solr c:cdcr-cluster1 s:shard2 r:core_node4 x:cdcr-cluster1_shard2_replica_n3] o.a.s.h.CdcrReplicatorManager CDCR bootstrap successful in 3 seconds
[junit4] 2> 1026227 INFO (qtp504181695-6958) [n:127.0.0.1:52108_solr c:cdcr-cluster1 s:shard1 r:core_node2 x:cdcr-cluster1_shard1_replica_n1] o.a.s.s.SolrIndexSearcher Opening [Searcher@27834724[cdcr-cluster1_shard1_replica_n1] main]
[junit4] 2> 1026229 INFO (searcherExecutor-2721-thread-1-processing-n:127.0.0.1:52108_solr x:cdcr-cluster1_shard1_replica_n1 c:cdcr-cluster1 s:shard1 r:core_node2) [n:127.0.0.1:52108_solr c:cdcr-cluster1 s:shard1 r:core_node2 x:cdcr-cluster1_shard1_replica_n1] o.a.s.c.SolrCore [cdcr-cluster1_shard1_replica_n1] Registered new searcher Searcher@27834724[cdcr-cluster1_shard1_replica_n1] main{ExitableDirectoryReader(UninvertingDirectoryReader(Uninverting(_0(7.5.0):C43)))}
[junit4] 2> 1026246 INFO (qtp1718818795-6903) [n:127.0.0.1:50963_solr c:cdcr-cluster2 s:shard1 r:core_node3 x:cdcr-cluster2_shard1_replica_n1] o.a.s.h.CdcrRequestHandler Found maxVersionFromRecent 0 maxVersionFromIndex 0
[junit4] 2> 1026246 INFO (qtp1718818795-6903) [n:127.0.0.1:50963_solr c:cdcr-cluster2 s:shard1 r:core_node3 x:cdcr-cluster2_shard1_replica_n1] o.a.s.c.S.Request [cdcr-cluster2_shard1_replica_n1] webapp=/solr path=/cdcr params={action=SHARDCHECKPOINT&wt=javabin&version=2} status=0 QTime=0
[junit4] 2> 1026253 INFO (qtp1718818795-6906) [n:127.0.0.1:50963_solr c:cdcr-cluster2 s:shard2 r:core_node4 x:cdcr-cluster2_shard2_replica_n2] o.a.s.h.CdcrRequestHandler Found maxVersionFromRecent 0 maxVersionFromIndex 0
[junit4] 2> 1026253 INFO (qtp1718818795-6906) [n:127.0.0.1:50963_solr c:cdcr-cluster2 s:shard2 r:core_node4 x:cdcr-cluster2_shard2_replica_n2] o.a.s.c.S.Request [cdcr-cluster2_shard2_replica_n2] webapp=/solr path=/cdcr params={action=SHARDCHECKPOINT&wt=javabin&version=2} status=0 QTime=0
[junit4] 2> 1026254 INFO (qtp504181695-6956) [n:127.0.0.1:52108_solr c:cdcr-cluster1 s:shard2 r:core_node4 x:cdcr-cluster1_shard2_replica_n3] o.a.s.s.SolrIndexSearcher Opening [Searcher@3df1d250[cdcr-cluster1_shard2_replica_n3] main]
[junit4] 2> 1026256 INFO (searcherExecutor-2720-thread-1-processing-n:127.0.0.1:52108_solr x:cdcr-cluster1_shard2_replica_n3 c:cdcr-cluster1 s:shard2 r:core_node4) [n:127.0.0.1:52108_solr c:cdcr-cluster1 s:shard2 r:core_node4 x:cdcr-cluster1_shard2_replica_n3] o.a.s.c.SolrCore [cdcr-cluster1_shard2_replica_n3] Registered new searcher Searcher@3df1d250[cdcr-cluster1_shard2_replica_n3] main{ExitableDirectoryReader(UninvertingDirectoryReader(Uninverting(_0(7.5.0):C57)))}
[junit4] 2> 1026274 INFO (qtp1718818795-6899) [n:127.0.0.1:50963_solr c:cdcr-cluster2 s:shard1 r:core_node3 x:cdcr-cluster2_shard1_replica_n1] o.a.s.c.S.Request [cdcr-cluster2_shard1_replica_n1] webapp=/solr path=/cdcr params={_stateVer_=cdcr-cluster2:4&action=COLLECTIONCHECKPOINT&wt=javabin&version=2} status=0 QTime=83
[junit4] 2> 1026274 INFO (cdcr-bootstrap-status-1727-thread-1-processing-n:127.0.0.1:52108_solr x:cdcr-cluster1_shard2_replica_n3 c:cdcr-cluster1 s:shard2 r:core_node4) [n:127.0.0.1:52108_solr c:cdcr-cluster1 s:shard2 r:core_node4 x:cdcr-cluster1_shard2_replica_n3] o.a.s.h.CdcrReplicatorManager Create new update log reader for target cdcr-cluster2 with checkpoint -1 @ cdcr-cluster1:shard2
[junit4] 2> 1026275 INFO (cdcr-bootstrap-status-1727-thread-1-processing-n:127.0.0.1:52108_solr x:cdcr-cluster1_shard2_replica_n3 c:cdcr-cluster1 s:shard2 r:core_node4) [n:127.0.0.1:52108_solr c:cdcr-cluster1 s:shard2 r:core_node4 x:cdcr-cluster1_shard2_replica_n3] o.a.s.h.CdcrReplicatorManager Bootstrap successful, giving the go-ahead to replicator
[junit4] 2> 1026280 INFO (qtp504181695-6958) [n:127.0.0.1:52108_solr c:cdcr-cluster1 s:shard1 r:core_node2 x:cdcr-cluster1_shard1_replica_n1] o.a.s.u.DirectUpdateHandler2 end_commit_flush
[junit4] 2> 1026281 INFO (qtp504181695-6958) [n:127.0.0.1:52108_solr c:cdcr-cluster1 s:shard1 r:core_node2 x:cdcr-cluster1_shard1_replica_n1] o.a.s.c.S.Request [cdcr-cluster1_shard1_replica_n1] webapp=/solr path=/update params={update.distrib=FROMLEADER&update.chain=cdcr-processor-chain&waitSearcher=true&openSearcher=true&commit=true&softCommit=false&distrib.from=https://127.0.0.1:52108/solr/cdcr-cluster1_shard2_replica_n3/&commit_end_point=true&wt=javabin&version=2&expungeDeletes=false} status=0 QTime=130
[junit4] 2> 1026283 INFO (qtp504181695-6956) [n:127.0.0.1:52108_solr c:cdcr-cluster1 s:shard2 r:core_node4 x:cdcr-cluster1_shard2_replica_n3] o.a.s.u.DirectUpdateHandler2 end_commit_flush
[junit4] 2> 1026283 INFO (qtp504181695-6956) [n:127.0.0.1:52108_solr c:cdcr-cluster1 s:shard2 r:core_node4 x:cdcr-cluster1_shard2_replica_n3] o.a.s.c.S.Request [cdcr-cluster1_shard2_replica_n3] webapp=/solr path=/update params={update.distrib=FROMLEADER&update.chain=cdcr-processor-chain&waitSearcher=true&openSearcher=true&commit=true&softCommit=false&distrib.from=https://127.0.0.1:52108/solr/cdcr-cluster1_shard2_replica_n3/&commit_end_point=true&wt=javabin&version=2&expungeDeletes=false} status=0 QTime=144
[junit4] 2> 1026283 INFO (qtp504181695-6959) [n:127.0.0.1:52108_solr c:cdcr-cluster1 s:shard2 r:core_node4 x:cdcr-cluster1_shard2_replica_n3] o.a.s.c.S.Request [cdcr-cluster1_shard2_replica_n3] webapp=/solr path=/update params={_stateVer_=cdcr-cluster1:7&waitSearcher=true&commit=true&softCommit=false&wt=javabin&version=2} status=0 QTime=251
[junit4] 2> 1026283 INFO (TEST-CdcrBidirectionalTest.testBiDir-seed#[FBD20284F364263D]) [ ] o.a.s.c.c.CdcrBidirectionalTest Adding 10 docs with commit=true, numDocs=200
[junit4] 2> 1026305 INFO (qtp504181695-6960) [n:127.0.0.1:52108_solr c:cdcr-cluster1 s:shard2 r:core_node4 x:cdcr-cluster1_shard2_replica_n3] o.a.s.c.S.Request [cdcr-cluster1_shard2_replica_n3] webapp=/solr path=/update params={_stateVer_=cdcr-cluster1:7&wt=javabin&version=2} status=0 QTime=20
[junit4] 2> 1026307 INFO (qtp504181695-6959) [n:127.0.0.1:52108_solr c:cdcr-cluster1 s:shard1 r:core_node2 x:cdcr-cluster1_shard1_replica_n1] o.a.s.c.S.Request [cdcr-cluster1_shard1_replica_n1] webapp=/solr path=/update params={_stateVer_=cdcr-cluster1:7&wt=javabin&version=2} status=0 QTime=22
[junit4] 2> 1026316 INFO (qtp504181695-6957) [n:127.0.0.1:52108_solr c:cdcr-cluster1 s:shard1 r:core_node2 x:cdcr-cluster1_shard1_replica_n1] o.a.s.u.DirectUpdateHandler2 start commit{_version_=1605067512423120896,optimize=false,openSearcher=true,waitSearcher=true,expungeDeletes=false,softCommit=false,prepareCommit=false}
[junit4] 2> 1026317 INFO (qtp504181695-6958) [n:127.0.0.1:52108_solr c:cdcr-cluster1 s:shard2 r:core_node4 x:cdcr-cluster1_shard2_replica_n3] o.a.s.u.DirectUpdateHandler2 start commit{_version_=1605067512424169472,optimize=false,openSearcher=true,waitSearcher=true,expungeDeletes=false,softCommit=false,prepareCommit=false}
[junit4] 2> 1026318 INFO (qtp504181695-6957) [n:127.0.0.1:52108_solr c:cdcr-cluster1 s:shard1 r:core_node2 x:cdcr-cluster1_shard1_replica_n1] o.a.s.u.SolrIndexWriter Calling setCommitData with IW:org.apache.solr.update.SolrIndexWriter@7d06e1cb commitCommandVersion:1605067512423120896
[junit4] 2> 1026321 INFO (qtp504181695-6958) [n:127.0.0.1:52108_solr c:cdcr-cluster1 s:shard2 r:core_node4 x:cdcr-cluster1_shard2_replica_n3] o.a.s.u.SolrIndexWriter Calling setCommitData with IW:org.apache.solr.update.SolrIndexWriter@6d8e75a4 commitCommandVersion:1605067512424169472
[junit4] 2> 1026352 INFO (qtp1718818795-6906) [n:127.0.0.1:50963_solr c:cdcr-cluster2 s:shard1 r:core_node3 x:cdcr-cluster2_shard1_replica_n1] o.a.s.c.S.Request [cdcr-cluster2_shard1_replica_n1] webapp=/solr path=/cdcr params={qt=/cdcr&action=BOOTSTRAP_STATUS&wt=javabin&version=2} status=0 QTime=0
[junit4] 2> 1026352 INFO (cdcr-bootstrap-status-1732-thread-1) [ ] o.a.s.h.CdcrReplicatorManager CDCR bootstrap successful in 3 seconds
[junit4] 2> 1026450 INFO (qtp1718818795-6903) [n:127.0.0.1:50963_solr c:cdcr-cluster2 s:shard2 r:core_node4 x:cdcr-cluster2_shard2_replica_n2] o.a.s.h.CdcrRequestHandler Found maxVersionFromRecent 0 maxVersionFromIndex 0
[junit4] 2> 1026450 INFO (qtp1718818795-6903) [n:127.0.0.1:50963_solr c:cdcr-cluster2 s:shard2 r:core_node4 x:cdcr-cluster2_shard2_replica_n2] o.a.s.c.S.Request [cdcr-cluster2_shard2_replica_n2] webapp=/solr path=/cdcr params={action=SHARDCHECKPOINT&wt=javabin&version=2} status=0 QTime=0
[junit4] 2> 1026481 INFO (qtp1718818795-6902) [n:127.0.0.1:50963_solr c:cdcr-cluster2 s:shard1 r:core_node3 x:cdcr-cluster2_shard1_replica_n1] o.a.s.h.CdcrRequestHandler Found maxVersionFromRecent 0 maxVersionFromIndex 0
[junit4] 2> 1026481 INFO (qtp1718818795-6902) [n:127.0.0.1:50963_solr c:cdcr-cluster2 s:shard1 r:core_node3 x:cdcr-cluster2_shard1_replica_n1] o.a.s.c.S.Request [cdcr-cluster2_shard1_replica_n1] webapp=/solr path=/cdcr params={action=SHARDCHECKPOINT&wt=javabin&version=2} status=0 QTime=0
[junit4] 2> 1026484 INFO (qtp1718818795-6906) [n:127.0.0.1:50963_solr c:cdcr-cluster2 s:shard1 r:core_node3 x:cdcr-cluster2_shard1_replica_n1] o.a.s.c.S.Request [cdcr-cluster2_shard1_replica_n1] webapp=/solr path=/cdcr params={_stateVer_=cdcr-cluster2:4&action=COLLECTIONCHECKPOINT&wt=javabin&version=2} status=0 QTime=131
[junit4] 2> 1026484 INFO (cdcr-bootstrap-status-1732-thread-1) [ ] o.a.s.h.CdcrReplicatorManager Create new update log reader for target cdcr-cluster2 with checkpoint -1 @ cdcr-cluster1:shard1
[junit4] 2> 1026485 INFO (cdcr-bootstrap-status-1732-thread-1) [ ] o.a.s.h.CdcrReplicatorManager Bootstrap successful, giving the go-ahead to replicator
[junit4] 2> 1026506 INFO (qtp504181695-6958) [n:127.0.0.1:52108_solr c:cdcr-cluster1 s:shard2 r:core_node4 x:cdcr-cluster1_shard2_replica_n3] o.a.s.s.SolrIndexSearcher Opening [Searcher@74108373[cdcr-cluster1_shard2_replica_n3] main]
[junit4] 2> 1026508 INFO (searcherExecutor-2720-thread-1-processing-n:127.0.0.1:52108_solr
[...truncated too long message...]
ck:
[loadresource] Do not set property disallowed.ivy.jars.list as its length is 0.
-ivy-fail-disallowed-ivy-version:
ivy-fail:
ivy-configure:
[ivy:configure] :: loading settings :: file = /x1/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-7.x/lucene/top-level-ivy-settings.xml
resolve:
ivy-availability-check:
[loadresource] Do not set property disallowed.ivy.jars.list as its length is 0.
-ivy-fail-disallowed-ivy-version:
ivy-fail:
ivy-fail:
ivy-configure:
[ivy:configure] :: loading settings :: file = /x1/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-7.x/lucene/top-level-ivy-settings.xml
resolve:
ivy-availability-check:
[loadresource] Do not set property disallowed.ivy.jars.list as its length is 0.
-ivy-fail-disallowed-ivy-version:
ivy-fail:
ivy-configure:
[ivy:configure] :: loading settings :: file = /x1/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-7.x/lucene/top-level-ivy-settings.xml
resolve:
ivy-availability-check:
[loadresource] Do not set property disallowed.ivy.jars.list as its length is 0.
-ivy-fail-disallowed-ivy-version:
ivy-fail:
ivy-fail:
ivy-configure:
[ivy:configure] :: loading settings :: file = /x1/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-7.x/lucene/top-level-ivy-settings.xml
resolve:
ivy-availability-check:
[loadresource] Do not set property disallowed.ivy.jars.list as its length is 0.
-ivy-fail-disallowed-ivy-version:
ivy-fail:
ivy-fail:
ivy-configure:
[ivy:configure] :: loading settings :: file = /x1/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-7.x/lucene/top-level-ivy-settings.xml
resolve:
ivy-availability-check:
[loadresource] Do not set property disallowed.ivy.jars.list as its length is 0.
-ivy-fail-disallowed-ivy-version:
ivy-fail:
ivy-fail:
ivy-configure:
[ivy:configure] :: loading settings :: file = /x1/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-7.x/lucene/top-level-ivy-settings.xml
resolve:
ivy-availability-check:
[loadresource] Do not set property disallowed.ivy.jars.list as its length is 0.
-ivy-fail-disallowed-ivy-version:
ivy-fail:
ivy-fail:
ivy-configure:
[ivy:configure] :: loading settings :: file = /x1/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-7.x/lucene/top-level-ivy-settings.xml
resolve:
ivy-availability-check:
[loadresource] Do not set property disallowed.ivy.jars.list as its length is 0.
-ivy-fail-disallowed-ivy-version:
ivy-fail:
ivy-fail:
ivy-configure:
[ivy:configure] :: loading settings :: file = /x1/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-7.x/lucene/top-level-ivy-settings.xml
resolve:
ivy-availability-check:
[loadresource] Do not set property disallowed.ivy.jars.list as its length is 0.
-ivy-fail-disallowed-ivy-version:
ivy-fail:
ivy-fail:
ivy-configure:
[ivy:configure] :: loading settings :: file = /x1/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-7.x/lucene/top-level-ivy-settings.xml
resolve:
ivy-availability-check:
[loadresource] Do not set property disallowed.ivy.jars.list as its length is 0.
-ivy-fail-disallowed-ivy-version:
ivy-fail:
ivy-fail:
ivy-configure:
[ivy:configure] :: loading settings :: file = /x1/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-7.x/lucene/top-level-ivy-settings.xml
resolve:
ivy-availability-check:
[loadresource] Do not set property disallowed.ivy.jars.list as its length is 0.
-ivy-fail-disallowed-ivy-version:
ivy-fail:
ivy-fail:
ivy-configure:
[ivy:configure] :: loading settings :: file = /x1/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-7.x/lucene/top-level-ivy-settings.xml
resolve:
ivy-availability-check:
[loadresource] Do not set property disallowed.ivy.jars.list as its length is 0.
-ivy-fail-disallowed-ivy-version:
ivy-fail:
ivy-fail:
ivy-configure:
[ivy:configure] :: loading settings :: file = /x1/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-7.x/lucene/top-level-ivy-settings.xml
resolve:
ivy-availability-check:
[loadresource] Do not set property disallowed.ivy.jars.list as its length is 0.
-ivy-fail-disallowed-ivy-version:
ivy-fail:
ivy-fail:
ivy-configure:
[ivy:configure] :: loading settings :: file = /x1/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-7.x/lucene/top-level-ivy-settings.xml
resolve:
ivy-availability-check:
[loadresource] Do not set property disallowed.ivy.jars.list as its length is 0.
-ivy-fail-disallowed-ivy-version:
ivy-fail:
ivy-fail:
ivy-configure:
[ivy:configure] :: loading settings :: file = /x1/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-7.x/lucene/top-level-ivy-settings.xml
resolve:
jar-checksums:
[mkdir] Created dir: /x1/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-7.x/solr/null1320529153
[copy] Copying 247 files to /x1/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-7.x/solr/null1320529153
[delete] Deleting directory /x1/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-7.x/solr/null1320529153
check-working-copy:
[ivy:cachepath] :: resolving dependencies :: org.eclipse.jgit#org.eclipse.jgit-caller;working
[ivy:cachepath] confs: [default]
[ivy:cachepath] found org.eclipse.jgit#org.eclipse.jgit;4.6.0.201612231935-r in public
[ivy:cachepath] found com.jcraft#jsch;0.1.53 in public
[ivy:cachepath] found com.googlecode.javaewah#JavaEWAH;1.1.6 in public
[ivy:cachepath] found org.apache.httpcomponents#httpclient;4.3.6 in public
[ivy:cachepath] found org.apache.httpcomponents#httpcore;4.3.3 in public
[ivy:cachepath] found commons-logging#commons-logging;1.1.3 in public
[ivy:cachepath] found commons-codec#commons-codec;1.6 in public
[ivy:cachepath] found org.slf4j#slf4j-api;1.7.2 in public
[ivy:cachepath] :: resolution report :: resolve 27ms :: artifacts dl 1ms
---------------------------------------------------------------------
| | modules || artifacts |
| conf | number| search|dwnlded|evicted|| number|dwnlded|
---------------------------------------------------------------------
| default | 8 | 0 | 0 | 0 || 8 | 0 |
---------------------------------------------------------------------
[wc-checker] Initializing working copy...
[wc-checker] SLF4J: Failed to load class "org.slf4j.impl.StaticLoggerBinder".
[wc-checker] SLF4J: Defaulting to no-operation (NOP) logger implementation
[wc-checker] SLF4J: See http://www.slf4j.org/codes.html#StaticLoggerBinder for further details.
[wc-checker] Checking working copy status...
-jenkins-base:
BUILD SUCCESSFUL
Total time: 153 minutes 58 seconds
Archiving artifacts
java.lang.InterruptedException: no matches found within 10000
at hudson.FilePath$34.hasMatch(FilePath.java:2678)
at hudson.FilePath$34.invoke(FilePath.java:2557)
at hudson.FilePath$34.invoke(FilePath.java:2547)
at hudson.FilePath$FileCallableWrapper.call(FilePath.java:2918)
Also: hudson.remoting.Channel$CallSiteStackTrace: Remote call to lucene
at hudson.remoting.Channel.attachCallSiteStackTrace(Channel.java:1741)
at hudson.remoting.UserRequest$ExceptionResponse.retrieve(UserRequest.java:357)
at hudson.remoting.Channel.call(Channel.java:955)
at hudson.FilePath.act(FilePath.java:1036)
at hudson.FilePath.act(FilePath.java:1025)
at hudson.FilePath.validateAntFileMask(FilePath.java:2547)
at hudson.tasks.ArtifactArchiver.perform(ArtifactArchiver.java:243)
at hudson.tasks.BuildStepCompatibilityLayer.perform(BuildStepCompatibilityLayer.java:81)
at hudson.tasks.BuildStepMonitor$1.perform(BuildStepMonitor.java:20)
at hudson.model.AbstractBuild$AbstractBuildExecution.perform(AbstractBuild.java:744)
at hudson.model.AbstractBuild$AbstractBuildExecution.performAllBuildSteps(AbstractBuild.java:690)
at hudson.model.Build$BuildExecution.post2(Build.java:186)
at hudson.model.AbstractBuild$AbstractBuildExecution.post(AbstractBuild.java:635)
at hudson.model.Run.execute(Run.java:1819)
at hudson.model.FreeStyleBuild.run(FreeStyleBuild.java:43)
at hudson.model.ResourceController.execute(ResourceController.java:97)
at hudson.model.Executor.run(Executor.java:429)
Caused: hudson.FilePath$TunneledInterruptedException
at hudson.FilePath$FileCallableWrapper.call(FilePath.java:2920)
at hudson.remoting.UserRequest.perform(UserRequest.java:212)
at hudson.remoting.UserRequest.perform(UserRequest.java:54)
at hudson.remoting.Request$2.run(Request.java:369)
at hudson.remoting.InterceptingExecutorService$1.call(InterceptingExecutorService.java:72)
at java.util.concurrent.FutureTask.run(FutureTask.java:266)
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142)
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617)
at java.lang.Thread.run(Thread.java:748)
Caused: java.lang.InterruptedException: java.lang.InterruptedException: no matches found within 10000
at hudson.FilePath.act(FilePath.java:1038)
at hudson.FilePath.act(FilePath.java:1025)
at hudson.FilePath.validateAntFileMask(FilePath.java:2547)
at hudson.tasks.ArtifactArchiver.perform(ArtifactArchiver.java:243)
at hudson.tasks.BuildStepCompatibilityLayer.perform(BuildStepCompatibilityLayer.java:81)
at hudson.tasks.BuildStepMonitor$1.perform(BuildStepMonitor.java:20)
at hudson.model.AbstractBuild$AbstractBuildExecution.perform(AbstractBuild.java:744)
at hudson.model.AbstractBuild$AbstractBuildExecution.performAllBuildSteps(AbstractBuild.java:690)
at hudson.model.Build$BuildExecution.post2(Build.java:186)
at hudson.model.AbstractBuild$AbstractBuildExecution.post(AbstractBuild.java:635)
at hudson.model.Run.execute(Run.java:1819)
at hudson.model.FreeStyleBuild.run(FreeStyleBuild.java:43)
at hudson.model.ResourceController.execute(ResourceController.java:97)
at hudson.model.Executor.run(Executor.java:429)
No artifacts found that match the file pattern "**/*.events,heapdumps/**,**/hs_err_pid*". Configuration error?
Recording test results
Build step 'Publish JUnit test result report' changed build result to UNSTABLE
Email was triggered for: Unstable (Test Failures)
Sending email for trigger: Unstable (Test Failures)
[JENKINS] Lucene-Solr-Tests-7.x - Build # 664 - Still Unstable
Posted by Apache Jenkins Server <je...@builds.apache.org>.
Build: https://builds.apache.org/job/Lucene-Solr-Tests-7.x/664/
1 tests failed.
FAILED: org.apache.solr.common.cloud.SolrZkClientTest.testCheckInterrupted
Error Message:
Stack Trace:
java.lang.InterruptedException
at __randomizedtesting.SeedInfo.seed([7F43284EB6A7680A:B7C1A357DBF85807]:0)
at java.lang.Object.wait(Native Method)
at java.lang.Thread.join(Thread.java:1252)
at java.lang.Thread.join(Thread.java:1326)
at org.apache.solr.cloud.ZkTestServer.shutdown(ZkTestServer.java:532)
at org.apache.solr.common.cloud.SolrZkClientTest.tearDown(SolrZkClientTest.java:106)
at sun.reflect.GeneratedMethodAccessor7.invoke(Unknown Source)
at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
at java.lang.reflect.Method.invoke(Method.java:498)
at com.carrotsearch.randomizedtesting.RandomizedRunner.invoke(RandomizedRunner.java:1737)
at com.carrotsearch.randomizedtesting.RandomizedRunner$10.evaluate(RandomizedRunner.java:992)
at com.carrotsearch.randomizedtesting.rules.SystemPropertiesRestoreRule$1.evaluate(SystemPropertiesRestoreRule.java:57)
at org.apache.lucene.util.TestRuleSetupTeardownChained$1.evaluate(TestRuleSetupTeardownChained.java:49)
at org.apache.lucene.util.AbstractBeforeAfterRule$1.evaluate(AbstractBeforeAfterRule.java:45)
at org.apache.lucene.util.TestRuleThreadAndTestName$1.evaluate(TestRuleThreadAndTestName.java:48)
at org.apache.lucene.util.TestRuleIgnoreAfterMaxFailures$1.evaluate(TestRuleIgnoreAfterMaxFailures.java:64)
at org.apache.lucene.util.TestRuleMarkFailure$1.evaluate(TestRuleMarkFailure.java:47)
at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
at com.carrotsearch.randomizedtesting.ThreadLeakControl$StatementRunner.run(ThreadLeakControl.java:368)
at com.carrotsearch.randomizedtesting.ThreadLeakControl.forkTimeoutingTask(ThreadLeakControl.java:817)
at com.carrotsearch.randomizedtesting.ThreadLeakControl$3.evaluate(ThreadLeakControl.java:468)
at com.carrotsearch.randomizedtesting.RandomizedRunner.runSingleTest(RandomizedRunner.java:943)
at com.carrotsearch.randomizedtesting.RandomizedRunner$5.evaluate(RandomizedRunner.java:829)
at com.carrotsearch.randomizedtesting.RandomizedRunner$6.evaluate(RandomizedRunner.java:879)
at com.carrotsearch.randomizedtesting.RandomizedRunner$7.evaluate(RandomizedRunner.java:890)
at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
at com.carrotsearch.randomizedtesting.rules.SystemPropertiesRestoreRule$1.evaluate(SystemPropertiesRestoreRule.java:57)
at org.apache.lucene.util.AbstractBeforeAfterRule$1.evaluate(AbstractBeforeAfterRule.java:45)
at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
at org.apache.lucene.util.TestRuleStoreClassName$1.evaluate(TestRuleStoreClassName.java:41)
at com.carrotsearch.randomizedtesting.rules.NoShadowingOrOverridesOnMethodsRule$1.evaluate(NoShadowingOrOverridesOnMethodsRule.java:40)
at com.carrotsearch.randomizedtesting.rules.NoShadowingOrOverridesOnMethodsRule$1.evaluate(NoShadowingOrOverridesOnMethodsRule.java:40)
at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
at org.apache.lucene.util.TestRuleAssertionsRequired$1.evaluate(TestRuleAssertionsRequired.java:53)
at org.apache.lucene.util.TestRuleMarkFailure$1.evaluate(TestRuleMarkFailure.java:47)
at org.apache.lucene.util.TestRuleIgnoreAfterMaxFailures$1.evaluate(TestRuleIgnoreAfterMaxFailures.java:64)
at org.apache.lucene.util.TestRuleIgnoreTestSuites$1.evaluate(TestRuleIgnoreTestSuites.java:54)
at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
at com.carrotsearch.randomizedtesting.ThreadLeakControl$StatementRunner.run(ThreadLeakControl.java:368)
at java.lang.Thread.run(Thread.java:748)
Build Log:
[...truncated 16308 lines...]
[junit4] Suite: org.apache.solr.common.cloud.SolrZkClientTest
[junit4] 2> 362634 INFO (SUITE-SolrZkClientTest-seed#[7F43284EB6A7680A]-worker) [ ] o.a.s.SolrTestCaseJ4 SecureRandom sanity checks: test.solr.allowed.securerandom=null & java.security.egd=file:/dev/./urandom
[junit4] 2> Creating dataDir: /home/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-7.x/solr/build/solr-solrj/test/J0/temp/solr.common.cloud.SolrZkClientTest_7F43284EB6A7680A-001/init-core-data-001
[junit4] 2> 362635 WARN (SUITE-SolrZkClientTest-seed#[7F43284EB6A7680A]-worker) [ ] o.a.s.SolrTestCaseJ4 startTrackingSearchers: numOpens=8 numCloses=8
[junit4] 2> 362638 INFO (SUITE-SolrZkClientTest-seed#[7F43284EB6A7680A]-worker) [ ] o.a.s.SolrTestCaseJ4 Using PointFields (NUMERIC_POINTS_SYSPROP=true) w/NUMERIC_DOCVALUES_SYSPROP=true
[junit4] 2> 362640 INFO (SUITE-SolrZkClientTest-seed#[7F43284EB6A7680A]-worker) [ ] o.a.s.SolrTestCaseJ4 Randomized ssl (true) and clientAuth (true) via: @org.apache.solr.util.RandomizeSSL(reason=, ssl=NaN, value=NaN, clientAuth=NaN)
[junit4] 2> 362695 INFO (TEST-SolrZkClientTest.testSimpleUpdateACLs-seed#[7F43284EB6A7680A]) [ ] o.a.s.SolrTestCaseJ4 ###Starting testSimpleUpdateACLs
[junit4] 2> 362695 INFO (TEST-SolrZkClientTest.testSimpleUpdateACLs-seed#[7F43284EB6A7680A]) [ ] o.a.s.c.c.SolrZkClientTest ZooKeeper dataDir:/home/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-7.x/solr/build/solr-solrj/test/J0/temp/solr.common.cloud.SolrZkClientTest_7F43284EB6A7680A-001/tempDir-001
[junit4] 2> 362695 INFO (TEST-SolrZkClientTest.testSimpleUpdateACLs-seed#[7F43284EB6A7680A]) [ ] o.a.s.c.ZkTestServer STARTING ZK TEST SERVER
[junit4] 2> 362731 INFO (Thread-547) [ ] o.a.s.c.ZkTestServer client port:0.0.0.0/0.0.0.0:0
[junit4] 2> 362731 INFO (Thread-547) [ ] o.a.s.c.ZkTestServer Starting server
[junit4] 2> 362799 ERROR (Thread-547) [ ] o.a.z.s.ZooKeeperServer ZKShutdownHandler is not registered, so ZooKeeper server won't take any action on ERROR or SHUTDOWN server state changes
[junit4] 2> 362842 INFO (TEST-SolrZkClientTest.testSimpleUpdateACLs-seed#[7F43284EB6A7680A]) [ ] o.a.s.c.ZkTestServer start zk server on port:36575
[junit4] 2> 363002 INFO (zkConnectionManagerCallback-999-thread-1) [ ] o.a.s.c.c.ConnectionManager zkClient has connected
[junit4] 2> 363112 INFO (zkConnectionManagerCallback-1001-thread-1) [ ] o.a.s.c.c.ConnectionManager zkClient has connected
[junit4] 2> 363221 INFO (zkConnectionManagerCallback-1003-thread-1) [ ] o.a.s.c.c.ConnectionManager zkClient has connected
[junit4] 2> 363353 INFO (zkConnectionManagerCallback-1005-thread-1) [ ] o.a.s.c.c.ConnectionManager zkClient has connected
[junit4] 2> 363606 ERROR (TEST-SolrZkClientTest.testSimpleUpdateACLs-seed#[7F43284EB6A7680A]) [ ] o.a.z.s.ZooKeeperServer ZKShutdownHandler is not registered, so ZooKeeper server won't take any action on ERROR or SHUTDOWN server state changes
[junit4] 2> 363732 INFO (TEST-SolrZkClientTest.testSimpleUpdateACLs-seed#[7F43284EB6A7680A]) [ ] o.a.s.c.ZkTestServer connecting to 127.0.0.1:36575 36575
[junit4] 2> 363975 INFO (Thread-547) [ ] o.a.s.c.ZkTestServer connecting to 127.0.0.1:36575 36575
[junit4] 2> 363988 INFO (TEST-SolrZkClientTest.testSimpleUpdateACLs-seed#[7F43284EB6A7680A]) [ ] o.a.s.SolrTestCaseJ4 ###Ending testSimpleUpdateACLs
[junit4] 2> 363989 INFO (TEST-SolrZkClientTest.testCheckInterrupted-seed#[7F43284EB6A7680A]) [ ] o.a.s.SolrTestCaseJ4 ###Starting testCheckInterrupted
[junit4] 2> 363990 INFO (TEST-SolrZkClientTest.testCheckInterrupted-seed#[7F43284EB6A7680A]) [ ] o.a.s.c.c.SolrZkClientTest ZooKeeper dataDir:/home/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-7.x/solr/build/solr-solrj/test/J0/temp/solr.common.cloud.SolrZkClientTest_7F43284EB6A7680A-001/tempDir-002
[junit4] 2> 363990 INFO (TEST-SolrZkClientTest.testCheckInterrupted-seed#[7F43284EB6A7680A]) [ ] o.a.s.c.ZkTestServer STARTING ZK TEST SERVER
[junit4] 2> 363996 INFO (Thread-548) [ ] o.a.s.c.ZkTestServer client port:0.0.0.0/0.0.0.0:0
[junit4] 2> 363996 INFO (Thread-548) [ ] o.a.s.c.ZkTestServer Starting server
[junit4] 2> 364033 ERROR (Thread-548) [ ] o.a.z.s.ZooKeeperServer ZKShutdownHandler is not registered, so ZooKeeper server won't take any action on ERROR or SHUTDOWN server state changes
[junit4] 2> 364105 INFO (TEST-SolrZkClientTest.testCheckInterrupted-seed#[7F43284EB6A7680A]) [ ] o.a.s.c.ZkTestServer start zk server on port:44487
[junit4] 2> 364240 INFO (zkConnectionManagerCallback-1007-thread-1) [ ] o.a.s.c.c.ConnectionManager zkClient has connected
[junit4] 2> 364460 INFO (zkConnectionManagerCallback-1009-thread-1) [ ] o.a.s.c.c.ConnectionManager zkClient has connected
[junit4] 2> 364488 INFO (zkConnectionManagerCallback-1011-thread-1) [ ] o.a.s.c.c.ConnectionManager zkClient has connected
[junit4] 2> 364564 INFO (zkConnectionManagerCallback-1013-thread-1) [ ] o.a.s.c.c.ConnectionManager zkClient has connected
[junit4] 2> 364637 ERROR (TEST-SolrZkClientTest.testCheckInterrupted-seed#[7F43284EB6A7680A]) [ ] o.a.z.s.ZooKeeperServer ZKShutdownHandler is not registered, so ZooKeeper server won't take any action on ERROR or SHUTDOWN server state changes
[junit4] 2> 364652 INFO (TEST-SolrZkClientTest.testCheckInterrupted-seed#[7F43284EB6A7680A]) [ ] o.a.s.c.ZkTestServer connecting to 127.0.0.1:44487 44487
[junit4] 2> 364707 INFO (Thread-548) [ ] o.a.s.c.ZkTestServer connecting to 127.0.0.1:44487 44487
[junit4] 2> NOTE: reproduce with: ant test -Dtestcase=SolrZkClientTest -Dtests.method=testCheckInterrupted -Dtests.seed=7F43284EB6A7680A -Dtests.multiplier=2 -Dtests.slow=true -Dtests.locale=pt-PT -Dtests.timezone=Brazil/West -Dtests.asserts=true -Dtests.file.encoding=UTF-8
[junit4] ERROR 0.76s J0 | SolrZkClientTest.testCheckInterrupted <<<
[junit4] > Throwable #1: java.lang.InterruptedException
[junit4] > at __randomizedtesting.SeedInfo.seed([7F43284EB6A7680A:B7C1A357DBF85807]:0)
[junit4] > at java.lang.Object.wait(Native Method)
[junit4] > at java.lang.Thread.join(Thread.java:1252)
[junit4] > at java.lang.Thread.join(Thread.java:1326)
[junit4] > at org.apache.solr.cloud.ZkTestServer.shutdown(ZkTestServer.java:532)
[junit4] > at org.apache.solr.common.cloud.SolrZkClientTest.tearDown(SolrZkClientTest.java:106)
[junit4] > at java.lang.Thread.run(Thread.java:748)
[junit4] 2> NOTE: leaving temporary files on disk at: /home/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-7.x/solr/build/solr-solrj/test/J0/temp/solr.common.cloud.SolrZkClientTest_7F43284EB6A7680A-001
[junit4] 2> Jul 03, 2018 3:46:15 PM com.carrotsearch.randomizedtesting.ThreadLeakControl checkThreadLeaks
[junit4] 2> WARNING: Will linger awaiting termination of 1 leaked thread(s).
[junit4] 2> NOTE: test params are: codec=Asserting(Lucene70): {}, docValues:{}, maxPointsInLeafNode=645, maxMBSortInHeap=7.643278999047528, sim=RandomSimilarity(queryNorm=true): {}, locale=pt-PT, timezone=Brazil/West
[junit4] 2> NOTE: Linux 4.4.0-112-generic amd64/Oracle Corporation 1.8.0_172 (64-bit)/cpus=4,threads=1,free=77949696,total=362283008
[junit4] 2> NOTE: All tests run in this JVM: [TestConfigSetAdminRequest, TestLang, SolrExampleEmbeddedTest, LessThanEvaluatorTest, UsingSolrJRefGuideExamplesTest, SolrExampleBinaryTest, GreaterThanEvaluatorTest, AndEvaluatorTest, ContentStreamTest, TestV2Request, SchemaTest, TestPathTrie, JdbcDriverTest, SolrQueryTest, SolrExampleStreamingTest, TestTimeSource, StreamingTest, NaturalLogEvaluatorTest, QueryResponseTest, TestFastInputStream, ClientUtilsTest, EmpiricalDistributionEvaluatorTest, StreamExpressionTest, MathExpressionTest, SolrPingTest, TestDelegationTokenRequest, FacetFieldTest, TestDelegationTokenResponse, TestSpellCheckResponse, TestSuggesterResponse, SolrDocumentTest, TestToleratedUpdateError, SolrZkClientTest]
[junit4] Completed [139/154 (1!)] on J0 in 2.67s, 2 tests, 1 error <<< FAILURES!
[...truncated 49563 lines...]
[asciidoctor:convert] asciidoctor: ERROR: about-this-guide.adoc: line 1: invalid part, must have at least one section (e.g., chapter, appendix, etc.)
[asciidoctor:convert] asciidoctor: ERROR: solr-glossary.adoc: line 1: invalid part, must have at least one section (e.g., chapter, appendix, etc.)
[java] Processed 2227 links (1778 relative) to 3000 anchors in 230 files
[echo] Validated Links & Anchors via: /home/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-7.x/solr/build/solr-ref-guide/bare-bones-html/
-documentation-lint:
[jtidy] Checking for broken html (such as invalid tags)...
[delete] Deleting directory /home/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-7.x/lucene/build/jtidy_tmp
[echo] Checking for broken links...
[exec]
[exec] Crawl/parse...
[exec]
[exec] Verify...
[echo] Checking for malformed docs...
jar-checksums:
resolve:
ivy-availability-check:
[loadresource] Do not set property disallowed.ivy.jars.list as its length is 0.
-ivy-fail-disallowed-ivy-version:
ivy-fail:
ivy-configure:
[ivy:configure] :: loading settings :: file = /home/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-7.x/lucene/top-level-ivy-settings.xml
resolve:
ivy-availability-check:
[loadresource] Do not set property disallowed.ivy.jars.list as its length is 0.
-ivy-fail-disallowed-ivy-version:
ivy-fail:
ivy-configure:
[ivy:configure] :: loading settings :: file = /home/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-7.x/lucene/top-level-ivy-settings.xml
resolve:
resolve:
ivy-availability-check:
[loadresource] Do not set property disallowed.ivy.jars.list as its length is 0.
-ivy-fail-disallowed-ivy-version:
ivy-fail:
ivy-configure:
[ivy:configure] :: loading settings :: file = /home/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-7.x/lucene/top-level-ivy-settings.xml
resolve:
ivy-availability-check:
[loadresource] Do not set property disallowed.ivy.jars.list as its length is 0.
-ivy-fail-disallowed-ivy-version:
ivy-fail:
ivy-configure:
[ivy:configure] :: loading settings :: file = /home/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-7.x/lucene/top-level-ivy-settings.xml
resolve:
ivy-availability-check:
[loadresource] Do not set property disallowed.ivy.jars.list as its length is 0.
-ivy-fail-disallowed-ivy-version:
ivy-fail:
ivy-configure:
[ivy:configure] :: loading settings :: file = /home/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-7.x/lucene/top-level-ivy-settings.xml
resolve:
ivy-availability-check:
[loadresource] Do not set property disallowed.ivy.jars.list as its length is 0.
-ivy-fail-disallowed-ivy-version:
ivy-fail:
ivy-configure:
[ivy:configure] :: loading settings :: file = /home/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-7.x/lucene/top-level-ivy-settings.xml
resolve:
ivy-availability-check:
[loadresource] Do not set property disallowed.ivy.jars.list as its length is 0.
-ivy-fail-disallowed-ivy-version:
ivy-fail:
ivy-configure:
[ivy:configure] :: loading settings :: file = /home/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-7.x/lucene/top-level-ivy-settings.xml
resolve:
ivy-availability-check:
[loadresource] Do not set property disallowed.ivy.jars.list as its length is 0.
-ivy-fail-disallowed-ivy-version:
ivy-fail:
ivy-configure:
[ivy:configure] :: loading settings :: file = /home/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-7.x/lucene/top-level-ivy-settings.xml
resolve:
ivy-availability-check:
[loadresource] Do not set property disallowed.ivy.jars.list as its length is 0.
-ivy-fail-disallowed-ivy-version:
ivy-fail:
ivy-configure:
[ivy:configure] :: loading settings :: file = /home/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-7.x/lucene/top-level-ivy-settings.xml
resolve:
ivy-availability-check:
[loadresource] Do not set property disallowed.ivy.jars.list as its length is 0.
-ivy-fail-disallowed-ivy-version:
ivy-fail:
ivy-configure:
[ivy:configure] :: loading settings :: file = /home/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-7.x/lucene/top-level-ivy-settings.xml
resolve:
ivy-availability-check:
[loadresource] Do not set property disallowed.ivy.jars.list as its length is 0.
-ivy-fail-disallowed-ivy-version:
ivy-fail:
ivy-configure:
[ivy:configure] :: loading settings :: file = /home/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-7.x/lucene/top-level-ivy-settings.xml
resolve:
ivy-availability-check:
[loadresource] Do not set property disallowed.ivy.jars.list as its length is 0.
-ivy-fail-disallowed-ivy-version:
ivy-fail:
ivy-configure:
[ivy:configure] :: loading settings :: file = /home/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-7.x/lucene/top-level-ivy-settings.xml
resolve:
ivy-availability-check:
[loadresource] Do not set property disallowed.ivy.jars.list as its length is 0.
-ivy-fail-disallowed-ivy-version:
ivy-fail:
ivy-configure:
[ivy:configure] :: loading settings :: file = /home/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-7.x/lucene/top-level-ivy-settings.xml
resolve:
ivy-availability-check:
[loadresource] Do not set property disallowed.ivy.jars.list as its length is 0.
-ivy-fail-disallowed-ivy-version:
ivy-fail:
ivy-configure:
[ivy:configure] :: loading settings :: file = /home/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-7.x/lucene/top-level-ivy-settings.xml
resolve:
ivy-availability-check:
[loadresource] Do not set property disallowed.ivy.jars.list as its length is 0.
-ivy-fail-disallowed-ivy-version:
ivy-fail:
ivy-configure:
[ivy:configure] :: loading settings :: file = /home/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-7.x/lucene/top-level-ivy-settings.xml
resolve:
ivy-availability-check:
[loadresource] Do not set property disallowed.ivy.jars.list as its length is 0.
-ivy-fail-disallowed-ivy-version:
ivy-fail:
ivy-configure:
[ivy:configure] :: loading settings :: file = /home/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-7.x/lucene/top-level-ivy-settings.xml
resolve:
ivy-availability-check:
[loadresource] Do not set property disallowed.ivy.jars.list as its length is 0.
-ivy-fail-disallowed-ivy-version:
ivy-fail:
ivy-configure:
[ivy:configure] :: loading settings :: file = /home/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-7.x/lucene/top-level-ivy-settings.xml
resolve:
ivy-availability-check:
[loadresource] Do not set property disallowed.ivy.jars.list as its length is 0.
-ivy-fail-disallowed-ivy-version:
ivy-fail:
ivy-configure:
[ivy:configure] :: loading settings :: file = /home/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-7.x/lucene/top-level-ivy-settings.xml
resolve:
ivy-availability-check:
[loadresource] Do not set property disallowed.ivy.jars.list as its length is 0.
-ivy-fail-disallowed-ivy-version:
ivy-fail:
ivy-configure:
[ivy:configure] :: loading settings :: file = /home/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-7.x/lucene/top-level-ivy-settings.xml
resolve:
ivy-availability-check:
[loadresource] Do not set property disallowed.ivy.jars.list as its length is 0.
-ivy-fail-disallowed-ivy-version:
ivy-fail:
ivy-configure:
[ivy:configure] :: loading settings :: file = /home/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-7.x/lucene/top-level-ivy-settings.xml
resolve:
ivy-availability-check:
[loadresource] Do not set property disallowed.ivy.jars.list as its length is 0.
-ivy-fail-disallowed-ivy-version:
ivy-fail:
ivy-configure:
[ivy:configure] :: loading settings :: file = /home/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-7.x/lucene/top-level-ivy-settings.xml
resolve:
ivy-availability-check:
[loadresource] Do not set property disallowed.ivy.jars.list as its length is 0.
-ivy-fail-disallowed-ivy-version:
ivy-fail:
ivy-configure:
[ivy:configure] :: loading settings :: file = /home/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-7.x/lucene/top-level-ivy-settings.xml
resolve:
ivy-availability-check:
[loadresource] Do not set property disallowed.ivy.jars.list as its length is 0.
-ivy-fail-disallowed-ivy-version:
ivy-fail:
ivy-configure:
[ivy:configure] :: loading settings :: file = /home/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-7.x/lucene/top-level-ivy-settings.xml
resolve:
ivy-availability-check:
[loadresource] Do not set property disallowed.ivy.jars.list as its length is 0.
-ivy-fail-disallowed-ivy-version:
ivy-fail:
ivy-configure:
[ivy:configure] :: loading settings :: file = /home/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-7.x/lucene/top-level-ivy-settings.xml
resolve:
ivy-availability-check:
[loadresource] Do not set property disallowed.ivy.jars.list as its length is 0.
-ivy-fail-disallowed-ivy-version:
ivy-fail:
ivy-configure:
[ivy:configure] :: loading settings :: file = /home/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-7.x/lucene/top-level-ivy-settings.xml
resolve:
ivy-availability-check:
[loadresource] Do not set property disallowed.ivy.jars.list as its length is 0.
-ivy-fail-disallowed-ivy-version:
ivy-fail:
ivy-configure:
[ivy:configure] :: loading settings :: file = /home/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-7.x/lucene/top-level-ivy-settings.xml
resolve:
ivy-availability-check:
[loadresource] Do not set property disallowed.ivy.jars.list as its length is 0.
-ivy-fail-disallowed-ivy-version:
ivy-fail:
ivy-configure:
[ivy:configure] :: loading settings :: file = /home/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-7.x/lucene/top-level-ivy-settings.xml
common.resolve:
resolve:
ivy-availability-check:
[loadresource] Do not set property disallowed.ivy.jars.list as its length is 0.
-ivy-fail-disallowed-ivy-version:
ivy-fail:
ivy-configure:
[ivy:configure] :: loading settings :: file = /home/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-7.x/lucene/top-level-ivy-settings.xml
resolve:
ivy-availability-check:
[loadresource] Do not set property disallowed.ivy.jars.list as its length is 0.
-ivy-fail-disallowed-ivy-version:
ivy-fail:
ivy-configure:
[ivy:configure] :: loading settings :: file = /home/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-7.x/lucene/top-level-ivy-settings.xml
resolve:
ivy-availability-check:
[loadresource] Do not set property disallowed.ivy.jars.list as its length is 0.
-ivy-fail-disallowed-ivy-version:
ivy-fail:
ivy-configure:
[ivy:configure] :: loading settings :: file = /home/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-7.x/lucene/top-level-ivy-settings.xml
resolve:
ivy-availability-check:
[loadresource] Do not set property disallowed.ivy.jars.list as its length is 0.
-ivy-fail-disallowed-ivy-version:
ivy-fail:
ivy-configure:
[ivy:configure] :: loading settings :: file = /home/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-7.x/lucene/top-level-ivy-settings.xml
resolve:
ivy-availability-check:
[loadresource] Do not set property disallowed.ivy.jars.list as its length is 0.
-ivy-fail-disallowed-ivy-version:
ivy-fail:
ivy-configure:
[ivy:configure] :: loading settings :: file = /home/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-7.x/lucene/top-level-ivy-settings.xml
resolve:
jar-checksums:
[mkdir] Created dir: /home/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-7.x/lucene/null400980957
[copy] Copying 39 files to /home/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-7.x/lucene/null400980957
[delete] Deleting directory /home/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-7.x/lucene/null400980957
resolve-example:
ivy-availability-check:
[loadresource] Do not set property disallowed.ivy.jars.list as its length is 0.
-ivy-fail-disallowed-ivy-version:
ivy-fail:
ivy-configure:
[ivy:configure] :: loading settings :: file = /home/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-7.x/lucene/top-level-ivy-settings.xml
resolve:
resolve-server:
ivy-availability-check:
[loadresource] Do not set property disallowed.ivy.jars.list as its length is 0.
-ivy-fail-disallowed-ivy-version:
ivy-fail:
ivy-fail:
ivy-configure:
[ivy:configure] :: loading settings :: file = /home/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-7.x/lucene/top-level-ivy-settings.xml
resolve:
resolve:
ivy-availability-check:
[loadresource] Do not set property disallowed.ivy.jars.list as its length is 0.
-ivy-fail-disallowed-ivy-version:
ivy-fail:
ivy-fail:
ivy-configure:
[ivy:configure] :: loading settings :: file = /home/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-7.x/lucene/top-level-ivy-settings.xml
resolve:
ivy-availability-check:
[loadresource] Do not set property disallowed.ivy.jars.list as its length is 0.
-ivy-fail-disallowed-ivy-version:
ivy-fail:
ivy-fail:
ivy-configure:
[ivy:configure] :: loading settings :: file = /home/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-7.x/lucene/top-level-ivy-settings.xml
resolve:
ivy-availability-check:
[loadresource] Do not set property disallowed.ivy.jars.list as its length is 0.
-ivy-fail-disallowed-ivy-version:
ivy-fail:
ivy-configure:
[ivy:configure] :: loading settings :: file = /home/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-7.x/lucene/top-level-ivy-settings.xml
resolve:
ivy-availability-check:
[loadresource] Do not set property disallowed.ivy.jars.list as its length is 0.
-ivy-fail-disallowed-ivy-version:
ivy-fail:
ivy-fail:
ivy-configure:
[ivy:configure] :: loading settings :: file = /home/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-7.x/lucene/top-level-ivy-settings.xml
resolve:
ivy-availability-check:
[loadresource] Do not set property disallowed.ivy.jars.list as its length is 0.
-ivy-fail-disallowed-ivy-version:
ivy-fail:
ivy-configure:
[ivy:configure] :: loading settings :: file = /home/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-7.x/lucene/top-level-ivy-settings.xml
resolve:
ivy-availability-check:
[loadresource] Do not set property disallowed.ivy.jars.list as its length is 0.
-ivy-fail-disallowed-ivy-version:
ivy-fail:
ivy-fail:
ivy-configure:
[ivy:configure] :: loading settings :: file = /home/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-7.x/lucene/top-level-ivy-settings.xml
resolve:
ivy-availability-check:
[loadresource] Do not set property disallowed.ivy.jars.list as its length is 0.
-ivy-fail-disallowed-ivy-version:
ivy-fail:
ivy-fail:
ivy-configure:
[ivy:configure] :: loading settings :: file = /home/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-7.x/lucene/top-level-ivy-settings.xml
resolve:
ivy-availability-check:
[loadresource] Do not set property disallowed.ivy.jars.list as its length is 0.
-ivy-fail-disallowed-ivy-version:
ivy-fail:
ivy-fail:
ivy-configure:
[ivy:configure] :: loading settings :: file = /home/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-7.x/lucene/top-level-ivy-settings.xml
resolve:
ivy-availability-check:
[loadresource] Do not set property disallowed.ivy.jars.list as its length is 0.
-ivy-fail-disallowed-ivy-version:
ivy-fail:
ivy-fail:
ivy-configure:
[ivy:configure] :: loading settings :: file = /home/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-7.x/lucene/top-level-ivy-settings.xml
resolve:
ivy-availability-check:
[loadresource] Do not set property disallowed.ivy.jars.list as its length is 0.
-ivy-fail-disallowed-ivy-version:
ivy-fail:
ivy-fail:
ivy-configure:
[ivy:configure] :: loading settings :: file = /home/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-7.x/lucene/top-level-ivy-settings.xml
resolve:
ivy-availability-check:
[loadresource] Do not set property disallowed.ivy.jars.list as its length is 0.
-ivy-fail-disallowed-ivy-version:
ivy-fail:
ivy-fail:
ivy-configure:
[ivy:configure] :: loading settings :: file = /home/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-7.x/lucene/top-level-ivy-settings.xml
resolve:
ivy-availability-check:
[loadresource] Do not set property disallowed.ivy.jars.list as its length is 0.
-ivy-fail-disallowed-ivy-version:
ivy-fail:
ivy-fail:
ivy-configure:
[ivy:configure] :: loading settings :: file = /home/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-7.x/lucene/top-level-ivy-settings.xml
resolve:
ivy-availability-check:
[loadresource] Do not set property disallowed.ivy.jars.list as its length is 0.
-ivy-fail-disallowed-ivy-version:
ivy-fail:
ivy-fail:
ivy-configure:
[ivy:configure] :: loading settings :: file = /home/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-7.x/lucene/top-level-ivy-settings.xml
resolve:
ivy-availability-check:
[loadresource] Do not set property disallowed.ivy.jars.list as its length is 0.
-ivy-fail-disallowed-ivy-version:
ivy-fail:
ivy-fail:
ivy-configure:
[ivy:configure] :: loading settings :: file = /home/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-7.x/lucene/top-level-ivy-settings.xml
resolve:
ivy-availability-check:
[loadresource] Do not set property disallowed.ivy.jars.list as its length is 0.
-ivy-fail-disallowed-ivy-version:
ivy-fail:
ivy-fail:
ivy-configure:
[ivy:configure] :: loading settings :: file = /home/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-7.x/lucene/top-level-ivy-settings.xml
resolve:
ivy-availability-check:
[loadresource] Do not set property disallowed.ivy.jars.list as its length is 0.
-ivy-fail-disallowed-ivy-version:
ivy-fail:
ivy-fail:
ivy-configure:
[ivy:configure] :: loading settings :: file = /home/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-7.x/lucene/top-level-ivy-settings.xml
resolve:
jar-checksums:
[mkdir] Created dir: /home/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-7.x/solr/null1670919068
[copy] Copying 247 files to /home/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-7.x/solr/null1670919068
[delete] Deleting directory /home/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-7.x/solr/null1670919068
check-working-copy:
[ivy:cachepath] :: resolving dependencies :: org.eclipse.jgit#org.eclipse.jgit-caller;working
[ivy:cachepath] confs: [default]
[ivy:cachepath] found org.eclipse.jgit#org.eclipse.jgit;4.6.0.201612231935-r in public
[ivy:cachepath] found com.jcraft#jsch;0.1.53 in public
[ivy:cachepath] found com.googlecode.javaewah#JavaEWAH;1.1.6 in public
[ivy:cachepath] found org.apache.httpcomponents#httpclient;4.3.6 in public
[ivy:cachepath] found org.apache.httpcomponents#httpcore;4.3.3 in public
[ivy:cachepath] found commons-logging#commons-logging;1.1.3 in public
[ivy:cachepath] found commons-codec#commons-codec;1.6 in public
[ivy:cachepath] found org.slf4j#slf4j-api;1.7.2 in public
[ivy:cachepath] :: resolution report :: resolve 69ms :: artifacts dl 23ms
---------------------------------------------------------------------
| | modules || artifacts |
| conf | number| search|dwnlded|evicted|| number|dwnlded|
---------------------------------------------------------------------
| default | 8 | 0 | 0 | 0 || 8 | 0 |
---------------------------------------------------------------------
[wc-checker] Initializing working copy...
[wc-checker] SLF4J: Failed to load class "org.slf4j.impl.StaticLoggerBinder".
[wc-checker] SLF4J: Defaulting to no-operation (NOP) logger implementation
[wc-checker] SLF4J: See http://www.slf4j.org/codes.html#StaticLoggerBinder for further details.
[wc-checker] Checking working copy status...
-jenkins-base:
BUILD SUCCESSFUL
Total time: 194 minutes 53 seconds
Archiving artifacts
java.lang.InterruptedException: no matches found within 10000
at hudson.FilePath$34.hasMatch(FilePath.java:2678)
at hudson.FilePath$34.invoke(FilePath.java:2557)
at hudson.FilePath$34.invoke(FilePath.java:2547)
at hudson.FilePath$FileCallableWrapper.call(FilePath.java:2918)
Also: hudson.remoting.Channel$CallSiteStackTrace: Remote call to lucene2
at hudson.remoting.Channel.attachCallSiteStackTrace(Channel.java:1741)
at hudson.remoting.UserRequest$ExceptionResponse.retrieve(UserRequest.java:357)
at hudson.remoting.Channel.call(Channel.java:955)
at hudson.FilePath.act(FilePath.java:1036)
at hudson.FilePath.act(FilePath.java:1025)
at hudson.FilePath.validateAntFileMask(FilePath.java:2547)
at hudson.tasks.ArtifactArchiver.perform(ArtifactArchiver.java:243)
at hudson.tasks.BuildStepCompatibilityLayer.perform(BuildStepCompatibilityLayer.java:81)
at hudson.tasks.BuildStepMonitor$1.perform(BuildStepMonitor.java:20)
at hudson.model.AbstractBuild$AbstractBuildExecution.perform(AbstractBuild.java:744)
at hudson.model.AbstractBuild$AbstractBuildExecution.performAllBuildSteps(AbstractBuild.java:690)
at hudson.model.Build$BuildExecution.post2(Build.java:186)
at hudson.model.AbstractBuild$AbstractBuildExecution.post(AbstractBuild.java:635)
at hudson.model.Run.execute(Run.java:1819)
at hudson.model.FreeStyleBuild.run(FreeStyleBuild.java:43)
at hudson.model.ResourceController.execute(ResourceController.java:97)
at hudson.model.Executor.run(Executor.java:429)
Caused: hudson.FilePath$TunneledInterruptedException
at hudson.FilePath$FileCallableWrapper.call(FilePath.java:2920)
at hudson.remoting.UserRequest.perform(UserRequest.java:212)
at hudson.remoting.UserRequest.perform(UserRequest.java:54)
at hudson.remoting.Request$2.run(Request.java:369)
at hudson.remoting.InterceptingExecutorService$1.call(InterceptingExecutorService.java:72)
at java.util.concurrent.FutureTask.run(FutureTask.java:266)
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142)
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617)
at java.lang.Thread.run(Thread.java:748)
Caused: java.lang.InterruptedException: java.lang.InterruptedException: no matches found within 10000
at hudson.FilePath.act(FilePath.java:1038)
at hudson.FilePath.act(FilePath.java:1025)
at hudson.FilePath.validateAntFileMask(FilePath.java:2547)
at hudson.tasks.ArtifactArchiver.perform(ArtifactArchiver.java:243)
at hudson.tasks.BuildStepCompatibilityLayer.perform(BuildStepCompatibilityLayer.java:81)
at hudson.tasks.BuildStepMonitor$1.perform(BuildStepMonitor.java:20)
at hudson.model.AbstractBuild$AbstractBuildExecution.perform(AbstractBuild.java:744)
at hudson.model.AbstractBuild$AbstractBuildExecution.performAllBuildSteps(AbstractBuild.java:690)
at hudson.model.Build$BuildExecution.post2(Build.java:186)
at hudson.model.AbstractBuild$AbstractBuildExecution.post(AbstractBuild.java:635)
at hudson.model.Run.execute(Run.java:1819)
at hudson.model.FreeStyleBuild.run(FreeStyleBuild.java:43)
at hudson.model.ResourceController.execute(ResourceController.java:97)
at hudson.model.Executor.run(Executor.java:429)
No artifacts found that match the file pattern "**/*.events,heapdumps/**,**/hs_err_pid*". Configuration error?
Recording test results
Build step 'Publish JUnit test result report' changed build result to UNSTABLE
Email was triggered for: Unstable (Test Failures)
Sending email for trigger: Unstable (Test Failures)