Posted to dev@lucene.apache.org by Apache Jenkins Server <je...@builds.apache.org> on 2018/12/29 11:03:14 UTC

[JENKINS] Lucene-Solr-repro - Build # 2582 - Unstable

Build: https://builds.apache.org/job/Lucene-Solr-repro/2582/

[...truncated 28 lines...]
[repro] Jenkins log URL: https://builds.apache.org/job/Lucene-Solr-BadApples-NightlyTests-master/43/consoleText

[repro] Revision: 345a655f216258c406c384ada9aa6d5f14e254f9

[repro] Ant options: -Dtests.multiplier=2 -Dtests.linedocsfile=/home/jenkins/jenkins-slave/workspace/Lucene-Solr-BadApples-NightlyTests-master/test-data/enwiki.random.lines.txt
[repro] Repro line:  ant test  -Dtestcase=TestSolrConfigHandlerCloud -Dtests.method=test -Dtests.seed=988D9624F13AA4D8 -Dtests.multiplier=2 -Dtests.nightly=true -Dtests.slow=true -Dtests.badapples=true -Dtests.linedocsfile=/home/jenkins/jenkins-slave/workspace/Lucene-Solr-BadApples-NightlyTests-master/test-data/enwiki.random.lines.txt -Dtests.locale=ro -Dtests.timezone=America/Bogota -Dtests.asserts=true -Dtests.file.encoding=US-ASCII

[repro] Repro line:  ant test  -Dtestcase=HdfsRecoveryZkTest -Dtests.seed=988D9624F13AA4D8 -Dtests.multiplier=2 -Dtests.nightly=true -Dtests.slow=true -Dtests.badapples=true -Dtests.linedocsfile=/home/jenkins/jenkins-slave/workspace/Lucene-Solr-BadApples-NightlyTests-master/test-data/enwiki.random.lines.txt -Dtests.locale=sr-CS -Dtests.timezone=Etc/GMT+4 -Dtests.asserts=true -Dtests.file.encoding=US-ASCII

[repro] Repro line:  ant test  -Dtestcase=SearchRateTriggerIntegrationTest -Dtests.method=testDeleteNode -Dtests.seed=988D9624F13AA4D8 -Dtests.multiplier=2 -Dtests.nightly=true -Dtests.slow=true -Dtests.badapples=true -Dtests.linedocsfile=/home/jenkins/jenkins-slave/workspace/Lucene-Solr-BadApples-NightlyTests-master/test-data/enwiki.random.lines.txt -Dtests.locale=en-SG -Dtests.timezone=Chile/Continental -Dtests.asserts=true -Dtests.file.encoding=US-ASCII

[repro] Repro line:  ant test  -Dtestcase=HdfsRestartWhileUpdatingTest -Dtests.seed=988D9624F13AA4D8 -Dtests.multiplier=2 -Dtests.nightly=true -Dtests.slow=true -Dtests.badapples=true -Dtests.linedocsfile=/home/jenkins/jenkins-slave/workspace/Lucene-Solr-BadApples-NightlyTests-master/test-data/enwiki.random.lines.txt -Dtests.locale=es-AR -Dtests.timezone=ACT -Dtests.asserts=true -Dtests.file.encoding=US-ASCII

[repro] Repro line:  ant test  -Dtestcase=TestSimTriggerIntegration -Dtests.method=testNodeMarkersRegistration -Dtests.seed=988D9624F13AA4D8 -Dtests.multiplier=2 -Dtests.nightly=true -Dtests.slow=true -Dtests.badapples=true -Dtests.linedocsfile=/home/jenkins/jenkins-slave/workspace/Lucene-Solr-BadApples-NightlyTests-master/test-data/enwiki.random.lines.txt -Dtests.locale=ca-ES -Dtests.timezone=ECT -Dtests.asserts=true -Dtests.file.encoding=US-ASCII
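
To re-run one of these locally, a minimal sketch (assuming a lucene-solr checkout at the revision below; the -Dtests.linedocsfile path exists only on the Jenkins slave and can be dropped, letting the test framework fall back to its bundled line-docs file):

    # check out the revision the repro job used, then run from the failing module
    git checkout 345a655f216258c406c384ada9aa6d5f14e254f9
    cd solr/core
    ant test -Dtestcase=HdfsRecoveryZkTest -Dtests.seed=988D9624F13AA4D8 \
        -Dtests.multiplier=2 -Dtests.nightly=true -Dtests.slow=true -Dtests.badapples=true \
        -Dtests.locale=sr-CS -Dtests.timezone=Etc/GMT+4 \
        -Dtests.asserts=true -Dtests.file.encoding=US-ASCII

The seed makes the randomized parts of the run deterministic, but these are distributed/cloud suites, so thread timing still varies; a few clean local runs do not prove the failure is gone.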

[repro] git rev-parse --abbrev-ref HEAD
[repro] git rev-parse HEAD
[repro] Initial local git branch/revision: 345a655f216258c406c384ada9aa6d5f14e254f9
[repro] git fetch
[repro] git checkout 345a655f216258c406c384ada9aa6d5f14e254f9

[...truncated 1 lines...]
[repro] git merge --ff-only

[...truncated 1 lines...]
[repro] ant clean

[...truncated 6 lines...]
[repro] Test suites by module:
[repro]    solr/core
[repro]       TestSimTriggerIntegration
[repro]       HdfsRestartWhileUpdatingTest
[repro]       HdfsRecoveryZkTest
[repro]       TestSolrConfigHandlerCloud
[repro]       SearchRateTriggerIntegrationTest
[repro] ant compile-test

[...truncated 3592 lines...]
[repro] ant test-nocompile -Dtests.dups=5 -Dtests.maxfailures=25 -Dtests.class="*.TestSimTriggerIntegration|*.HdfsRestartWhileUpdatingTest|*.HdfsRecoveryZkTest|*.TestSolrConfigHandlerCloud|*.SearchRateTriggerIntegrationTest" -Dtests.showOutput=onerror -Dtests.multiplier=2 -Dtests.linedocsfile=/home/jenkins/jenkins-slave/workspace/Lucene-Solr-BadApples-NightlyTests-master/test-data/enwiki.random.lines.txt -Dtests.seed=988D9624F13AA4D8 -Dtests.multiplier=2 -Dtests.nightly=true -Dtests.slow=true -Dtests.badapples=true -Dtests.linedocsfile=/home/jenkins/jenkins-slave/workspace/Lucene-Solr-BadApples-NightlyTests-master/test-data/enwiki.random.lines.txt -Dtests.locale=ca-ES -Dtests.timezone=ECT -Dtests.asserts=true -Dtests.file.encoding=US-ASCII
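
Note that -Dtests.multiplier and -Dtests.linedocsfile appear twice above because the repro script appends the job-level Ant options (logged earlier) to the options taken from each repro line; both occurrences carry the same value, so the duplication is harmless. A de-duplicated equivalent, as a readability sketch only:

    ant test-nocompile -Dtests.dups=5 -Dtests.maxfailures=25 \
        -Dtests.class="*.TestSimTriggerIntegration|*.HdfsRestartWhileUpdatingTest|*.HdfsRecoveryZkTest|*.TestSolrConfigHandlerCloud|*.SearchRateTriggerIntegrationTest" \
        -Dtests.showOutput=onerror -Dtests.multiplier=2 \
        -Dtests.linedocsfile=/home/jenkins/jenkins-slave/workspace/Lucene-Solr-BadApples-NightlyTests-master/test-data/enwiki.random.lines.txt \
        -Dtests.seed=988D9624F13AA4D8 -Dtests.nightly=true -Dtests.slow=true -Dtests.badapples=true \
        -Dtests.locale=ca-ES -Dtests.timezone=ECT -Dtests.asserts=true -Dtests.file.encoding=US-ASCII

Here -Dtests.dups=5 re-runs each listed suite five times under the same seed, and -Dtests.maxfailures=25 stops the run once 25 failures accumulate.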

[...truncated 55949 lines...]
   [junit4]   2> 453046 ERROR (Finalizer) [    ] o.a.s.c.SolrCore REFCOUNT ERROR: unreferenced org.apache.solr.core.SolrCore@254390ce (control_collection_shard1_replica_n1) has a reference count of -1
   [junit4]   2> 453053 ERROR (Finalizer) [    ] o.a.s.c.SolrCore REFCOUNT ERROR: unreferenced org.apache.solr.core.SolrCore@31d1a577 (collection1_shard1_replica_n5) has a reference count of -1
   [junit4]   2> 453053 ERROR (Finalizer) [    ] o.a.s.c.SolrCore REFCOUNT ERROR: unreferenced org.apache.solr.core.SolrCore@357ad911 (collection1_shard1_replica_n3) has a reference count of -1
   [junit4]   2> 453054 ERROR (Finalizer) [    ] o.a.s.c.SolrCore REFCOUNT ERROR: unreferenced org.apache.solr.core.SolrCore@5ef1b8ec (collection1_shard1_replica_n1) has a reference count of -1
   [junit4]   2> 453054 ERROR (Finalizer) [    ] o.a.s.c.SolrCore REFCOUNT ERROR: unreferenced org.apache.solr.core.SolrCore@719d3193 (collection1_shard1_replica_n5) has a reference count of -1
   [junit4]   2> 453054 ERROR (Finalizer) [    ] o.a.s.c.SolrCore REFCOUNT ERROR: unreferenced org.apache.solr.core.SolrCore@1569e2a1 (collection1_shard1_replica_n3) has a reference count of -1
   [junit4]   2> 453054 ERROR (Finalizer) [    ] o.a.s.c.SolrCore REFCOUNT ERROR: unreferenced org.apache.solr.core.SolrCore@13384c48 (collection1_shard1_replica_n1) has a reference count of -1
   [junit4]   2> 453115 INFO  (SUITE-HdfsRestartWhileUpdatingTest-seed#[988D9624F13AA4D8]-worker) [    ] o.a.s.SolrTestCaseJ4 SecureRandom sanity checks: test.solr.allowed.securerandom=null & java.security.egd=file:/dev/./urandom
   [junit4]   2> Creating dataDir: /x1/jenkins/jenkins-slave/workspace/Lucene-Solr-repro/solr/build/solr-core/test/J1/temp/solr.cloud.hdfs.HdfsRestartWhileUpdatingTest_988D9624F13AA4D8-002/init-core-data-001
   [junit4]   2> 453117 WARN  (SUITE-HdfsRestartWhileUpdatingTest-seed#[988D9624F13AA4D8]-worker) [    ] o.a.s.SolrTestCaseJ4 startTrackingSearchers: numOpens=7 numCloses=7
   [junit4]   2> 453117 INFO  (SUITE-HdfsRestartWhileUpdatingTest-seed#[988D9624F13AA4D8]-worker) [    ] o.a.s.SolrTestCaseJ4 Using PointFields (NUMERIC_POINTS_SYSPROP=true) w/NUMERIC_DOCVALUES_SYSPROP=false
   [junit4]   2> 453120 INFO  (SUITE-HdfsRestartWhileUpdatingTest-seed#[988D9624F13AA4D8]-worker) [    ] o.a.s.SolrTestCaseJ4 Randomized ssl (true) and clientAuth (false) via: @org.apache.solr.util.RandomizeSSL(reason=, value=NaN, ssl=NaN, clientAuth=NaN)
   [junit4]   2> 453129 INFO  (SUITE-HdfsRestartWhileUpdatingTest-seed#[988D9624F13AA4D8]-worker) [    ] o.a.s.BaseDistributedSearchTestCase Setting hostContext system property: /
   [junit4]   1> Formatting using clusterid: testClusterID
   [junit4]   2> 453210 WARN  (SUITE-HdfsRestartWhileUpdatingTest-seed#[988D9624F13AA4D8]-worker) [    ] o.a.h.m.i.MetricsConfig Cannot locate configuration: tried hadoop-metrics2-namenode.properties,hadoop-metrics2.properties
   [junit4]   2> 453221 WARN  (SUITE-HdfsRestartWhileUpdatingTest-seed#[988D9624F13AA4D8]-worker) [    ] o.a.h.h.HttpRequestLog Jetty request log can only be enabled using Log4j
   [junit4]   2> 453223 INFO  (SUITE-HdfsRestartWhileUpdatingTest-seed#[988D9624F13AA4D8]-worker) [    ] o.m.log jetty-6.1.26
   [junit4]   2> 453247 INFO  (SUITE-HdfsRestartWhileUpdatingTest-seed#[988D9624F13AA4D8]-worker) [    ] o.m.log Extract jar:file:/x1/jenkins/.ivy2/cache/org.apache.hadoop/hadoop-hdfs/tests/hadoop-hdfs-2.7.4-tests.jar!/webapps/hdfs to ./temp/Jetty_localhost_43585_hdfs____b14ddp/webapp
   [junit4]   2> 453678 INFO  (SUITE-HdfsRestartWhileUpdatingTest-seed#[988D9624F13AA4D8]-worker) [    ] o.m.log Started HttpServer2$SelectChannelConnectorWithSafeStartup@localhost:43585
   [junit4]   2> 453863 WARN  (SUITE-HdfsRestartWhileUpdatingTest-seed#[988D9624F13AA4D8]-worker) [    ] o.a.h.h.HttpRequestLog Jetty request log can only be enabled using Log4j
   [junit4]   2> 453865 INFO  (SUITE-HdfsRestartWhileUpdatingTest-seed#[988D9624F13AA4D8]-worker) [    ] o.m.log jetty-6.1.26
   [junit4]   2> 453890 INFO  (SUITE-HdfsRestartWhileUpdatingTest-seed#[988D9624F13AA4D8]-worker) [    ] o.m.log Extract jar:file:/x1/jenkins/.ivy2/cache/org.apache.hadoop/hadoop-hdfs/tests/hadoop-hdfs-2.7.4-tests.jar!/webapps/datanode to ./temp/Jetty_localhost_34001_datanode____n5w5xz/webapp
   [junit4]   2> 454343 INFO  (SUITE-HdfsRestartWhileUpdatingTest-seed#[988D9624F13AA4D8]-worker) [    ] o.m.log Started HttpServer2$SelectChannelConnectorWithSafeStartup@localhost:34001
   [junit4]   2> 454430 WARN  (SUITE-HdfsRestartWhileUpdatingTest-seed#[988D9624F13AA4D8]-worker) [    ] o.a.h.h.HttpRequestLog Jetty request log can only be enabled using Log4j
   [junit4]   2> 454432 INFO  (SUITE-HdfsRestartWhileUpdatingTest-seed#[988D9624F13AA4D8]-worker) [    ] o.m.log jetty-6.1.26
   [junit4]   2> 454454 INFO  (SUITE-HdfsRestartWhileUpdatingTest-seed#[988D9624F13AA4D8]-worker) [    ] o.m.log Extract jar:file:/x1/jenkins/.ivy2/cache/org.apache.hadoop/hadoop-hdfs/tests/hadoop-hdfs-2.7.4-tests.jar!/webapps/datanode to ./temp/Jetty_localhost_43030_datanode____xr9tp1/webapp
   [junit4]   2> 454589 ERROR (DataNode: [[[DISK]file:/x1/jenkins/jenkins-slave/workspace/Lucene-Solr-repro/solr/build/solr-core/test/J1/temp/solr.cloud.hdfs.HdfsRestartWhileUpdatingTest_988D9624F13AA4D8-002/tempDir-001/hdfsBaseDir/data/data1/, [DISK]file:/x1/jenkins/jenkins-slave/workspace/Lucene-Solr-repro/solr/build/solr-core/test/J1/temp/solr.cloud.hdfs.HdfsRestartWhileUpdatingTest_988D9624F13AA4D8-002/tempDir-001/hdfsBaseDir/data/data2/]]  heartbeating to localhost/127.0.0.1:34449) [    ] o.a.h.h.s.d.DirectoryScanner dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value below 1 ms/sec. Assuming default value of 1000
   [junit4]   2> 454623 INFO  (Block report processor) [    ] BlockStateChange BLOCK* processReport 0x19a2dbed133971: from storage DS-bec431a2-3998-4100-a6cd-025eefa292a4 node DatanodeRegistration(127.0.0.1:43280, datanodeUuid=1e6be087-edad-45ac-aed2-a6315e9766ae, infoPort=44349, infoSecurePort=0, ipcPort=36257, storageInfo=lv=-56;cid=testClusterID;nsid=1290777053;c=0), blocks: 0, hasStaleStorage: true, processing time: 1 msecs
   [junit4]   2> 454623 INFO  (Block report processor) [    ] BlockStateChange BLOCK* processReport 0x19a2dbed133971: from storage DS-fb9b3349-19e4-4319-aa54-52f22d177d40 node DatanodeRegistration(127.0.0.1:43280, datanodeUuid=1e6be087-edad-45ac-aed2-a6315e9766ae, infoPort=44349, infoSecurePort=0, ipcPort=36257, storageInfo=lv=-56;cid=testClusterID;nsid=1290777053;c=0), blocks: 0, hasStaleStorage: false, processing time: 0 msecs
   [junit4]   2> 454914 INFO  (SUITE-HdfsRestartWhileUpdatingTest-seed#[988D9624F13AA4D8]-worker) [    ] o.m.log Started HttpServer2$SelectChannelConnectorWithSafeStartup@localhost:43030
   [junit4]   2> 455230 ERROR (DataNode: [[[DISK]file:/x1/jenkins/jenkins-slave/workspace/Lucene-Solr-repro/solr/build/solr-core/test/J1/temp/solr.cloud.hdfs.HdfsRestartWhileUpdatingTest_988D9624F13AA4D8-002/tempDir-001/hdfsBaseDir/data/data3/, [DISK]file:/x1/jenkins/jenkins-slave/workspace/Lucene-Solr-repro/solr/build/solr-core/test/J1/temp/solr.cloud.hdfs.HdfsRestartWhileUpdatingTest_988D9624F13AA4D8-002/tempDir-001/hdfsBaseDir/data/data4/]]  heartbeating to localhost/127.0.0.1:34449) [    ] o.a.h.h.s.d.DirectoryScanner dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value below 1 ms/sec. Assuming default value of 1000
   [junit4]   2> 455244 INFO  (Block report processor) [    ] BlockStateChange BLOCK* processReport 0x19a2dc123017b1: from storage DS-c4a0be18-b8e8-4d19-ba11-eea6f9392bd2 node DatanodeRegistration(127.0.0.1:45569, datanodeUuid=70cce323-9e2a-4371-8a64-5601f340f4b4, infoPort=40751, infoSecurePort=0, ipcPort=45879, storageInfo=lv=-56;cid=testClusterID;nsid=1290777053;c=0), blocks: 0, hasStaleStorage: true, processing time: 1 msecs
   [junit4]   2> 455245 INFO  (Block report processor) [    ] BlockStateChange BLOCK* processReport 0x19a2dc123017b1: from storage DS-42c1d2a9-85fe-420f-8bb7-86b892ef850e node DatanodeRegistration(127.0.0.1:45569, datanodeUuid=70cce323-9e2a-4371-8a64-5601f340f4b4, infoPort=40751, infoSecurePort=0, ipcPort=45879, storageInfo=lv=-56;cid=testClusterID;nsid=1290777053;c=0), blocks: 0, hasStaleStorage: false, processing time: 0 msecs
   [junit4]   2> 455425 INFO  (TEST-HdfsRestartWhileUpdatingTest.test-seed#[988D9624F13AA4D8]) [    ] o.a.s.c.ZkTestServer STARTING ZK TEST SERVER
   [junit4]   2> 455426 INFO  (ZkTestServer Run Thread) [    ] o.a.s.c.ZkTestServer client port:0.0.0.0/0.0.0.0:0
   [junit4]   2> 455426 INFO  (ZkTestServer Run Thread) [    ] o.a.s.c.ZkTestServer Starting server
   [junit4]   2> 455526 INFO  (TEST-HdfsRestartWhileUpdatingTest.test-seed#[988D9624F13AA4D8]) [    ] o.a.s.c.ZkTestServer start zk server on port:35950
   [junit4]   2> 455526 INFO  (TEST-HdfsRestartWhileUpdatingTest.test-seed#[988D9624F13AA4D8]) [    ] o.a.s.c.ZkTestServer parse host and port list: 127.0.0.1:35950
   [junit4]   2> 455526 INFO  (TEST-HdfsRestartWhileUpdatingTest.test-seed#[988D9624F13AA4D8]) [    ] o.a.s.c.ZkTestServer connecting to 127.0.0.1 35950
   [junit4]   2> 455548 INFO  (zkConnectionManagerCallback-258-thread-1) [    ] o.a.s.c.c.ConnectionManager zkClient has connected
   [junit4]   2> 455581 INFO  (zkConnectionManagerCallback-260-thread-1) [    ] o.a.s.c.c.ConnectionManager zkClient has connected
   [junit4]   2> 455583 INFO  (TEST-HdfsRestartWhileUpdatingTest.test-seed#[988D9624F13AA4D8]) [    ] o.a.s.c.ZkTestServer put /x1/jenkins/jenkins-slave/workspace/Lucene-Solr-repro/solr/core/src/test-files/solr/collection1/conf/solrconfig-tlog.xml to /configs/conf1/solrconfig.xml
   [junit4]   2> 455586 INFO  (TEST-HdfsRestartWhileUpdatingTest.test-seed#[988D9624F13AA4D8]) [    ] o.a.s.c.ZkTestServer put /x1/jenkins/jenkins-slave/workspace/Lucene-Solr-repro/solr/core/src/test-files/solr/collection1/conf/schema15.xml to /configs/conf1/schema.xml
   [junit4]   2> 455594 INFO  (TEST-HdfsRestartWhileUpdatingTest.test-seed#[988D9624F13AA4D8]) [    ] o.a.s.c.ZkTestServer put /x1/jenkins/jenkins-slave/workspace/Lucene-Solr-repro/solr/core/src/test-files/solr/collection1/conf/solrconfig.snippet.randomindexconfig.xml to /configs/conf1/solrconfig.snippet.randomindexconfig.xml
   [junit4]   2> 455600 INFO  (TEST-HdfsRestartWhileUpdatingTest.test-seed#[988D9624F13AA4D8]) [    ] o.a.s.c.ZkTestServer put /x1/jenkins/jenkins-slave/workspace/Lucene-Solr-repro/solr/core/src/test-files/solr/collection1/conf/stopwords.txt to /configs/conf1/stopwords.txt
   [junit4]   2> 455606 INFO  (TEST-HdfsRestartWhileUpdatingTest.test-seed#[988D9624F13AA4D8]) [    ] o.a.s.c.ZkTestServer put /x1/jenkins/jenkins-slave/workspace/Lucene-Solr-repro/solr/core/src/test-files/solr/collection1/conf/protwords.txt to /configs/conf1/protwords.txt
   [junit4]   2> 455608 INFO  (TEST-HdfsRestartWhileUpdatingTest.test-seed#[988D9624F13AA4D8]) [    ] o.a.s.c.ZkTestServer put /x1/jenkins/jenkins-slave/workspace/Lucene-Solr-repro/solr/core/src/test-files/solr/collection1/conf/currency.xml to /configs/conf1/currency.xml
   [junit4]   2> 455615 INFO  (TEST-HdfsRestartWhileUpdatingTest.test-seed#[988D9624F13AA4D8]) [    ] o.a.s.c.ZkTestServer put /x1/jenkins/jenkins-slave/workspace/Lucene-Solr-repro/solr/core/src/test-files/solr/collection1/conf/enumsConfig.xml to /configs/conf1/enumsConfig.xml
   [junit4]   2> 455617 INFO  (TEST-HdfsRestartWhileUpdatingTest.test-seed#[988D9624F13AA4D8]) [    ] o.a.s.c.ZkTestServer put /x1/jenkins/jenkins-slave/workspace/Lucene-Solr-repro/solr/core/src/test-files/solr/collection1/conf/open-exchange-rates.json to /configs/conf1/open-exchange-rates.json
   [junit4]   2> 455619 INFO  (TEST-HdfsRestartWhileUpdatingTest.test-seed#[988D9624F13AA4D8]) [    ] o.a.s.c.ZkTestServer put /x1/jenkins/jenkins-slave/workspace/Lucene-Solr-repro/solr/core/src/test-files/solr/collection1/conf/mapping-ISOLatin1Accent.txt to /configs/conf1/mapping-ISOLatin1Accent.txt
   [junit4]   2> 455621 INFO  (TEST-HdfsRestartWhileUpdatingTest.test-seed#[988D9624F13AA4D8]) [    ] o.a.s.c.ZkTestServer put /x1/jenkins/jenkins-slave/workspace/Lucene-Solr-repro/solr/core/src/test-files/solr/collection1/conf/old_synonyms.txt to /configs/conf1/old_synonyms.txt
   [junit4]   2> 455622 INFO  (TEST-HdfsRestartWhileUpdatingTest.test-seed#[988D9624F13AA4D8]) [    ] o.a.s.c.ZkTestServer put /x1/jenkins/jenkins-slave/workspace/Lucene-Solr-repro/solr/core/src/test-files/solr/collection1/conf/synonyms.txt to /configs/conf1/synonyms.txt
   [junit4]   2> 455628 INFO  (zkConnectionManagerCallback-264-thread-1) [    ] o.a.s.c.c.ConnectionManager zkClient has connected
   [junit4]   2> 455636 INFO  (TEST-HdfsRestartWhileUpdatingTest.test-seed#[988D9624F13AA4D8]) [    ] o.a.s.c.AbstractFullDistribZkTestBase Will use NRT replicas unless explicitly asked otherwise
   [junit4]   2> 455818 WARN  (TEST-HdfsRestartWhileUpdatingTest.test-seed#[988D9624F13AA4D8]) [    ] o.e.j.s.AbstractConnector Ignoring deprecated socket close linger time
   [junit4]   2> 455819 INFO  (TEST-HdfsRestartWhileUpdatingTest.test-seed#[988D9624F13AA4D8]) [    ] o.a.s.c.s.e.JettySolrRunner Start Jetty (original configured port=0)
   [junit4]   2> 455819 INFO  (TEST-HdfsRestartWhileUpdatingTest.test-seed#[988D9624F13AA4D8]) [    ] o.a.s.c.s.e.JettySolrRunner Trying to start Jetty on port 0 try number 1 ...
   [junit4]   2> 455819 INFO  (TEST-HdfsRestartWhileUpdatingTest.test-seed#[988D9624F13AA4D8]) [    ] o.e.j.s.Server jetty-9.4.14.v20181114; built: 2018-11-14T21:20:31.478Z; git: c4550056e785fb5665914545889f21dc136ad9e6; jvm 1.8.0_191-b12
   [junit4]   2> 455821 INFO  (TEST-HdfsRestartWhileUpdatingTest.test-seed#[988D9624F13AA4D8]) [    ] o.e.j.s.session DefaultSessionIdManager workerName=node0
   [junit4]   2> 455821 INFO  (TEST-HdfsRestartWhileUpdatingTest.test-seed#[988D9624F13AA4D8]) [    ] o.e.j.s.session No SessionScavenger set, using defaults
   [junit4]   2> 455821 INFO  (TEST-HdfsRestartWhileUpdatingTest.test-seed#[988D9624F13AA4D8]) [    ] o.e.j.s.session node0 Scavenging every 660000ms
   [junit4]   2> 455821 INFO  (TEST-HdfsRestartWhileUpdatingTest.test-seed#[988D9624F13AA4D8]) [    ] o.e.j.s.h.ContextHandler Started o.e.j.s.ServletContextHandler@26e1fff7{/,null,AVAILABLE}
   [junit4]   2> 455822 WARN  (TEST-HdfsRestartWhileUpdatingTest.test-seed#[988D9624F13AA4D8]) [    ] o.e.j.u.s.S.config No Client EndPointIdentificationAlgorithm configured for SslContextFactory@3265f667[provider=null,keyStore=null,trustStore=null]
   [junit4]   2> 455824 INFO  (TEST-HdfsRestartWhileUpdatingTest.test-seed#[988D9624F13AA4D8]) [    ] o.e.j.s.AbstractConnector Started ServerConnector@5718439c{SSL,[ssl, http/1.1]}{127.0.0.1:44030}
   [junit4]   2> 455824 INFO  (TEST-HdfsRestartWhileUpdatingTest.test-seed#[988D9624F13AA4D8]) [    ] o.e.j.s.Server Started @455949ms
   [junit4]   2> 455824 INFO  (TEST-HdfsRestartWhileUpdatingTest.test-seed#[988D9624F13AA4D8]) [    ] o.a.s.c.s.e.JettySolrRunner Jetty properties: {solr.data.dir=hdfs://localhost:34449/hdfs__localhost_34449__x1_jenkins_jenkins-slave_workspace_Lucene-Solr-repro_solr_build_solr-core_test_J1_temp_solr.cloud.hdfs.HdfsRestartWhileUpdatingTest_988D9624F13AA4D8-002_tempDir-002_control_data, hostContext=/, hostPort=44030, coreRootDirectory=/x1/jenkins/jenkins-slave/workspace/Lucene-Solr-repro/solr/build/solr-core/test/J1/../../../../../../../../../../x1/jenkins/jenkins-slave/workspace/Lucene-Solr-repro/solr/build/solr-core/test/J1/temp/solr.cloud.hdfs.HdfsRestartWhileUpdatingTest_988D9624F13AA4D8-002/control-001/cores}
   [junit4]   2> 455825 ERROR (TEST-HdfsRestartWhileUpdatingTest.test-seed#[988D9624F13AA4D8]) [    ] o.a.s.u.StartupLoggingUtils Missing Java Option solr.log.dir. Logging may be missing or incomplete.
   [junit4]   2> 455825 INFO  (TEST-HdfsRestartWhileUpdatingTest.test-seed#[988D9624F13AA4D8]) [    ] o.a.s.s.SolrDispatchFilter Using logger factory org.apache.logging.slf4j.Log4jLoggerFactory
   [junit4]   2> 455825 INFO  (TEST-HdfsRestartWhileUpdatingTest.test-seed#[988D9624F13AA4D8]) [    ] o.a.s.s.SolrDispatchFilter  ___      _       Welcome to Apache Solr™ version 8.0.0
   [junit4]   2> 455825 INFO  (TEST-HdfsRestartWhileUpdatingTest.test-seed#[988D9624F13AA4D8]) [    ] o.a.s.s.SolrDispatchFilter / __| ___| |_ _   Starting in cloud mode on port null
   [junit4]   2> 455825 INFO  (TEST-HdfsRestartWhileUpdatingTest.test-seed#[988D9624F13AA4D8]) [    ] o.a.s.s.SolrDispatchFilter \__ \/ _ \ | '_|  Install dir: null
   [junit4]   2> 455825 INFO  (TEST-HdfsRestartWhileUpdatingTest.test-seed#[988D9624F13AA4D8]) [    ] o.a.s.s.SolrDispatchFilter |___/\___/_|_|    Start time: 2018-12-29T10:59:25.392Z
   [junit4]   2> 455829 INFO  (zkConnectionManagerCallback-266-thread-1) [    ] o.a.s.c.c.ConnectionManager zkClient has connected
   [junit4]   2> 455831 INFO  (TEST-HdfsRestartWhileUpdatingTest.test-seed#[988D9624F13AA4D8]) [    ] o.a.s.s.SolrDispatchFilter Loading solr.xml from SolrHome (not found in ZooKeeper)
   [junit4]   2> 455831 INFO  (TEST-HdfsRestartWhileUpdatingTest.test-seed#[988D9624F13AA4D8]) [    ] o.a.s.c.SolrXmlConfig Loading container configuration from /x1/jenkins/jenkins-slave/workspace/Lucene-Solr-repro/solr/build/solr-core/test/J1/temp/solr.cloud.hdfs.HdfsRestartWhileUpdatingTest_988D9624F13AA4D8-002/control-001/solr.xml
   [junit4]   2> 455840 INFO  (TEST-HdfsRestartWhileUpdatingTest.test-seed#[988D9624F13AA4D8]) [    ] o.a.s.c.SolrXmlConfig Configuration parameter autoReplicaFailoverWorkLoopDelay is ignored
   [junit4]   2> 455840 INFO  (TEST-HdfsRestartWhileUpdatingTest.test-seed#[988D9624F13AA4D8]) [    ] o.a.s.c.SolrXmlConfig Configuration parameter autoReplicaFailoverBadNodeExpiration is ignored
   [junit4]   2> 455842 INFO  (TEST-HdfsRestartWhileUpdatingTest.test-seed#[988D9624F13AA4D8]) [    ] o.a.s.c.SolrXmlConfig MBean server found: com.sun.jmx.mbeanserver.JmxMBeanServer@749837b0, but no JMX reporters were configured - adding default JMX reporter.
   [junit4]   2> 455955 WARN  (TEST-HdfsRestartWhileUpdatingTest.test-seed#[988D9624F13AA4D8]) [    ] o.a.s.c.s.i.Http2SolrClient Create Http2SolrClient with HTTP/1.1 transport since Java 8 or lower versions does not support SSL + HTTP/2
   [junit4]   2> 455973 WARN  (TEST-HdfsRestartWhileUpdatingTest.test-seed#[988D9624F13AA4D8]) [    ] o.e.j.u.s.S.config No Client EndPointIdentificationAlgorithm configured for SslContextFactory@12ae037a[provider=null,keyStore=null,trustStore=null]
   [junit4]   2> 455978 WARN  (TEST-HdfsRestartWhileUpdatingTest.test-seed#[988D9624F13AA4D8]) [    ] o.a.s.c.s.i.Http2SolrClient Create Http2SolrClient with HTTP/1.1 transport since Java 8 or lower versions does not support SSL + HTTP/2
   [junit4]   2> 455980 WARN  (TEST-HdfsRestartWhileUpdatingTest.test-seed#[988D9624F13AA4D8]) [    ] o.e.j.u.s.S.config No Client EndPointIdentificationAlgorithm configured for SslContextFactory@26bcffbb[provider=null,keyStore=null,trustStore=null]
   [junit4]   2> 455982 INFO  (TEST-HdfsRestartWhileUpdatingTest.test-seed#[988D9624F13AA4D8]) [    ] o.a.s.c.ZkContainer Zookeeper client=127.0.0.1:35950/solr
   [junit4]   2> 455992 INFO  (zkConnectionManagerCallback-272-thread-1) [    ] o.a.s.c.c.ConnectionManager zkClient has connected
   [junit4]   2> 456019 INFO  (zkConnectionManagerCallback-274-thread-1) [    ] o.a.s.c.c.ConnectionManager zkClient has connected
   [junit4]   2> 456178 INFO  (TEST-HdfsRestartWhileUpdatingTest.test-seed#[988D9624F13AA4D8]) [n:127.0.0.1:44030_    ] o.a.s.c.OverseerElectionContext I am going to be the leader 127.0.0.1:44030_
   [junit4]   2> 456179 INFO  (TEST-HdfsRestartWhileUpdatingTest.test-seed#[988D9624F13AA4D8]) [n:127.0.0.1:44030_    ] o.a.s.c.Overseer Overseer (id=72530497926529029-127.0.0.1:44030_-n_0000000000) starting
   [junit4]   2> 456205 INFO  (zkConnectionManagerCallback-281-thread-1) [    ] o.a.s.c.c.ConnectionManager zkClient has connected
   [junit4]   2> 456209 INFO  (TEST-HdfsRestartWhileUpdatingTest.test-seed#[988D9624F13AA4D8]) [n:127.0.0.1:44030_    ] o.a.s.c.s.i.ZkClientClusterStateProvider Cluster at 127.0.0.1:35950/solr ready
   [junit4]   2> 456211 INFO  (OverseerStateUpdate-72530497926529029-127.0.0.1:44030_-n_0000000000) [n:127.0.0.1:44030_    ] o.a.s.c.Overseer Starting to work on the main queue : 127.0.0.1:44030_
   [junit4]   2> 456212 INFO  (TEST-HdfsRestartWhileUpdatingTest.test-seed#[988D9624F13AA4D8]) [n:127.0.0.1:44030_    ] o.a.s.c.ZkController Register node as live in ZooKeeper:/live_nodes/127.0.0.1:44030_
   [junit4]   2> 456217 INFO  (zkCallback-273-thread-1) [    ] o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (0) -> (1)
   [junit4]   2> 456218 INFO  (zkCallback-280-thread-1) [    ] o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (0) -> (1)
   [junit4]   2> 456258 INFO  (TEST-HdfsRestartWhileUpdatingTest.test-seed#[988D9624F13AA4D8]) [n:127.0.0.1:44030_    ] o.a.s.h.a.MetricsHistoryHandler No .system collection, keeping metrics history in memory.
   [junit4]   2> 456291 INFO  (TEST-HdfsRestartWhileUpdatingTest.test-seed#[988D9624F13AA4D8]) [n:127.0.0.1:44030_    ] o.a.s.m.r.SolrJmxReporter JMX monitoring for 'solr.node' (registry 'solr.node') enabled at server: com.sun.jmx.mbeanserver.JmxMBeanServer@749837b0
   [junit4]   2> 456307 INFO  (TEST-HdfsRestartWhileUpdatingTest.test-seed#[988D9624F13AA4D8]) [n:127.0.0.1:44030_    ] o.a.s.m.r.SolrJmxReporter JMX monitoring for 'solr.jvm' (registry 'solr.jvm') enabled at server: com.sun.jmx.mbeanserver.JmxMBeanServer@749837b0
   [junit4]   2> 456308 INFO  (TEST-HdfsRestartWhileUpdatingTest.test-seed#[988D9624F13AA4D8]) [n:127.0.0.1:44030_    ] o.a.s.m.r.SolrJmxReporter JMX monitoring for 'solr.jetty' (registry 'solr.jetty') enabled at server: com.sun.jmx.mbeanserver.JmxMBeanServer@749837b0
   [junit4]   2> 456310 INFO  (TEST-HdfsRestartWhileUpdatingTest.test-seed#[988D9624F13AA4D8]) [n:127.0.0.1:44030_    ] o.a.s.c.CorePropertiesLocator Found 0 core definitions underneath /x1/jenkins/jenkins-slave/workspace/Lucene-Solr-repro/solr/build/solr-core/test/J1/../../../../../../../../../../x1/jenkins/jenkins-slave/workspace/Lucene-Solr-repro/solr/build/solr-core/test/J1/temp/solr.cloud.hdfs.HdfsRestartWhileUpdatingTest_988D9624F13AA4D8-002/control-001/cores
   [junit4]   2> 456420 INFO  (zkConnectionManagerCallback-287-thread-1) [    ] o.a.s.c.c.ConnectionManager zkClient has connected
   [junit4]   2> 456427 INFO  (TEST-HdfsRestartWhileUpdatingTest.test-seed#[988D9624F13AA4D8]) [    ] o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (0) -> (1)
   [junit4]   2> 456430 INFO  (TEST-HdfsRestartWhileUpdatingTest.test-seed#[988D9624F13AA4D8]) [    ] o.a.s.c.s.i.ZkClientClusterStateProvider Cluster at 127.0.0.1:35950/solr ready
   [junit4]   2> 456461 INFO  (qtp2033508723-1983) [n:127.0.0.1:44030_    ] o.a.s.h.a.CollectionsHandler Invoked Collection Action :create with params collection.configName=conf1&name=control_collection&nrtReplicas=1&action=CREATE&numShards=1&createNodeSet=127.0.0.1:44030_&wt=javabin&version=2 and sendToOCPQueue=true
   [junit4]   2> 456467 INFO  (OverseerThreadFactory-185-thread-1-processing-n:127.0.0.1:44030_) [n:127.0.0.1:44030_    ] o.a.s.c.a.c.CreateCollectionCmd Create collection control_collection
   [junit4]   2> 456582 INFO  (qtp2033508723-1986) [n:127.0.0.1:44030_    ] o.a.s.s.HttpSolrCall [admin] webapp=null path=/admin/metrics params={prefix=CONTAINER.fs.usableSpace,CONTAINER.fs.totalSpace,CORE.coreName&wt=javabin&version=2&group=solr.node,solr.core} status=0 QTime=0
   [junit4]   2> 456590 INFO  (qtp2033508723-1987) [n:127.0.0.1:44030_    ] o.a.s.s.HttpSolrCall [admin] webapp=null path=/admin/metrics params={prefix=CONTAINER.fs.usableSpace,CONTAINER.fs.totalSpace,CORE.coreName&wt=javabin&version=2&group=solr.node,solr.core} status=0 QTime=0
   [junit4]   2> 456591 INFO  (qtp2033508723-1984) [n:127.0.0.1:44030_    ] o.a.s.s.HttpSolrCall [admin] webapp=null path=/admin/metrics params={prefix=CONTAINER.fs.usableSpace,CONTAINER.fs.totalSpace,CORE.coreName&wt=javabin&version=2&group=solr.node,solr.core} status=0 QTime=0
   [junit4]   2> 456592 INFO  (qtp2033508723-1985) [n:127.0.0.1:44030_    ] o.a.s.s.HttpSolrCall [admin] webapp=null path=/admin/metrics params={prefix=CONTAINER.fs.usableSpace,CONTAINER.fs.totalSpace,CORE.coreName&wt=javabin&version=2&group=solr.node,solr.core} status=0 QTime=0
   [junit4]   2> 456595 INFO  (qtp2033508723-1986) [n:127.0.0.1:44030_    ] o.a.s.s.HttpSolrCall [admin] webapp=null path=/admin/metrics params={prefix=CONTAINER.fs.usableSpace,CONTAINER.fs.totalSpace,CORE.coreName&wt=javabin&version=2&group=solr.node,solr.core} status=0 QTime=0
   [junit4]   2> 456626 INFO  (qtp2033508723-1987) [n:127.0.0.1:44030_    x:control_collection_shard1_replica_n1] o.a.s.h.a.CoreAdminOperation core create command qt=/admin/cores&collection.configName=conf1&newCollection=true&name=control_collection_shard1_replica_n1&action=CREATE&numShards=1&collection=control_collection&shard=shard1&wt=javabin&version=2&replicaType=NRT
   [junit4]   2> 456627 INFO  (qtp2033508723-1987) [n:127.0.0.1:44030_    x:control_collection_shard1_replica_n1] o.a.s.c.TransientSolrCoreCacheDefault Allocating transient cache for 4 transient cores
   [junit4]   2> 457671 INFO  (qtp2033508723-1987) [n:127.0.0.1:44030_ c:control_collection s:shard1  x:control_collection_shard1_replica_n1] o.a.s.c.SolrConfig Using Lucene MatchVersion: 8.0.0
   [junit4]   2> 457745 INFO  (qtp2033508723-1987) [n:127.0.0.1:44030_ c:control_collection s:shard1  x:control_collection_shard1_replica_n1] o.a.s.s.IndexSchema [control_collection_shard1_replica_n1] Schema name=test
   [junit4]   2> 457931 INFO  (qtp2033508723-1987) [n:127.0.0.1:44030_ c:control_collection s:shard1  x:control_collection_shard1_replica_n1] o.a.s.s.IndexSchema Loaded schema test/1.6 with uniqueid field id
   [junit4]   2> 457975 INFO  (qtp2033508723-1987) [n:127.0.0.1:44030_ c:control_collection s:shard1  x:control_collection_shard1_replica_n1] o.a.s.c.CoreContainer Creating SolrCore 'control_collection_shard1_replica_n1' using configuration from collection control_collection, trusted=true
   [junit4]   2> 457976 INFO  (qtp2033508723-1987) [n:127.0.0.1:44030_ c:control_collection s:shard1  x:control_collection_shard1_replica_n1] o.a.s.m.r.SolrJmxReporter JMX monitoring for 'solr.core.control_collection.shard1.replica_n1' (registry 'solr.core.control_collection.shard1.replica_n1') enabled at server: com.sun.jmx.mbeanserver.JmxMBeanServer@749837b0
   [junit4]   2> 457976 INFO  (qtp2033508723-1987) [n:127.0.0.1:44030_ c:control_collection s:shard1  x:control_collection_shard1_replica_n1] o.a.s.c.SolrCore [[control_collection_shard1_replica_n1] ] Opening new SolrCore at [/x1/jenkins/jenkins-slave/workspace/Lucene-Solr-repro/solr/build/solr-core/test/J1/temp/solr.cloud.hdfs.HdfsRestartWhileUpdatingTest_988D9624F13AA4D8-002/control-001/cores/control_collection_shard1_replica_n1], dataDir=[/x1/jenkins/jenkins-slave/workspace/Lucene-Solr-repro/solr/build/solr-core/test/J1/../../../../../../../../../../x1/jenkins/jenkins-slave/workspace/Lucene-Solr-repro/solr/build/solr-core/test/J1/temp/solr.cloud.hdfs.HdfsRestartWhileUpdatingTest_988D9624F13AA4D8-002/control-001/cores/control_collection_shard1_replica_n1/data/]
   [junit4]   2> 457993 INFO  (qtp2033508723-1987) [n:127.0.0.1:44030_ c:control_collection s:shard1  x:control_collection_shard1_replica_n1] o.a.s.u.RandomMergePolicy RandomMergePolicy wrapping class org.apache.lucene.index.TieredMergePolicy: [TieredMergePolicy: maxMergeAtOnce=6, maxMergeAtOnceExplicit=7, maxMergedSegmentMB=11.0712890625, floorSegmentMB=0.451171875, forceMergeDeletesPctAllowed=13.788308198062179, segmentsPerTier=12.0, maxCFSSegmentSizeMB=8.796093022207999E12, noCFSRatio=0.3507848830384257, deletesPctAllowed=35.068063914256506
   [junit4]   2> 458012 WARN  (qtp2033508723-1987) [n:127.0.0.1:44030_ c:control_collection s:shard1  x:control_collection_shard1_replica_n1] o.a.s.c.RequestHandlers INVALID paramSet a in requestHandler {type = requestHandler,name = /dump,class = DumpRequestHandler,attributes = {initParams=a, name=/dump, class=DumpRequestHandler},args = {defaults={a=A,b=B}}}
   [junit4]   2> 458106 INFO  (qtp2033508723-1987) [n:127.0.0.1:44030_ c:control_collection s:shard1  x:control_collection_shard1_replica_n1] o.a.s.u.UpdateHandler Using UpdateLog implementation: org.apache.solr.update.UpdateLog
   [junit4]   2> 458106 INFO  (qtp2033508723-1987) [n:127.0.0.1:44030_ c:control_collection s:shard1  x:control_collection_shard1_replica_n1] o.a.s.u.UpdateLog Initializing UpdateLog: dataDir= defaultSyncLevel=FLUSH numRecordsToKeep=100 maxNumLogsToKeep=10 numVersionBuckets=65536
   [junit4]   2> 458109 INFO  (qtp2033508723-1987) [n:127.0.0.1:44030_ c:control_collection s:shard1  x:control_collection_shard1_replica_n1] o.a.s.u.CommitTracker Hard AutoCommit: if uncommitted for 30000ms; 
   [junit4]   2> 458109 INFO  (qtp2033508723-1987) [n:127.0.0.1:44030_ c:control_collection s:shard1  x:control_collection_shard1_replica_n1] o.a.s.u.CommitTracker Soft AutoCommit: if uncommitted for 3000ms; 
   [junit4]   2> 458111 INFO  (qtp2033508723-1987) [n:127.0.0.1:44030_ c:control_collection s:shard1  x:control_collection_shard1_replica_n1] o.a.s.u.RandomMergePolicy RandomMergePolicy wrapping class org.apache.lucene.index.LogByteSizeMergePolicy: [LogByteSizeMergePolicy: minMergeSize=1677721, mergeFactor=10, maxMergeSize=2147483648, maxMergeSizeForForcedMerge=9223372036854775807, calibrateSizeByDeletes=false, maxMergeDocs=2147483647, maxCFSSegmentSizeMB=8.796093022207999E12, noCFSRatio=0.0]
   [junit4]   2> 458114 INFO  (qtp2033508723-1987) [n:127.0.0.1:44030_ c:control_collection s:shard1  x:control_collection_shard1_replica_n1] o.a.s.s.SolrIndexSearcher Opening [Searcher@4fb68ff7[control_collection_shard1_replica_n1] main]
   [junit4]   2> 458124 INFO  (qtp2033508723-1987) [n:127.0.0.1:44030_ c:control_collection s:shard1  x:control_collection_shard1_replica_n1] o.a.s.r.ManagedResourceStorage Configured ZooKeeperStorageIO with znodeBase: /configs/conf1
   [junit4]   2> 458125 INFO  (qtp2033508723-1987) [n:127.0.0.1:44030_ c:control_collection s:shard1  x:control_collection_shard1_replica_n1] o.a.s.r.ManagedResourceStorage Loaded null at path _rest_managed.json using ZooKeeperStorageIO:path=/configs/conf1
   [junit4]   2> 458126 INFO  (qtp2033508723-1987) [n:127.0.0.1:44030_ c:control_collection s:shard1  x:control_collection_shard1_replica_n1] o.a.s.h.ReplicationHandler Commits will be reserved for 10000ms.
   [junit4]   2> 458126 INFO  (qtp2033508723-1987) [n:127.0.0.1:44030_ c:control_collection s:shard1  x:control_collection_shard1_replica_n1] o.a.s.u.UpdateLog Could not find max version in index or recent updates, using new clock 1621183606494855168
   [junit4]   2> 458137 INFO  (searcherExecutor-190-thread-1-processing-n:127.0.0.1:44030_ x:control_collection_shard1_replica_n1 c:control_collection s:shard1) [n:127.0.0.1:44030_ c:control_collection s:shard1  x:control_collection_shard1_replica_n1] o.a.s.c.SolrCore [control_collection_shard1_replica_n1] Registered new searcher Searcher@4fb68ff7[control_collection_shard1_replica_n1] main{ExitableDirectoryReader(UninvertingDirectoryReader())}
   [junit4]   2> 458145 INFO  (qtp2033508723-1987) [n:127.0.0.1:44030_ c:control_collection s:shard1  x:control_collection_shard1_replica_n1] o.a.s.c.ZkShardTerms Successful update of terms at /collections/control_collection/terms/shard1 to Terms{values={core_node2=0}, version=0}
   [junit4]   2> 458145 INFO  (qtp2033508723-1987) [n:127.0.0.1:44030_ c:control_collection s:shard1  x:control_collection_shard1_replica_n1] o.a.s.c.ShardLeaderElectionContextBase make sure parent is created /collections/control_collection/leaders/shard1
   [junit4]   2> 458150 INFO  (qtp2033508723-1987) [n:127.0.0.1:44030_ c:control_collection s:shard1  x:control_collection_shard1_replica_n1] o.a.s.c.ShardLeaderElectionContext Enough replicas found to continue.
   [junit4]   2> 458150 INFO  (qtp2033508723-1987) [n:127.0.0.1:44030_ c:control_collection s:shard1  x:control_collection_shard1_replica_n1] o.a.s.c.ShardLeaderElectionContext I may be the new leader - try and sync
   [junit4]   2> 458150 INFO  (qtp2033508723-1987) [n:127.0.0.1:44030_ c:control_collection s:shard1  x:control_collection_shard1_replica_n1] o.a.s.c.SyncStrategy Sync replicas to https://127.0.0.1:44030/control_collection_shard1_replica_n1/
   [junit4]   2> 458151 INFO  (qtp2033508723-1987) [n:127.0.0.1:44030_ c:control_collection s:shard1  x:control_collection_shard1_replica_n1] o.a.s.c.SyncStrategy Sync Success - now sync replicas to me
   [junit4]   2> 458151 INFO  (qtp2033508723-1987) [n:127.0.0.1:44030_ c:control_collection s:shard1  x:control_collection_shard1_replica_n1] o.a.s.c.SyncStrategy https://127.0.0.1:44030/control_collection_shard1_replica_n1/ has no replicas
   [junit4]   2> 458151 INFO  (qtp2033508723-1987) [n:127.0.0.1:44030_ c:control_collection s:shard1  x:control_collection_shard1_replica_n1] o.a.s.c.ShardLeaderElectionContextBase Creating leader registration node /collections/control_collection/leaders/shard1/leader after winning as /collections/control_collection/leader_elect/shard1/election/72530497926529029-core_node2-n_0000000000
   [junit4]   2> 458157 INFO  (qtp2033508723-1987) [n:127.0.0.1:44030_ c:control_collection s:shard1  x:control_collection_shard1_replica_n1] o.a.s.c.ShardLeaderElectionContext I am the new leader: https://127.0.0.1:44030/control_collection_shard1_replica_n1/ shard1
   [junit4]   2> 458263 INFO  (zkCallback-273-thread-1) [    ] o.a.s.c.c.ZkStateReader A cluster state change: [WatchedEvent state:SyncConnected type:NodeDataChanged path:/collections/control_collection/state.json] for collection [control_collection] has occurred - updating... (live nodes size: [1])
   [junit4]   2> 458265 INFO  (qtp2033508723-1987) [n:127.0.0.1:44030_ c:control_collection s:shard1  x:control_collection_shard1_replica_n1] o.a.s.c.ZkController I am the leader, no recovery necessary
   [junit4]   2> 458274 INFO  (qtp2033508723-1987) [n:127.0.0.1:44030_ c:control_collection s:shard1  x:control_collection_shard1_replica_n1] o.a.s.s.HttpSolrCall [admin] webapp=null path=/admin/cores params={qt=/admin/cores&collection.configName=conf1&newCollection=true&name=control_collection_shard1_replica_n1&action=CREATE&numShards=1&collection=control_collection&shard=shard1&wt=javabin&version=2&replicaType=NRT} status=0 QTime=1648
   [junit4]   2> 458296 INFO  (qtp2033508723-1983) [n:127.0.0.1:44030_    ] o.a.s.h.a.CollectionsHandler Wait for new collection to be active for at most 45 seconds. Check all shard replicas
   [junit4]   2> 458375 INFO  (zkCallback-273-thread-2) [    ] o.a.s.c.c.ZkStateReader A cluster state change: [WatchedEvent state:SyncConnected type:NodeDataChanged path:/collections/control_collection/state.json] for collection [control_collection] has occurred - updating... (live nodes size: [1])
   [junit4]   2> 458375 INFO  (zkCallback-273-thread-1) [    ] o.a.s.c.c.ZkStateReader A cluster state change: [WatchedEvent state:SyncConnected type:NodeDataChanged path:/collections/control_collection/state.json] for collection [control_collection] has occurred - updating... (live nodes size: [1])
   [junit4]   2> 458376 INFO  (qtp2033508723-1983) [n:127.0.0.1:44030_    ] o.a.s.s.HttpSolrCall [admin] webapp=null path=/admin/collections params={collection.configName=conf1&name=control_collection&nrtReplicas=1&action=CREATE&numShards=1&createNodeSet=127.0.0.1:44030_&wt=javabin&version=2} status=0 QTime=1915
   [junit4]   2> 458404 WARN  (NIOServerCxn.Factory:0.0.0.0/0.0.0.0:0) [    ] o.a.z.s.NIOServerCnxn Unable to read additional data from client sessionid 0x101ae1a844b0007, likely client has closed socket
   [junit4]   2> 458454 INFO  (zkConnectionManagerCallback-292-thread-1) [    ] o.a.s.c.c.ConnectionManager zkClient has connected
   [junit4]   2> 458456 INFO  (TEST-HdfsRestartWhileUpdatingTest.test-seed#[988D9624F13AA4D8]) [    ] o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (0) -> (1)
   [junit4]   2> 458460 INFO  (TEST-HdfsRestartWhileUpdatingTest.test-seed#[988D9624F13AA4D8]) [    ] o.a.s.c.s.i.ZkClientClusterStateProvider Cluster at 127.0.0.1:35950/solr ready
   [junit4]   2> 458461 INFO  (TEST-HdfsRestartWhileUpdatingTest.test-seed#[988D9624F13AA4D8]) [    ] o.a.s.c.ChaosMonkey monkey: init - expire sessions:false cause connection loss:false
   [junit4]   2> 458467 INFO  (qtp2033508723-1983) [n:127.0.0.1:44030_    ] o.a.s.h.a.CollectionsHandler Invoked Collection Action :create with params collection.configName=conf1&name=collection1&nrtReplicas=1&action=CREATE&numShards=1&createNodeSet=&stateFormat=1&wt=javabin&version=2 and sendToOCPQueue=true
   [junit4]   2> 458475 INFO  (OverseerCollectionConfigSetProcessor-72530497926529029-127.0.0.1:44030_-n_0000000000) [n:127.0.0.1:44030_    ] o.a.s.c.OverseerTaskQueue Response ZK path: /overseer/collection-queue-work/qnr-0000000000 doesn't exist.  Requestor may have disconnected from ZooKeeper
   [junit4]   2> 458489 INFO  (OverseerThreadFactory-185-thread-2-processing-n:127.0.0.1:44030_) [n:127.0.0.1:44030_    ] o.a.s.c.a.c.CreateCollectionCmd Create collection collection1
   [junit4]   2> 458694 WARN  (OverseerThreadFactory-185-thread-2-processing-n:127.0.0.1:44030_) [n:127.0.0.1:44030_    ] o.a.s.c.a.c.CreateCollectionCmd It is unusual to create a collection (collection1) without cores.
   [junit4]   2> 458698 INFO  (qtp2033508723-1983) [n:127.0.0.1:44030_    ] o.a.s.h.a.CollectionsHandler Wait for new collection to be active for at most 45 seconds. Check all shard replicas
   [junit4]   2> 458701 INFO  (qtp2033508723-1983) [n:127.0.0.1:44030_    ] o.a.s.s.HttpSolrCall [admin] webapp=null path=/admin/collections params={collection.configName=conf1&name=collection1&nrtReplicas=1&action=CREATE&numShards=1&createNodeSet=&stateFormat=1&wt=javabin&version=2} status=0 QTime=233
   [junit4]   2> 458720 INFO  (TEST-HdfsRestartWhileUpdatingTest.test-seed#[988D9624F13AA4D8]) [    ] o.a.s.c.AbstractFullDistribZkTestBase Creating jetty instances pullReplicaCount=0 numOtherReplicas=3
   [junit4]   2> 458916 INFO  (TEST-HdfsRestartWhileUpdatingTest.test-seed#[988D9624F13AA4D8]) [    ] o.a.s.c.AbstractFullDistribZkTestBase create jetty 1 in directory /x1/jenkins/jenkins-slave/workspace/Lucene-Solr-repro/solr/build/solr-core/test/J1/temp/solr.cloud.hdfs.HdfsRestartWhileUpdatingTest_988D9624F13AA4D8-002/shard-1-001 of type NRT
   [junit4]   2> 458931 WARN  (closeThreadPool-293-thread-1) [    ] o.e.j.s.AbstractConnector Ignoring deprecated socket close linger time
   [junit4]   2> 458932 INFO  (closeThreadPool-293-thread-1) [    ] o.a.s.c.s.e.JettySolrRunner Start Jetty (original configured port=0)
   [junit4]   2> 458932 INFO  (closeThreadPool-293-thread-1) [    ] o.a.s.c.s.e.JettySolrRunner Trying to start Jetty on port 0 try number 1 ...
   [junit4]   2> 458932 INFO  (closeThreadPool-293-thread-1) [    ] o.e.j.s.Server jetty-9.4.14.v20181114; built: 2018-11-14T21:20:31.478Z; git: c4550056e785fb5665914545889f21dc136ad9e6; jvm 1.8.0_191-b12
   [junit4]   2> 458948 INFO  (closeThreadPool-293-thread-1) [    ] o.e.j.s.session DefaultSessionIdManager workerName=node0
   [junit4]   2> 458948 INFO  (closeThreadPool-293-thread-1) [    ] o.e.j.s.session No SessionScavenger set, using defaults
   [junit4]   2> 458949 INFO  (closeThreadPool-293-thread-1) [    ] o.e.j.s.session node0 Scavenging every 660000ms
   [junit4]   2> 458949 INFO  (closeThreadPool-293-thread-1) [    ] o.e.j.s.h.ContextHandler Started o.e.j.s.ServletContextHandler@3b6bc4a9{/,null,AVAILABLE}
   [junit4]   2> 458950 WARN  (closeThreadPool-293-thread-1) [    ] o.e.j.u.s.S.config No Client EndPointIdentificationAlgorithm configured for SslContextFactory@326cfcc5[provider=null,keyStore=null,trustStore=null]
   [junit4]   2> 458971 INFO  (closeThreadPool-293-thread-1) [    ] o.e.j.s.AbstractConnector Started ServerConnector@48ed1c5b{SSL,[ssl, http/1.1]}{127.0.0.1:37986}
   [junit4]   2> 458971 INFO  (closeThreadPool-293-thread-1) [    ] o.e.j.s.Server Started @459095ms
   [junit4]   2> 458971 INFO  (closeThreadPool-293-thread-1) [    ] o.a.s.c.s.e.JettySolrRunner Jetty properties: {solr.data.dir=hdfs://localhost:34449/hdfs__localhost_34449__x1_jenkins_jenkins-slave_workspace_Lucene-Solr-repro_solr_build_solr-core_test_J1_temp_solr.cloud.hdfs.HdfsRestartWhileUpdatingTest_988D9624F13AA4D8-002_tempDir-002_jetty1, replicaType=NRT, solrconfig=solrconfig.xml, hostContext=/, hostPort=37986, coreRootDirectory=/x1/jenkins/jenkins-slave/workspace/Lucene-Solr-repro/solr/build/solr-core/test/J1/../../../../../../../../../../x1/jenkins/jenkins-slave/workspace/Lucene-Solr-repro/solr/build/solr-core/test/J1/temp/solr.cloud.hdfs.HdfsRestartWhileUpdatingTest_988D9624F13AA4D8-002/shard-1-001/cores}
   [junit4]   2> 458971 ERROR (closeThreadPool-293-thread-1) [    ] o.a.s.u.StartupLoggingUtils Missing Java Option solr.log.dir. Logging may be missing or incomplete.
   [junit4]   2> 458971 INFO  (closeThreadPool-293-thread-1) [    ] o.a.s.s.SolrDispatchFilter Using logger factory org.apache.logging.slf4j.Log4jLoggerFactory
   [junit4]   2> 458971 INFO  (closeThreadPool-293-thread-1) [    ] o.a.s.s.SolrDispatchFilter  ___      _       Welcome to Apache Solr™ version 8.0.0
   [junit4]   2> 458971 INFO  (closeThreadPool-293-thread-1) [    ] o.a.s.s.SolrDispatchFilter / __| ___| |_ _   Starting in cloud mode on port null
   [junit4]   2> 458972 INFO  (closeThreadPool-293-thread-1) [    ] o.a.s.s.SolrDispatchFilter \__ \/ _ \ | '_|  Install dir: null
   [junit4]   2> 458972 INFO  (closeThreadPool-293-thread-1) [    ] o.a.s.s.SolrDispatchFilter |___/\___/_|_|    Start time: 2018-12-29T10:59:28.539Z
   [junit4]   2> 459000 INFO  (zkConnectionManagerCallback-295-thread-1) [    ] o.a.s.c.c.ConnectionManager zkClient has connected
   [junit4]   2> 459002 INFO  (closeThreadPool-293-thread-1) [    ] o.a.s.s.SolrDispatchFilter Loading solr.xml from SolrHome (not found in ZooKeeper)
   [junit4]   2> 459002 INFO  (closeThreadPool-293-thread-1) [    ] o.a.s.c.SolrXmlConfig Loading container configuration from /x1/jenkins/jenkins-slave/workspace/Lucene-Solr-repro/solr/build/solr-core/test/J1/temp/solr.cloud.hdfs.HdfsRestartWhileUpdatingTest_988D9624F13AA4D8-002/shard-1-001/solr.xml
   [junit4]   2> 459010 INFO  (closeThreadPool-293-thread-1) [    ] o.a.s.c.SolrXmlConfig Configuration parameter autoReplicaFailoverWorkLoopDelay is ignored
   [junit4]   2> 459010 INFO  (closeThreadPool-293-thread-1) [    ] o.a.s.c.SolrXmlConfig Configuration parameter autoReplicaFailoverBadNodeExpiration is ignored
   [junit4]   2> 459012 INFO  (closeThreadPool-293-thread-1) [    ] o.a.s.c.SolrXmlConfig MBean server found: com.sun.jmx.mbeanserver.JmxMBeanServer@749837b0, but no JMX reporters were configured - adding default JMX reporter.
   [junit4]   2> 459154 WARN  (closeThreadPool-293-thread-1) [    ] o.a.s.c.s.i.Http2SolrClient Create Http2SolrClient with HTTP/1.1 transport since Java 8 or lower versions does not support SSL + HTTP/2
   [junit4]   2> 459173 WARN  (closeThreadPool-293-thread-1) [    ] o.e.j.u.s.S.config No Client EndPointIdentificationAlgorithm configured for SslContextFactory@26469980[provider=null,keyStore=null,trustStore=null]
   [junit4]   2> 459190 WARN  (closeThreadPool-293-thread-1) [    ] o.a.s.c.s.i.Http2SolrClient Create Http2SolrClient with HTTP/1.1 transport since Java 8 or lower versions does not support SSL + HTTP/2
   [junit4]   2> 459225 INFO  (TEST-HdfsRestartWhileUpdatingTest.test-seed#[988D9624F13AA4D8]) [    ] o.a.s.c.AbstractFullDistribZkTestBase create jetty 2 in directory /x1/jenkins/jenkins-slave/workspace/Lucene-Solr-repro/solr/build/solr-core/test/J1/temp/solr.cloud.hdfs.HdfsRestartWhileUpdatingTest_988D9624F13AA4D8-002/shard-2-001 of type NRT
   [junit4]   2> 459237 WARN  (closeThreadPool-293-thread-1) [    ] o.e.j.u.s.S.config No Client EndPointIdentificationAlgorithm configured for SslContextFactory@1b847508[provider=null,keyStore=null,trustStore=null]
   [junit4]   2> 459238 WARN  (closeThreadPool-293-thread-2) [    ] o.e.j.s.AbstractConnector Ignoring deprecated socket close linger time
   [junit4]   2> 459238 INFO  (closeThreadPool-293-thread-2) [    ] o.a.s.c.s.e.JettySolrRunner Start Jetty (original configured port=0)
   [junit4]   2> 459239 INFO  (closeThreadPool-293-thread-2) [    ] o.a.s.c.s.e.JettySolrRunner Trying to start Jetty on port 0 try number 1 ...
   [junit4]   2> 459239 INFO  (closeThreadPool-293-thread-2) [    ] o.e.j.s.Server jetty-9.4.14.v20181114; built: 2018-11-14T21:20:31.478Z; git: c4550056e785fb5665914545889f21dc136ad9e6; jvm 1.8.0_191-b12
   [junit4]   2> 459239 INFO  (closeThreadPool-293-thread-1) [    ] o.a.s.c.ZkContainer Zookeeper client=127.0.0.1:35950/solr
   [junit4]   2> 459294 INFO  (closeThreadPool-293-thread-2) [    ] o.e.j.s.session DefaultSessionIdManager workerName=node0
   [junit4]   2> 459294 INFO  (closeThreadPool-293-thread-2) [    ] o.e.j.s.session No SessionScavenger set, using defaults
   [junit4]   2> 459294 INFO  (closeThreadPool-293-thread-2) [    ] o.e.j.s.session node0 Scavenging every 600000ms
   [junit4]   2> 459295 INFO  (closeThreadPool-293-thread-2) [    ] o.e.j.s.h.ContextHandler Started o.e.j.s.ServletContextHandler@550debc3{/,null,AVAILABLE}
   [junit4]   2> 459296 WARN  (closeThreadPool-293-thread-2) [    ] o.e.j.u.s.S.config No Client EndPointIdentificationAlgorithm configured for SslContextFactory@1ab76913[provider=null,keyStore=null,trustStore=null]
   [junit4]   2> 459333 INFO  (zkConnectionManagerCallback-301-thread-1) [    ] o.a.s.c.c.ConnectionManager zkClient has connected
   [junit4]   2> 459337 INFO  (closeThreadPool-293-thread-2) [    ] o.e.j.s.AbstractConnector Started ServerConnector@742785e{SSL,[ssl, http/1.1]}{127.0.0.1:40898}
   [junit4]   2> 459337 INFO  (closeThreadPool-293-thread-2) [    ] o.e.j.s.Server Started @459461ms
   [junit4]   2> 459337 INFO  (closeThreadPool-293-thread-2) [    ] o.a.s.c.s.e.JettySolrRunner Jetty properties: {solr.data.dir=hdfs://localhost:34449/hdfs__localhost_34449__x1_jenkins_jenkins-slave_workspace_Lucene-Solr-repro_solr_build_solr-core_test_J1_temp_solr.cloud.hdfs.HdfsRestartWhileUpdatingTest_988D9624F13AA4D8-002_tempDir-002_jetty2, replicaType=NRT, solrconfig=solrconfig.xml, hostContext=/, hostPort=40898, coreRootDirectory=/x1/jenkins/jenkins-slave/workspace/Lucene-Solr-repro/solr/build/solr-core/test/J1/../../../../../../../../../../x1/jenkins/jenkins-slave/workspace/Lucene-Solr-repro/solr/build/solr-core/test/J1/temp/solr.cloud.hdfs.HdfsRestartWhileUpdatingTest_988D9624F13AA4D8-002/shard-2-001/cores}
   [junit4]   2> 459338 ERROR (closeThreadPool-293-thread-2) [    ] o.a.s.u.StartupLoggingUtils Missing Java Option solr.log.dir. Logging may be missing or incomplete.
   [junit4]   2> 459338 INFO  (closeThreadPool-293-thread-2) [    ] o.a.s.s.SolrDispatchFilter Using logger factory org.apache.logging.slf4j.Log4jLoggerFactory
   [junit4]   2> 459338 INFO  (closeThreadPool-293-thread-2) [    ] o.a.s.s.SolrDispatchFilter  ___      _       Welcome to Apache Solr™ version 8.0.0
   [junit4]   2> 459338 INFO  (closeThreadPool-293-thread-2) [    ] o.a.s.s.SolrDispatchFilter / __| ___| |_ _   Starting in cloud mode on port null
   [junit4]   2> 459338 INFO  (closeThreadPool-293-thread-2) [    ] o.a.s.s.SolrDispatchFilter \__ \/ _ \ | '_|  Install dir: null
   [junit4]   2> 459338 INFO  (closeThreadPool-293-thread-2) [    ] o.a.s.s.SolrDispatchFilter |___/\___/_|_|    Start time: 2018-12-29T10:59:28.905Z
   [junit4]   2> 459380 INFO  (zkConnectionManagerCallback-303-thread-1) [    ] o.a.s.c.c.ConnectionManager zkClient has connected
   [junit4]   2> 459386 INFO  (closeThreadPool-293-thread-2) [    ] o.a.s.s.SolrDispatchFilter Loading solr.xml from SolrHome (not found in ZooKeeper)
   [junit4]   2> 459390 INFO  (closeThreadPool-293-thread-2) [    ] o.a.s.c.SolrXmlConfig Loading container configuration from /x1/jenkins/jenkins-slave/workspace/Lucene-Solr-repro/solr/build/solr-core/test/J1/temp/solr.cloud.hdfs.HdfsRestartWhileUpdatingTest_988D9624F13AA4D8-002/shard-2-001/solr.xml
   [junit4]   2> 459399 INFO  (closeThreadPool-293-thread-2) [    ] o.a.s.c.SolrXmlConfig Configuration parameter autoReplicaFailoverWorkLoopDelay is ignored
   [junit4]   2> 459399 INFO  (closeThreadPool-293-thread-2) [    ] o.a.s.c.SolrXmlConfig Configuration parameter autoReplicaFailoverBadNodeExpiration is ignored
   [junit4]   2> 459402 INFO  (closeThreadPool-293-thread-2) [    ] o.a.s.c.SolrXmlConfig MBean server found: com.sun.jmx.mbeanserver.JmxMBeanServer@749837b0, but no JMX reporters were configured - adding default JMX reporter.
   [junit4]   2> 459412 INFO  (zkConnectionManagerCallback-305-thread-1) [    ] o.a.s.c.c.ConnectionManager zkClient has connected
   [junit4]   2> 459443 INFO  (closeThreadPool-293-thread-1) [n:127.0.0.1:37986_    ] o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (0) -> (1)
   [junit4]   2> 459448 INFO  (closeThreadPool-293-thread-1) [n:127.0.0.1:37986_    ] o.a.s.c.ZkController Publish node=127.0.0.1:37986_ as DOWN
   [junit4]   2> 459449 INFO  (closeThreadPool-293-thread-1) [n:127.0.0.1:37986_    ] o.a.s.c.TransientSolrCoreCacheDefault Allocating transient cache for 4 transient cores
   [junit4]   2> 459449 INFO  (closeThreadPool-293-thread-1) [n:127.0.0.1:37986_    ] o.a.s.c.ZkController Register node as live in ZooKeeper:/live_nodes/127.0.0.1:37986_
   [junit4]   2> 459451 INFO  (zkCallback-280-thread-1) [    ] o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (1) -> (2)
   [junit4]   2> 459451 INFO  (zkCallback-273-thread-1) [    ] o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (1) -> (2)
   [junit4]   2> 459456 INFO  (zkCallback-291-thread-1) [    ] o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (1) -> (2)
   [junit4]   2> 459457 INFO  (zkCallback-304-thread-1) [    ] o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (1) -> (2)
   [junit4]   2> 459585 INFO  (zkConnectionManagerCallback-312-thread-1) [    ] o.a.s.c.c.ConnectionManager zkClient has connected
   [junit4]   2> 459590 INFO  (closeThreadPool-293-thread-1) [n:127.0.0.1:37986_    ] o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (0) -> (2)
   [junit4]   2> 459592 INFO  (closeThreadPool-293-thread-1) [n:127.0.0.1:37986_    ] o.a.s.c.s.i.ZkClientClusterStateProvider Cluster at 127.0.0.1:35950/solr ready
   [junit4]   2> 459609 INFO  (closeThreadPool-293-thread-1) [n:127.0.0.1:37986_    ] o.a.s.h.a.MetricsHistoryHandler No .system collection, keeping metrics history in memory.
   [junit4]   2> 459668 INFO  (closeThreadPool-293-thread-1) [n:127.0.0.1:37986_    ] o.a.s.m.r.SolrJmxReporter JMX monitoring for 'solr.node' (registry 'solr.node') enabled at server: com.sun.jmx.mbeanserver.JmxMBeanServer@749837b0
   [junit4]   2> 459685 INFO  (closeThreadPool-293-thread-1) [n:127.0.0.1:37986_    ] o.a.s.m.r.SolrJmxReporter JMX monitoring for 'solr.jvm' (registry 'solr.jvm') enabled at server: com.sun.jmx.mbeanserver.JmxMBeanServer@749837b0
   [junit4]   2> 459685 INFO  (closeThreadPool-293-thread-1) [n:127.0.0.1:37986_    ] o.a.s.m.r.SolrJmxReporter JMX monitoring for 'solr.jetty' (registry 'solr.jetty') enabled at server: com.sun.jmx.mbeanserver.JmxMBeanServer@749837b0
   [junit4]   2> 459687 INFO  (closeThreadPool-293-thread-1) [n:127.0.0.1:37986_    ] o.a.s.c.CorePropertiesLocator Found 0 core definitions underneath /x1/jenkins/jenkins-slave/workspace/Lucene-Solr-repro/solr/build/solr-core/test/J1/../../../../../../../../../../x1/jenkins/jenkins-slave/workspace/Lucene-Solr-repro/solr/build/solr-core/test/J1/temp/solr.cloud.hdfs.HdfsRestartWhileUpdatingTest_988D9624F13AA4D8-002/shard-1-001/cores
   [junit4]   2> 459740 INFO  (TEST-HdfsRestartWhileUpdatingTest.test-seed#[988D9624F13AA4D8]) [    ] o.a.s.c.AbstractFullDistribZkTestBase create jetty 3 in directory /x1/jenkins/jenkins-slave/workspace/Lucene-Solr-repro/solr/build/solr-core/test/J1/temp/solr.cloud.hdfs.HdfsRestartWhileUpdatingTest_988D9624F13AA4D8-002/shard-3-001 of type NRT
   [junit4]   2> 459751 WARN  (closeThreadPool-293-thread-3) [    ] o.e.j.s.AbstractConnector Ignoring deprecated socket close linger time
   [junit4]   2> 459758 INFO  (closeThreadPool-293-thread-3) [    ] o.a.s.c.s.e.JettySolrRunner Start Jetty (original configured port=0)
   [junit4]   2> 459758 INFO  (closeThreadPool-293-thread-3) [    ] o.a.s.c.s.e.JettySolrRunner Trying to start Jetty on port 0 try number 1 ...
   [junit4]   2> 459758 INFO  (closeThreadPool-293-thread-3) [    ] o.e.j.s.Server jetty-9.4.14.v20181114; built: 2018-11-14T21:20:31.478Z; git: c4550056e785fb5665914545889f21dc136ad9e6; jvm 1.8.0_191-b12
   [junit4]   2> 459760 INFO  (closeThreadPool-293-thread-3) [    ] o.e.j.s.session DefaultSessionIdManager workerName=node0
   [junit4]   2> 459760 INFO  (closeThreadPool-293-thread-3) [    ] o.e.j.s.session No SessionScavenger set, using defaults
   [junit4]   2> 459760 INFO  (closeThreadPool-293-thread-3) [    ] o.e.j.s.session node0 Scavenging every 600000ms
   [junit4]   2> 459760 INFO  (closeThreadPool-293-thread-3) [    ] o.e.j.s.h.ContextHandler Started o.e.j.s.ServletContextHandler@496aad4c{/,null,AVAILABLE}
   [junit4]   2> 459761 WARN  (closeThreadPool-293-thread-3) [    ] o.e.j.u.s.S.config No Client EndPointIdentificationAlgorithm configured for SslContextFactory@28b37091[provider=null,keyStore=null,trustStore=null]
   [junit4]   2> 459764 INFO  (closeThreadPool-293-thread-3) [    ] o.e.j.s.AbstractConnector Started ServerConnector@34d30a8a{SSL,[ssl, http/1.1]}{127.0.0.1:44536}
   [junit4]   2> 459764 INFO  (closeThreadPool-293-thread-3) [    ] o.e.j.s.Server Started @459889ms
   [junit4]   2> 459764 INFO  (closeThreadPool-293-thread-3) [    ] o.a.s.c.s.e.JettySolrRunner Jetty properties: {solr.data.dir=hdfs://localhost:34449/hdfs__localhost_34449__x1_jenkins_jenkins-slave_workspace_Lucene-Solr-repro_solr_build_solr-core_test_J1_temp_solr.cloud.hdfs.HdfsRestartWhileUpdatingTest_988D9624F13AA4D8-002_tempDir-002_jetty3, replicaType=NRT, solrconfig=solrconfig.xml, hostContext=/, hostPort=44536, coreRootDirectory=/x1/jenkins/jenkins-slave/workspace/Lucene-Solr-repro/solr/build/solr-core/test/J1/../../../../../../../../../../x1/jenkins/jenkins-slave/workspace/Lucene-Solr-repro/solr/build/solr-core/test/J1/temp/solr.cloud.hdfs.HdfsRestartWhileUpdatingTest_988D9624F13AA4D8-002/shard-3-001/cores}
   [junit4]   2> 459765 ERROR (closeThreadPool-293-thread-3) [    ] o.a.s.u.StartupLoggingUtils Missing Java Option solr.log.dir. Logging may be missing or incomplete.
   [junit4]   2> 459765 INFO  (closeThreadPool-293-thread-3) [    ] o.a.s.s.SolrDispatchFilter Using logger factory org.apache.logging.slf4j.Log4jLoggerFactory
   [junit4]   2> 459765 INFO  (closeThreadPool-293-thread-3) [    ] o.a.s.s.SolrDispatchFilter  ___      _       Welcome to Apache Solr™ version 8.0.0
   [junit4]   2> 459765 INFO  (closeThreadPool-293-thread-3) [    ] o.a.s.s.SolrDispatchFilter / __| ___| |_ _   Starting in cloud mode on port null
   [junit4]   2> 459765 INFO  (closeThreadPool-293-thread-3) [    ] o.a.s.s.SolrDispatchFilter \__ \/ _ \ | '_|  Install dir: null
   [junit4]   2> 459765 INFO  (closeThreadPool-293-thread-3) [    ] o.a.s.s.SolrDispatchFilter |___/\___/_|_|    Start time: 2018-12-29T10:59:29.332Z
   [junit4]   2> 459771 INFO  (zkConnectionManagerCallback-315-thread-1) [    ] o.a.s.c.c.ConnectionManager zkClient has connected
   [junit4]   2> 459777 INFO  (closeThreadPool-293-thread-3) [    ] o.a.s.s.SolrDispatchFilter Loading solr.xml from SolrHome (not found in ZooKeeper)
   [junit4]   2> 459777 INFO  (closeThreadPool-293-thread-3) [    ] o.a.s.c.SolrXmlConfig Loading container configuration from /x1/jenkins/jenkins-slave/workspace/Lucene-Solr-repro/solr/build/solr-core/test/J1/temp/solr.cloud.hdfs.HdfsRestartWhileUpdatingTest_988D9624F13AA4D8-002/shard-3-001/solr.xml
   [junit4]   2> 459781 WARN  (closeThreadPool-293-thread-2) [    ] o.a.s.c.s.i.Http2SolrClient Create Http2SolrClient with HTTP/1.1 transport since Java 8 or lower versions does not support SSL + HTTP/2
   [junit4]   2> 459786 INFO  (closeThreadPool-293-thread-3) [    ] o.a.s.c.SolrXmlConfig Configuration parameter autoReplicaFailoverWorkLoopDelay is ignored
   [junit4]   2> 459786 INFO  (closeThreadPool-293-thread-3) [    ] o.a.s.c.SolrXmlConfig Configuration parameter autoReplicaFailoverBadNodeExpiration is ignored
   [junit4]   2> 459792 INFO  (closeThreadPool-293-thread-3) [    ] o.a.s.c.SolrXmlConfig MBean server found: com.sun.jmx.mbeanserver.JmxMBeanServer@749837b0, but no JMX reporters were configured - adding default JMX reporter.
   [junit4]   2> 459793 WARN  (closeThreadPool-293-thread-2) [    ] o.e.j.u.s.S.config No Client EndPointIdentificationAlgorithm configured for SslContextFactory@40030d1f[provider=null,keyStore=null,trustStore=null]
   [junit4]   2> 459797 WARN  (closeThreadPool-293-thread-2) [    ] o.a.s.c.s.i.Http2SolrClient Create Http2SolrClient with HTTP/1.1 transport since Java 8 or lower versions does not support SSL + HTTP/2
   [junit4]   2> 459805 WARN  (closeThreadPool-293-thread-2) [    ] o.e.j.u.s.S.config No Client EndPointIdentificationAlgorithm configured for SslContextFactory@5fd5b937[provider=null,keyStore=null,trustStore=null]
   [junit4]   2> 459807 INFO  (closeThreadPool-293-thread-2) [    ] o.a.s.c.ZkContainer Zookeeper client=127.0.0.1:35950/solr
   [junit4]   2> 459819 INFO  (zkConnectionManagerCallback-321-thread-1) [    ] o.a.s.c.c.ConnectionManager zkClient has connected
   [junit4]   2> 459837 INFO  (zkConnectionManagerCallback-323-thread-1) [    ] o.a.s.c.c.ConnectionManager zkClient has connected
   [junit4]   2> 459844 INFO  (closeThreadPool-293-thread-2) [n:127.0.0.1:40898_    ] o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (0) -> (2)
   [junit4]   2> 459860 INFO  (closeThreadPool-293-thread-2) [n:127.0.0.1:40898_    ] o.a.s.c.ZkController Publish node=127.0.0.1:40898_ as DOWN
   [junit4]   2> 459861 INFO  (closeThreadPool-293-thread-2) [n:127.0.0.1:40898_    ] o.a.s.c.TransientSolrCoreCacheDefault Allocating transient cache for 4 transient cores
   [junit4]   2> 459861 INFO  (closeThreadPool-293-thread-2) [n:127.0.0.1:40898_    ] o.a.s.c.ZkController Register node as live in ZooKeeper:/live_nodes/127.0.0.1:40898_
   [junit4]   2> 459868 INFO  (zkCallback-291-thread-1) [    ] o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (2) -> (3)
   [junit4]   2> 459868 INFO  (zkCallback-280-thread-1) [    ] o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (2) -> (3)
   [junit4]   2> 459869 INFO  (zkCallback-273-thread-1) [    ] o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (2) -> (3)
   [junit4]   2> 459869 INFO  (zkCallback-304-thread-1) [    ] o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (2) -> (3)
   [junit4]   2> 459869 INFO  (zkCallback-311-thread-1) [    ] o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (2) -> (3)
   [junit4]   2> 459889 INFO  (zkCallback-322-thread-1) [    ] o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (2) -> (3)
   [junit4]   2> 459964 INFO  (zkConnectionManagerCallback-330-thread-1) [    ] o.a.s.c.c.ConnectionManager zkClient has connected
   [junit4]   2> 459968 INFO  (closeThreadPool-293-thread-2) [n:127.0.0.1:40898_    ] o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (0) -> (3)
   [junit4]   2> 459970 INFO  (closeThreadPool-293-thread-2) [n:127.0.0.1:40898_    ] o.a.s.c.s.i.ZkClientClusterStateProvider Cluster at 127.0.0.1:35950/solr ready
   [junit4]   2> 459984 INFO  (closeThreadPool-293-thread-2) [n:127.0.0.1:40898_    ] o.a.s.h.a.MetricsHistoryHandler No .system collection, keeping metrics history in memory.
   [junit4]   2> 460063 WARN  (closeThreadPool-293-thread-3) [    ] o.a.s.c.s.i.Http2SolrClient Create Http2SolrClient with HTTP/1.1 transport since Java 8 or lower versions does not support SSL + HTTP/2
   [junit4]   2> 460066 INFO  (closeThreadPool-293-thread-2) [n:127.0.0.1:40898_    ] o.a.s.m.r.SolrJmxReporter JMX monitoring for 'solr.node' (registry 'solr.node') enabled at server: com.sun.jmx.mbeanserver.JmxMBeanServer@749837b0
   [junit4]   2> 460111 INFO  (closeThreadPool-293-thread-2) [n:127.0.0.1:40898_    ] o.a.s.m.r.SolrJmxReporter JMX monitoring for 'solr.jvm' (registry 'solr.jvm') enabled at server: com.sun.jmx.mbeanserver.JmxMBeanServer@749837b0
   [junit4]   2> 460111 INFO  (closeThreadPool-293-thread-2) [n:127.0.0.1:40898_    ] o.a.s.m.r.SolrJmxReporter JMX monitoring for 'solr.jetty' (registry 'solr.jetty') enabled at server: com.sun.jmx.mbeanserver.JmxMBeanServer@749837b0
   [junit4]   2> 460119 WARN  (closeThreadPool-293-thread-3) [    ] o.e.j.u.s.S.config No Client EndPointIdentificationAlgorithm configured for SslContextFactory@6e20834a[provider=null,keyStore=null,trustStore=null]
   [junit4]   2> 460120 INFO  (closeThreadPool-293-thread-2) [n:127.0.0.1:40898_    ] o.a.s.c.CorePropertiesLocator Found 0 core definitions underneath /x1/jenkins/jenkins-slave/workspace/Lucene-Solr-repro/solr/build/solr-core/test/J1/../../../../../../../../../../x1/jenkins/jenkins-slave/workspace/Lucene-Solr-repro/solr/build/solr-core/test/J1/temp/solr.cloud.hdfs.HdfsRestartWhileUpdatingTest_988D9624F13AA4D8-002/shard-2-001/cores
   [junit4]   2> 460157 WARN  (closeThreadPool-293-thread-3) [    ] o.a.s.c.s.i.Http2SolrClient Create Http2SolrClient with HTTP/1.1 transport since Java 8 or lower versions does not support SSL + HTTP/2
   [junit4]   2> 460191 WARN  (closeThreadPool-293-thread-3) [    ] o.e.j.u.s.S.config No Client EndPointIdentificationAlgorithm configured for SslContextFactory@1c9eb69e[provider=null,keyStore=null,trustStore=null]
   [junit4]   2> 460193 INFO  (closeThreadPool-293-thread-3) [    ] o.a.s.c.ZkContainer Zookeeper client=127.0.0.1:35950/solr
   [junit4]   2> 460208 INFO  (zkConnectionManagerCallback-337-thread-1) [    ] o.a.s.c.c.ConnectionManager zkClient has connected
   [junit4]   2> 460229 INFO  (zkConnectionManagerCallback-339-thread-1) [    ] o.a.s.c.c.ConnectionManager zkClient has connected
   [junit4]   2> 460245 INFO  (closeThreadPool-293-thread-3) [n:127.0.0.1:44536_    ] o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (0) -> (3)
   [junit4]   2> 460250 INFO  (closeThreadPool-293-thread-3) [n:127.0.0.1:44536_    ] o.a.s.c.ZkController Publish node=127.0.0.1:44536_ as DOWN
   [junit4]   2> 460252 INFO  (closeThreadPool-293-thread-3) [n:127.0.0.1:44536_    ] o.a.s.c.TransientSolrCoreCacheDefault Allocating transient cache for 4 transient cores
   [junit4]   2> 460252 INFO  (closeThreadPool-293-thread-3) [n:127.0.0.1:44536_    ] o.a.s.c.ZkController Register node as live in ZooKeeper:/live_nodes/127.0.0.1:44536_
   [junit4]   2> 460255 INFO  (zkCallback-291-thread-1) [    ] o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (3) -> (4)
   [junit4]   2> 460255 INFO  (zkCallback-280-thread-1) [    ] o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (3) -> (4)
   [junit4]   2> 460255 INFO  (zkCallback-304-thread-1) [    ] o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (3) -> (4)
   [junit4]   2> 460255 INFO  (zkCallback-273-thread-1) [    ] o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (3) -> (4)
   [junit4]   2> 460288 INFO  (zkCallback-311-thread-1) [    ] o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (3) -> (4)
   [junit4]   2> 460289 INFO  (zkCallback-322-thread-1) [    ] o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (3) -> (4)
   [junit4]   2> 460301 INFO  (zkCallback-338-thread-1) [    ] o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (3) -> (4)
   [junit4]   2> 460302 INFO  (zkCallback-329-thread-1) [    ] o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (3) -> (4)
   [junit4]   2> 460319 INFO  (zkConnectionManagerCallback-346-thread-1) [    ] o.a.s.c.c.ConnectionManager zkClient has connected
   [junit4]   2> 460329 INFO  (closeThreadPool-293-thread-3) [n:127.0.0.1:44536_    ] o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (0) -> (4)
   [junit4]   2> 460337 INFO  (closeThreadPool-293-thread-3) [n:127.0.0.1:44536_    ] o.a.s.c.s.i.ZkClientClusterStateProvider Cluster at 127.0.0.1:35950/solr ready
   [junit4]   2> 460348 INFO  (closeThreadPool-293-thread-3) [n:127.0.0.1:44536_    ] o.a.s.h.a.MetricsHistoryHandler No .system collection, keeping metrics history in memory.
   [junit4]   2> 460402 INFO  (closeThreadPool-293-thread-3) [n:127.0.0.1:44536_    ] o.a.s.m.r.SolrJmxReporter JMX monitoring for 'solr.node' (registry 'solr.node') enabled at server: com.sun.jmx.mbeanserver.JmxMBeanServer@749837b0
   [junit4]   2> 460432 INFO  (closeThreadPool-293-thread-3) [n:127.0.0.1:44536_    ] o.a.s.m.r.SolrJmxReporter JMX monitoring for 'solr.jvm' (registry 'solr.jvm') enabled at server: com.sun.jmx.mbeanserver.JmxMBeanServer@749837b0
   [junit4]   2> 460432 INFO  (closeThreadPool-293-thread-3) [n:127.0.0.1:44536_    ] o.a.s.m.r.SolrJmxReporter JMX monitoring for 'solr.jetty' (registry 'solr.jetty') enabled at server: com.sun.jmx.mbeanserver.JmxMBeanServer@749837b0
   [junit4]   2> 460434 INFO  (closeThreadPool-293-thread-3) [n:127.0.0.1:44536_    ] o.a.s.c.CorePropertiesLocator Found 0 core definitions underneath /x1/jenkins/jenkins-slave/workspace/Lucene-Solr-repro/solr/build/solr-core/test/J1/../../../../../../../../../../x1/jenkins/jenkins-slave/workspace/Lucene-Solr-repro/solr/build/solr-core/test/J1/temp/solr.cloud.hdfs.HdfsRestartWhileUpdatingTest_988D9624F13AA4D8-002/shard-3-001/cores
   [junit4]   2> 460494 INFO  (OverseerCollectionConfigSetProcessor-72530497926529029-127.0.0.1:44030_-n_0000000000) [n:127.0.0.1:44030_    ] o.a.s.c.OverseerTaskQueue Response ZK path: /overseer/collection-queue-work/qnr-0000000002 doesn't exist.  Requestor may have disconnected from ZooKeeper
   [junit4]   2> 460669 INFO  (qtp369565589-2047) [n:127.0.0.1:37986_    ] o.a.s.h.a.CollectionsHandler Invoked Collection Action :addreplica with params node=127.0.0.1:40898_&action=ADDREPLICA&collection=collection1&shard=shard1&type=NRT&wt=javabin&version=2 and sendToOCPQueue=true
   [junit4]   2> 460670 INFO  (qtp369565589-2046) [n:127.0.0.1:37986_    ] o.a.s.h.a.CollectionsHandler Invoked Collection Action :addreplica with params node=127.0.0.1:44536_&action=ADDREPLICA&collection=collection1&shard=shard1&type=NRT&wt=javabin&version=2 and sendToOCPQueue=true
   [junit4]   2> 460671 INFO  (qtp369565589-2048) [n:127.0.0.1:37986_    ] o.a.s.h.a.CollectionsHandler Invoked Collection Action :addreplica with params node=127.0.0.1:37986_&action=ADDREPLICA&collection=collection1&shard=shard1&type=NRT&wt=javabin&version=2 and sendToOCPQueue=true
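A note on the three ADDREPLICA invocations above: they are ordinary Collections API calls routed through the Overseer queue (sendToOCPQueue=true). The SolrJ equivalent is sketched below; this is illustrative rather than lifted from the test, and the ZK address (127.0.0.1:35950, chroot /solr) is the test cluster's ephemeral one.

    import java.util.Collections;
    import java.util.Optional;
    import org.apache.solr.client.solrj.impl.CloudSolrClient;
    import org.apache.solr.client.solrj.request.CollectionAdminRequest;

    public class AddReplicaDemo {
      public static void main(String[] args) throws Exception {
        try (CloudSolrClient client = new CloudSolrClient.Builder(
            Collections.singletonList("127.0.0.1:35950"), Optional.of("/solr")).build()) {
          // Equivalent of action=ADDREPLICA&collection=collection1&shard=shard1
          //               &node=127.0.0.1:40898_&type=NRT
          CollectionAdminRequest.AddReplica req =
              CollectionAdminRequest.addReplicaToShard("collection1", "shard1");
          req.setNode("127.0.0.1:40898_"); // pin the new NRT replica to this node
          req.process(client);             // lands on /admin/collections, as logged above
        }
      }
    }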
   [junit4]   2> 460699 INFO  (qtp369565589-2049) [n:127.0.0.1:37986_    ] o.a.s.s.HttpSolrCall [admin] webapp=null path=/admin/metrics params={prefix=CONTAINER.fs.usableSpace,CONTAINER.fs.totalSpace,CORE.coreName&wt=javabin&version=2&group=solr.node,solr.core} status=0 QTime=0
   [junit4]   2> 460701 INFO  (qtp369565589-2043) [n:127.0.0.1:37986_    ] o.a.s.s.HttpSolrCall [admin] webapp=null path=/admin/metrics params={prefix=CONTAINER.fs.usableSpace,CONTAINER.fs.totalSpace,CORE.coreName&wt=javabin&version=2&group=solr.node,solr.core} status=0 QTime=0
   [junit4]   2> 460703 INFO  (qtp369565589-2049) [n:127.0.0.1:37986_    ] o.a.s.s.HttpSolrCall [admin] webapp=null path=/admin/metrics params={prefix=CONTAINER.fs.usableSpace,CONTAINER.fs.totalSpace,CORE.coreName&wt=javabin&version=2&group=solr.node,solr.core} status=0 QTime=0
   [junit4]   2> 460704 INFO  (qtp369565589-2043) [n:127.0.0.1:37986_    ] o.a.s.s.HttpSolrCall [admin] webapp=null path=/admin/metrics params={prefix=CONTAINER.fs.usableSpace,CONTAINER.fs.totalSpace,CORE.coreName&wt=javabin&version=2&group=solr.node,solr.core} status=0 QTime=0
   [junit4]   2> 460705 INFO  (qtp369565589-2049) [n:127.0.0.1:37986_    ] o.a.s.s.HttpSolrCall [admin] webapp=null path=/admin/metrics params={prefix=CONTAINER.fs.usableSpace,CONTAINER.fs.totalSpace,CORE.coreName&wt=javabin&version=2&group=solr.node,solr.core} status=0 QTime=0
   [junit4]   2> 460726 INFO  (qtp1056606282-2066) [n:127.0.0.1:40898_    ] o.a.s.s.HttpSolrCall [admin] webapp=null path=/admin/metrics params={prefix=CONTAINER.fs.usableSpace,CONTAINER.fs.totalSpace,CORE.coreName&wt=javabin&version=2&group=solr.node,solr.core} status=0 QTime=2
   [junit4]   2> 460728 INFO  (qtp1056606282-2067) [n:127.0.0.1:40898_    ] o.a.s.s.HttpSolrCall [admin] webapp=null path=/admin/metrics params={prefix=CONTAINER.fs.usableSpace,CONTAINER.fs.totalSpace,CORE.coreName&wt=javabin&version=2&group=solr.node,solr.core} status=0 QTime=0
   [junit4]   2> 460729 INFO  (qtp1056606282-2068) [n:127.0.0.1:40898_    ] o.a.s.s.HttpSolrCall [admin] webapp=null path=/admin/metrics params={prefix=CONTAINER.fs.usableSpace,CONTAINER.fs.totalSpace,CORE.coreName&wt=javabin&version=2&group=solr.node,solr.core} status=0 QTime=0
   [junit4]   2> 460731 INFO  (qtp1056606282-2069) [n:127.0.0.1:40898_    ] o.a.s.s.HttpSolrCall [admin] webapp=null path=/admin/metrics params={prefix=CONTAINER.fs.usableSpace,CONTAINER.fs.totalSpace,CORE.coreName&wt=javabin&version=2&group=solr.node,solr.core} status=0 QTime=0
   [junit4]   2> 460732 INFO  (qtp1056606282-2070) [n:127.0.0.1:40898_    ] o.a.s.s.HttpSolrCall [admin] webapp=null path=/admin/metrics params={prefix=CONTAINER.fs.usableSpace,CONTAINER.fs.totalSpace,CORE.coreName&wt=javabin&version=2&group=solr.node,solr.core} status=0 QTime=0
   [junit4]   2> 460735 INFO  (qtp2033508723-1984) [n:127.0.0.1:44030_    ] o.a.s.s.HttpSolrCall [admin] webapp=null path=/admin/metrics params={wt=javabin&version=2&key=solr.core.control_collection.shard1.replica_n1:INDEX.sizeInBytes} status=0 QTime=1
   [junit4]   2> 460737 INFO  (qtp2033508723-1985) [n:127.0.0.1:44030_    ] o.a.s.s.HttpSolrCall [admin] webapp=null path=/admin/metrics params={wt=javabin&version=2&key=solr.core.control_collection.shard1.replica_n1:INDEX.sizeInBytes} status=0 QTime=0
   [junit4]   2> 460738 INFO  (qtp2033508723-1986) [n:127.0.0.1:44030_    ] o.a.s.s.HttpSolrCall [admin] webapp=null path=/admin/metrics params={wt=javabin&version=2&key=solr.core.control_collection.shard1.replica_n1:INDEX.sizeInBytes} status=0 QTime=0
   [junit4]   2> 460740 INFO  (qtp2033508723-1987) [n:127.0.0.1:44030_    ] o.a.s.s.HttpSolrCall [admin] webapp=null path=/admin/metrics params={prefix=CONTAINER.fs.usableSpace,CONTAINER.fs.totalSpace,CORE.coreName&wt=javabin&version=2&group=solr.node,solr.core} status=0 QTime=0
   [junit4]   2> 460746 INFO  (qtp2033508723-1983) [n:127.0.0.1:44030_    ] o.a.s.s.HttpSolrCall [admin] webapp=null path=/admin/metrics params={prefix=CONTAINER.fs.usableSpace,CONTAINER.fs.totalSpace,CORE.coreName&wt=javabin&version=2&group=solr.node,solr.core} status=0 QTime=0
   [junit4]   2> 460747 INFO  (qtp2033508723-1984) [n:127.0.0.1:44030_    ] o.a.s.s.HttpSolrCall [admin] webapp=null path=/admin/metrics params={prefix=CONTAINER.fs.usableSpace,CONTAINER.fs.totalSpace,CORE.coreName&wt=javabin&version=2&group=solr.node,solr.core} status=0 QTime=0
   [junit4]   2> 460749 INFO  (qtp2033508723-1985) [n:127.0.0.1:44030_    ] o.a.s.s.HttpSolrCall [admin] webapp=null path=/admin/metrics params={prefix=CONTAINER.fs.usableSpace,CONTAINER.fs.totalSpace,CORE.coreName&wt=javabin&version=2&group=solr.node,solr.core} status=0 QTime=0
   [junit4]   2> 460750 INFO  (qtp2033508723-1986) [n:127.0.0.1:44030_    ] o.a.s.s.HttpSolrCall [admin] webapp=null path=/admin/metrics params={prefix=CONTAINER.fs.usableSpace,CONTAINER.fs.totalSpace,CORE.coreName&wt=javabin&version=2&group=solr.node,solr.core} status=0 QTime=0
   [junit4]   2> 460786 INFO  (qtp196247845-2091) [n:127.0.0.1:44536_    ] o.a.s.s.HttpSolrCall [admin] webapp=null path=/admin/metrics params={prefix=CONTAINER.fs.usableSpace,CONTAINER.fs.totalSpace,CORE.coreName&wt=javabin&version=2&group=solr.node,solr.core} status=0 QTime=0
   [junit4]   2> 460791 INFO  (qtp196247845-2092) [n:127.0.0.1:44536_    ] o.a.s.s.HttpSolrCall [admin] webapp=null path=/admin/metrics params={prefix=CONTAINER.fs.usableSpace,CONTAINER.fs.totalSpace,CORE.coreName&wt=javabin&version=2&group=solr.node,solr.core} status=0 QTime=0
   [junit4]   2> 460793 INFO  (qtp196247845-2093) [n:127.0.0.1:44536_    ] o.a.s.s.HttpSolrCall [admin] webapp=null path=/admin/metrics params={prefix=CONTAINER.fs.usableSpace,CONTAINER.fs.totalSpace,CORE.coreName&wt=javabin&version=2&group=solr.node,solr.core} status=0 QTime=0
   [junit4]   2> 460794 INFO  (qtp196247845-2094) [n:127.0.0.1:44536_    ] o.a.s.s.HttpSolrCall [admin] webapp=null path=/admin/metrics params={prefix=CONTAINER.fs.usableSpace,CONTAINER.fs.totalSpace,CORE.coreName&wt=javabin&version=2&group=solr.node,solr.core} status=0 QTime=0
   [junit4]   2> 460795 INFO  (qtp196247845-2095) [n:127.0.0.1:44536_    ] o.a.s.s.HttpSolrCall [admin] webapp=null path=/admin/metrics params={prefix=CONTAINER.fs.usableSpace,CONTAINER.fs.totalSpace,CORE.coreName&wt=javabin&version=2&group=solr.node,solr.core} status=0 QTime=0
   [junit4]   2> 460796 INFO  (OverseerThreadFactory-185-thread-3-processing-n:127.0.0.1:44030_) [n:127.0.0.1:44030_ c:collection1 s:shard1  ] o.a.s.c.a.c.AddReplicaCmd Node Identified 127.0.0.1:40898_ for creating new replica of shard shard1 for collection collection1
   [junit4]   2> 460802 INFO  (OverseerThreadFactory-185-thread-3-processing-n:127.0.0.1:44030_) [n:127.0.0.1:44030_ c:collection1 s:shard1  ] o.a.s.c.a.c.AddReplicaCmd Returning CreateReplica command.
   [junit4]   2> 460877 INFO  (qtp1056606282-2069) [n:127.0.0.1:40898_    x:collection1_shard1_replica_n1] o.a.s.h.a.CoreAdminOperation core create command qt=/admin/cores&collection.configName=conf1&name=collection1_shard1_replica_n1&action=CREATE&collection=collection1&shard=shard1&wt=javabin&version=2&replicaType=NRT
   [junit4]   2> 461923 INFO  (qtp1056606282-2069) [n:127.0.0.1:40898_ c:collection1 s:shard1  x:collection1_shard1_replica_n1] o.a.s.c.SolrConfig Using Lucene MatchVersion: 8.0.0
   [junit4]   2> 461958 INFO  (qtp1056606282-2069) [n:127.0.0.1:40898_ c:collection1 s:shard1  x:collection1_shard1_replica_n1] o.a.s.s.IndexSchema [collection1_shard1_replica_n1] Schema name=test
   [junit4]   2> 462160 INFO  (qtp1056606282-2069) [n:127.0.0.1:40898_ c:collection1 s:shard1  x:collection1_shard1_replica_n1] o.a.s.s.IndexSchema Loaded schema test/1.6 with uniqueid field id
   [junit4]   2> 462236 INFO  (qtp1056606282-2069) [n:127.0.0.1:40898_ c:collection1 s:shard1  x:collection1_shard1_replica_n1] o.a.s.c.CoreContainer Creating SolrCore 'collection1_shard1_replica_n1' using configuration from collection collection1, trusted=true
   [junit4]   2> 462237 INFO  (qtp1056606282-2069) [n:127.0.0.1:40898_ c:collection1 s:shard1  x:collection1_shard1_replica_n1] o.a.s.m.r.SolrJmxReporter JMX monitoring for 'solr.core.collection1.shard1.replica_n1' (registry 'solr.core.collection1.shard1.replica_n1') enabled at server: com.sun.jmx.mbeanserver.JmxMBeanServer@749837b0
   [junit4]   2> 462237 INFO  (qtp1056606282-2069) [n:127.0.0.1:40898_ c:collection1 s:shard1  x:collection1_shard1_replica_n1] o.a.s.c.SolrCore [[collection1_shard1_replica_n1] ] Opening new SolrCore at [/x1/jenkins/jenkins-slave/workspace/Lucene-Solr-repro/solr/build/solr-core/test/J1/temp/solr.cloud.hdfs.HdfsRestartWhileUpdatingTest_988D9624F13AA4D8-002/shard-2-001/cores/collection1_shard1_replica_n1], dataDir=[/x1/jenkins/jenkins-slave/workspace/Lucene-Solr-repro/solr/build/solr-core/test/J1/../../../../../../../../../../x1/jenkins/jenkins-slave/workspace/Lucene-Solr-repro/solr/build/solr-core/test/J1/temp/solr.cloud.hdfs.HdfsRestartWhileUpdatingTest_988D9624F13AA4D8-002/shard-2-001/cores/collection1_shard1_replica_n1/data/]
   [junit4]   2> 462243 INFO  (qtp1056606282-2069) [n:127.0.0.1:40898_ c:collection1 s:shard1  x:collection1_shard1_replica_n1] o.a.s.u.RandomMergePolicy RandomMergePolicy wrapping class org.apache.lucene.index.TieredMergePolicy: [TieredMergePolicy: maxMergeAtOnce=6, maxMergeAtOnceExplicit=7, maxMergedSegmentMB=11.0712890625, floorSegmentMB=0.451171875, forceMergeDeletesPctAllowed=13.788308198062179, segmentsPerTier=12.0, maxCFSSegmentSizeMB=8.796093022207999E12, noCFSRatio=0.3507848830384257, deletesPctAllowed=35.068063914256506
   [junit4]   2> 462278 WARN  (qtp1056606282-2069) [n:127.0.0.1:40898_ c:collection1 s:shard1  x:collection1_shard1_replica_n1] o.a.s.c.RequestHandlers INVALID paramSet a in requestHandler {type = requestHandler,name = /dump,class = DumpRequestHandler,attributes = {initParams=a, name=/dump, class=DumpRequestHandler},args = {defaults={a=A,b=B}}}
   [junit4]   2> 462413 INFO  (qtp1056606282-2069) [n:127.0.0.1:40898_ c:collection1 s:shard1  x:collection1_shard1_replica_n1] o.a.s.u.UpdateHandler Using UpdateLog implementation: org.apache.solr.update.UpdateLog
   [junit4]   2> 462413 INFO  (qtp1056606282-2069) [n:127.0.0.1:40898_ c:collection1 s:shard1  x:collection1_shard1_replica_n1] o.a.s.u.UpdateLog Initializing UpdateLog: dataDir= defaultSyncLevel=FLUSH numRecordsToKeep=100 maxNumLogsToKeep=10 numVersionBuckets=65536
   [junit4]   2> 462416 INFO  (qtp1056606282-2069) [n:127.0.0.1:40898_ c:collection1 s:shard1  x:collection1_shard1_replica_n1] o.a.s.u.CommitTracker Hard AutoCommit: if uncommitted for 30000ms; 
   [junit4]   2> 462416 INFO  (qtp1056606282-2069) [n:127.0.0.1:40898_ c:collection1 s:shard1  x:collection1_shard1_replica_n1] o.a.s.u.CommitTracker Soft AutoCommit: if uncommitted for 3000ms; 
   [junit4]   2> 462418 INFO  (qtp1056606282-2069) [n:127.0.0.1:40898_ c:collection1 s:shard1  x:collection1_shard1_replica_n1] o.a.s.u.RandomMergePolicy RandomMergePolicy wrapping class org.apache.lucene.index.LogByteSizeMergePolicy: [LogByteSizeMergePolicy: minMergeSize=1677721, mergeFactor=10, maxMergeSize=2147483648, maxMergeSizeForForcedMerge=9223372036854775807, calibrateSizeByDeletes=false, maxMergeDocs=2147483647, maxCFSSegmentSizeMB=8.796093022207999E12, noCFSRatio=0.0]
   [junit4]   2> 462425 INFO  (qtp1056606282-2069) [n:127.0.0.1:40898_ c:collection1 s:shard1  x:collection1_shard1_replica_n1] o.a.s.s.SolrIndexSearcher Opening [Searcher@2d23bc42[collection1_shard1_replica_n1] main]
   [junit4]   2> 462432 INFO  (qtp1056606282-2069) [n:127.0.0.1:40898_ c:collection1 s:shard1  x:collection1_shard1_replica_n1] o.a.s.r.ManagedResourceStorage Configured ZooKeeperStorageIO with znodeBase: /configs/conf1
   [junit4]   2> 462432 INFO  (qtp1056606282-2069) [n:127.0.0.1:40898_ c:collection1 s:shard1  x:collection1_shard1_replica_n1] o.a.s.r.ManagedResourceStorage Loaded null at path _rest_managed.json using ZooKeeperStorageIO:path=/configs/conf1
   [junit4]   2> 462433 INFO  (qtp1056606282-2069) [n:127.0.0.1:40898_ c:collection1 s:shard1  x:collection1_shard1_replica_n1] o.a.s.h.ReplicationHandler Commits will be reserved for 10000ms.
   [junit4]   2> 462433 INFO  (qtp1056606282-2069) [n:127.0.0.1:40898_ c:collection1 s:shard1  x:collection1_shard1_replica_n1] o.a.s.u.UpdateLog Could not find max version in index or recent updates, using new clock 1621183611011072000
   [junit4]   2> 462457 INFO  (searcherExecutor-222-thread-1-processing-n:127.0.0.1:40898_ x:collection1_shard1_replica_n1 c:collection1 s:shard1) [n:127.0.0.1:40898_ c:collection1 s:shard1  x:collection1_shard1_replica_n1] o.a.s.c.SolrCore [collection1_shard1_replica_n1] Registered new searcher Searcher@2d23bc42[collection1_shard1_replica_n1] main{ExitableDirectoryReader(UninvertingDirectoryReader())}
   [junit4]   2> 462513 INFO  (qtp1056606282-2069) [n:127.0.0.1:40898_ c:collection1 s:shard1  x:collection1_shard1_replica_n1] o.a.s.c.ZkShardTerms Successful update of terms at /collections/collection1/terms/shard1 to Terms{values={core_node2=0}, version=0}
   [junit4]   2> 462514 INFO  (qtp1056606282-2069) [n:127.0.0.1:40898_ c:collection1 s:shard1  x:collection1_shard1_replica_n1] o.a.s.c.ShardLeaderElectionContextBase make sure parent is created /collections/collection1/leaders/shard1
   [junit4]   2> 462517 INFO  (qtp1056606282-2069) [n:127.0.0.1:40898_ c:collection1 s:shard1  x:collection1_shard1_replica_n1] o.a.s.c.ShardLeaderElectionContext Enough replicas found to continue.
   [junit4]   2> 462517 INFO  (qtp1056606282-2069) [n:127.0.0.1:40898_ c:collection1 s:shard1  x:collection1_shard1_replica_n1] o.a.s.c.ShardLeaderElectionContext I may be the new leader - try and sync
   [junit4]   2> 462517 INFO  (qtp1056606282-2069) [n:127.0.0.1:40898_ c:collection1 s:shard1  x:collection1_shard1_replica_n1] o.a.s.c.SyncStrategy Sync replicas to https://127.0.0.1:40898/collection1_shard1_replica_n1/
   [junit4]   2> 462518 INFO  (qtp1056606282-2069) [n:127.0.0.1:40898_ c:collection1 s:shard1  x:collection1_shard1_replica_n1] o.a.s.c.SyncStrategy Sync Success - now sync replicas to me
   [junit4]   2> 462518 INFO  (qtp1056606282-2069) [n:127.0.0.1:40898_ c:collection1 s:shard1  x:collection1_shard1_replica_n1] o.a.s.c.SyncStrategy https://127.0.0.1:40898/collection1_shard1_replica_n1/ has no replicas
   [junit4]   2> 462518 INFO  (qtp1056606282-2069) [n:127.0.0.1:40898_ c:collection1 s:shard1  x:collection1_shard1_replica_n1] o.a.s.c.ShardLeaderElectionContextBase Creating leader registration node /collections/collection1/leaders/shard1/leader after winning as /collections/collection1/leader_elect/shard1/election/72530497926529040-core_node2-n_0000000000
   [junit4]   2> 462520 INFO  (qtp1056606282-2069) [n:127.0.0.1:40898_ c:collection1 s:shard1  x:collection1_shard1_replica_n1] o.a.s.c.ShardLeaderElectionContext I am the new leader: https://127.0.0.1:40898/collection1_shard1_replica_n1/ shard1
   [junit4]   2> 462624 INFO  (qtp1056606282-2069) [n:127.0.0.1:40898_ c:collection1 s:shard1  x:collection1_shard1_replica_n1] o.a.s.c.ZkController I am the leader, no recovery necessary
   [junit4]   2> 462627 INFO  (qtp1056606282-2069) [n:127.0.0.1:40898_ c:collection1 s:shard1  x:collection1_shard1_replica_n1] o.a.s.s.HttpSolrCall [admin] webapp=null path=/admin/cores params={qt=/admin/cores&collection.configName=conf1&name=collection1_shard1_replica_n1&action=CREATE&collection=collection1&shard=shard1&wt=javabin&version=2&replicaType=NRT} status=0 QTime=1750
   [junit4]   2> 462635 INFO  (qtp369565589-2047) [n:127.0.0.1:37986_ c:collection1   ] o.a.s.s.HttpSolrCall [admin] webapp=null path=/admin/collections params={node=127.0.0.1:40898_&action=ADDREPLICA&collection=collection1&shard=shard1&type=NRT&wt=javabin&version=2} status=0 QTime=1965
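A side note on the "using new clock 1621183611011072000" line above: when a fresh core has no max version to recover, UpdateLog seeds its version buckets from a new clock value, which (per VersionInfo.getNewClock) is just epoch milliseconds shifted left 20 bits. Decoding it recovers the wall time, which is handy when correlating elapsed-time log marks with the build timestamps. Minimal sketch:

    public class VersionClockDemo {
      public static void main(String[] args) {
        // epoch-millis << 20 leaves ~1M version increments per millisecond
        long clock = 1621183611011072000L;
        System.out.println(java.time.Instant.ofEpochMilli(clock >>> 20));
        // prints 2018-12-29T10:59:32Z, consistent with the node start time
        // logged a few seconds earlier above
      }
    }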
   [junit4]   2> 462686 INFO  (OverseerCollectionConfigSetProcessor-72530497926529029-127.0.0.1:44030_-n_0000000000) [n:127.0.0.1:44030_    ] o.a.s.c.OverseerTaskQueue Response ZK path: /overseer/collection-queue-work/qnr-0000000004 doesn't exist.  Requestor may have disconnected from ZooKeeper
   [junit4]   2> 462695 INFO  (qtp196247845-2091) [n:127.0.0.1:44536_    ] o.a.s.s.HttpSolrCall [admin] webapp=null path=/admin/metrics params={prefix=CONTAINER.fs.usableSpace,CONTAINER.fs.totalSpace,CORE.coreName&wt=javabin&version=2&group=solr.node,solr.core} status=0 QTime=1
   [junit4]   2> 462697 INFO  (qtp196247845-2092) [n:127.0.0.1:44536_    ] o.a.s.s.HttpSolrCall [admin] webapp=null path=/admin/metrics params={prefix=CONTAINER.fs.usableSpace,CONTAINER.fs.totalSpace,CORE.coreName&wt=javabin&version=2&group=solr.node,solr.core} status=0 QTime=0
   [junit4]   2> 462698 INFO  (qtp196247845-2093) [n:127.0.0.1:44536_    ] o.a.s.s.HttpSolrCall [admin] webapp=null path=/admin/metrics params={prefix=CONTAINER.fs.usableSpace,CONTAINER.fs.totalSpace,CORE.coreName&wt=javabin&version=2&group=solr.node,solr.core} status=0 QTime=0
   [junit4]   2> 462699 INFO  (qtp196247845-2094) [n:127.0.0.1:44536_    ] o.a.s.s.HttpSolrCall [admin] webapp=null path=/admin/metrics params={prefix=CONTAINER.fs.usableSpace,CONTAINER.fs.totalSpace,CORE.coreName&wt=javabin&version=2&group=solr.node,solr.core} status=0 QTime=0
   [junit4]   2> 462701 INFO  (qtp196247845-2095) [n:127.0.0.1:44536_    ] o.a.s.s.HttpSolrCall [admin] webapp=null path=/admin/metrics params={prefix=CONTAINER.fs.usableSpace,CONTAINER.fs.totalSpace,CORE.coreName&wt=javabin&version=2&group=solr.node,solr.core} status=0 QTime=0
   [junit4]   2> 462707 INFO  (qtp369565589-2043) [n:127.0.0.1:37986_    ] o.a.s.s.HttpSolrCall [admin] webapp=null path=/admin/metrics params={prefix=CONTAINER.fs.usableSpace,CONTAINER.fs.totalSpace,CORE.coreName&wt=javabin&version=2&group=solr.node,solr.core} status=0 QTime=0
   [junit4]   2> 462708 INFO  (qtp369565589-2049) [n:127.0.0.1:37986_    ] o.a.s.s.HttpSolrCall [admin] webapp=null path=/admin/metrics params={prefix=CONTAINER.fs.usableSpace,CONTAINER.fs.totalSpace,CORE.coreName&wt=javabin&version=2&group=solr.node,solr.core} status=0 QTime=0
   [junit4]   2> 462709 INFO  (qtp369565589-2047) [n:127.0.0.1:37986_    ] o.a.s.s.HttpSolrCall [admin] webapp=null path=/admin/metrics params={prefix=CONTAINER.fs.usableSpace,CONTAINER.fs.totalSpace,CORE.coreName&wt=javabin&version=2&group=solr.node,solr.core} status=0 QTime=0
   [junit4]   2> 462710 INFO  (qtp369565589-2043) [n:127.0.0.1:37986_    ] o.a.s.s.HttpSolrCall [admin] webapp=null path=/admin/metrics params={prefix=CONTAINER.fs.usableSpace,CONTAINER.fs.totalSpace,CORE.coreName&wt=javabin&version=2&group=solr.node,solr.core} status=0 QTime=0
   [junit4]   2> 462712 INFO  (qtp369565589-2049) [n:127.0.0.1:37986_    ] o.a.s.s.HttpSolrCall [admin] webapp=null path=/admin/metrics params={prefix=CONTAINER.fs.usableSpace,CONTAINER.fs.totalSpace,CORE.coreName&wt=javabin&version=2&group=solr.node,solr.core} status=0 QTime=0
   [junit4]   2> 462716 INFO  (qtp1056606282-2070) [n:127.0.0.1:40898_    ] o.a.s.s.HttpSolrCall [admin] webapp=null path=/admin/metrics params={wt=ja

[...truncated too long message...]

current.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
   [junit4]    >         at java.lang.Thread.run(Thread.java:748)
   [junit4]    > 	at __randomizedtesting.SeedInfo.seed([988D9624F13AA4D8]:0)Throwable #3: com.carrotsearch.randomizedtesting.ThreadLeakError: There are still zombie threads that couldn't be terminated:
   [junit4]    >    1) Thread[id=2028, name=searcherExecutor-190-thread-1, state=WAITING, group=TGRP-HdfsRestartWhileUpdatingTest]
   [junit4]    >         at sun.misc.Unsafe.park(Native Method)
   [junit4]    >         at java.util.concurrent.locks.LockSupport.park(LockSupport.java:175)
   [junit4]    >         at java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:2039)
   [junit4]    >         at java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:442)
   [junit4]    >         at java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1074)
   [junit4]    >         at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1134)
   [junit4]    >         at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
   [junit4]    >         at java.lang.Thread.run(Thread.java:748)
   [junit4]    >    2) Thread[id=2202, name=searcherExecutor-232-thread-1, state=WAITING, group=TGRP-HdfsRestartWhileUpdatingTest]
   [junit4]    >         at sun.misc.Unsafe.park(Native Method)
   [junit4]    >         at java.util.concurrent.locks.LockSupport.park(LockSupport.java:175)
   [junit4]    >         at java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:2039)
   [junit4]    >         at java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:442)
   [junit4]    >         at java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1074)
   [junit4]    >         at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1134)
   [junit4]    >         at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
   [junit4]    >         at java.lang.Thread.run(Thread.java:748)
   [junit4]    >    3) Thread[id=2396, name=searcherExecutor-280-thread-1, state=WAITING, group=TGRP-HdfsRestartWhileUpdatingTest]
   [junit4]    >         at sun.misc.Unsafe.park(Native Method)
   [junit4]    >         at java.util.concurrent.locks.LockSupport.park(LockSupport.java:175)
   [junit4]    >         at java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:2039)
   [junit4]    >         at java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:442)
   [junit4]    >         at java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1074)
   [junit4]    >         at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1134)
   [junit4]    >         at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
   [junit4]    >         at java.lang.Thread.run(Thread.java:748)
   [junit4]    >    4) Thread[id=2177, name=searcherExecutor-227-thread-1, state=WAITING, group=TGRP-HdfsRestartWhileUpdatingTest]
   [junit4]    >         at sun.misc.Unsafe.park(Native Method)
   [junit4]    >         at java.util.concurrent.locks.LockSupport.park(LockSupport.java:175)
   [junit4]    >         at java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:2039)
   [junit4]    >         at java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:442)
   [junit4]    >         at java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1074)
   [junit4]    >         at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1134)
   [junit4]    >         at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
   [junit4]    >         at java.lang.Thread.run(Thread.java:748)
   [junit4]    >    5) Thread[id=2165, name=searcherExecutor-222-thread-1, state=WAITING, group=TGRP-HdfsRestartWhileUpdatingTest]
   [junit4]    >         at sun.misc.Unsafe.park(Native Method)
   [junit4]    >         at java.util.concurrent.locks.LockSupport.park(LockSupport.java:175)
   [junit4]    >         at java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:2039)
   [junit4]    >         at java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:442)
   [junit4]    >         at java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1074)
   [junit4]    >         at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1134)
   [junit4]    >         at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
   [junit4]    >         at java.lang.Thread.run(Thread.java:748)
   [junit4]    >    6) Thread[id=2319, name=searcherExecutor-252-thread-1, state=WAITING, group=TGRP-HdfsRestartWhileUpdatingTest]
   [junit4]    >         at sun.misc.Unsafe.park(Native Method)
   [junit4]    >         at java.util.concurrent.locks.LockSupport.park(LockSupport.java:175)
   [junit4]    >         at java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:2039)
   [junit4]    >         at java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:442)
   [junit4]    >         at java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1074)
   [junit4]    >         at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1134)
   [junit4]    >         at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
   [junit4]    >         at java.lang.Thread.run(Thread.java:748)
   [junit4]    >    7) Thread[id=2358, name=searcherExecutor-266-thread-1, state=WAITING, group=TGRP-HdfsRestartWhileUpdatingTest]
   [junit4]    >         at sun.misc.Unsafe.park(Native Method)
   [junit4]    >         at java.util.concurrent.locks.LockSupport.park(LockSupport.java:175)
   [junit4]    >         at java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:2039)
   [junit4]    >         at java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:442)
   [junit4]    >         at java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1074)
   [junit4]    >         at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1134)
   [junit4]    >         at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
   [junit4]    >         at java.lang.Thread.run(Thread.java:748)
   [junit4]    > 	at __randomizedtesting.SeedInfo.seed([988D9624F13AA4D8]:0)
   [junit4] Completed [23/25 (5!)] on J1 in 145.07s, 1 test, 1 failure, 2 errors <<< FAILURES!
   [junit4] 
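On the ThreadLeakError reported above: each zombie "searcherExecutor-*" thread is the worker of a per-core single-thread pool, parked in LinkedBlockingQueue.take() waiting for work, i.e. a pool that was never shut down before the suite's leak check ran (typically because SolrCore.close() never got that far). The shutdown such a pool needs is the standard JDK pattern below, which Solr wraps as ExecutorUtil.shutdownAndAwaitTermination; the demo class name is made up.

    import java.util.concurrent.ExecutorService;
    import java.util.concurrent.Executors;
    import java.util.concurrent.TimeUnit;

    public class SearcherPoolShutdownDemo {
      public static void main(String[] args) throws InterruptedException {
        // a single-thread pool like the per-core searcherExecutor
        ExecutorService searcherExecutor = Executors.newSingleThreadExecutor();
        searcherExecutor.submit(() -> System.out.println("warming a searcher"));
        // without these calls the idle worker parks forever in
        // LinkedBlockingQueue.take() -- exactly the WAITING frames above
        searcherExecutor.shutdown();
        if (!searcherExecutor.awaitTermination(60, TimeUnit.SECONDS)) {
          searcherExecutor.shutdownNow();
        }
      }
    }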
   [junit4] Suite: org.apache.solr.cloud.autoscaling.SearchRateTriggerIntegrationTest
   [junit4] Completed [24/25 (5!)] on J1 in 0.01s, 3 tests, 3 skipped
   [junit4] 
   [junit4] HEARTBEAT J2 PID(28524@lucene1-us-west): 2018-12-29T11:02:37, stalled for 62.9s at: SearchRateTriggerIntegrationTest.testBelowSearchRate
   [junit4] Suite: org.apache.solr.cloud.autoscaling.SearchRateTriggerIntegrationTest
   [junit4] OK      64.5s J2 | SearchRateTriggerIntegrationTest.testDeleteNode
   [junit4] OK      32.9s J2 | SearchRateTriggerIntegrationTest.testAboveSearchRate
   [junit4] OK      72.3s J2 | SearchRateTriggerIntegrationTest.testBelowSearchRate
   [junit4] Completed [25/25 (5!)] on J2 in 172.66s, 3 tests
   [junit4] 
   [junit4] 
   [junit4] Tests with failures [seed: 988D9624F13AA4D8]:
   [junit4]   - org.apache.solr.cloud.autoscaling.sim.TestSimTriggerIntegration.testNodeMarkersRegistration
   [junit4]   - org.apache.solr.cloud.hdfs.HdfsRestartWhileUpdatingTest.test
   [junit4]   - org.apache.solr.cloud.hdfs.HdfsRestartWhileUpdatingTest.test
   [junit4]   - org.apache.solr.cloud.hdfs.HdfsRestartWhileUpdatingTest (suite)
   [junit4]   - org.apache.solr.cloud.hdfs.HdfsRestartWhileUpdatingTest (suite)
   [junit4] 
   [junit4] 
   [junit4] JVM J0:     1.03 ..   458.89 =   457.85s
   [junit4] JVM J1:     1.09 ..   626.83 =   625.73s
   [junit4] JVM J2:     1.05 ..   658.74 =   657.69s
   [junit4] Execution time total: 10 minutes 58 seconds
   [junit4] Tests summary: 25 suites, 85 tests, 6 suite-level errors, 3 failures, 55 ignored (6 assumptions)

BUILD FAILED
/x1/jenkins/jenkins-slave/workspace/Lucene-Solr-repro/lucene/common-build.xml:1572: The following error occurred while executing this line:
/x1/jenkins/jenkins-slave/workspace/Lucene-Solr-repro/lucene/common-build.xml:1099: There were test failures: 25 suites, 85 tests, 6 suite-level errors, 3 failures, 55 ignored (6 assumptions) [seed: 988D9624F13AA4D8]

Total time: 11 minutes 1 second

[repro] Setting last failure code to 256

[repro] Failures:
[repro]   0/5 failed: org.apache.solr.cloud.autoscaling.SearchRateTriggerIntegrationTest
[repro]   0/5 failed: org.apache.solr.cloud.hdfs.HdfsRecoveryZkTest
[repro]   0/5 failed: org.apache.solr.handler.TestSolrConfigHandlerCloud
[repro]   1/5 failed: org.apache.solr.cloud.autoscaling.sim.TestSimTriggerIntegration
[repro]   4/5 failed: org.apache.solr.cloud.hdfs.HdfsRestartWhileUpdatingTest
[repro] git checkout 345a655f216258c406c384ada9aa6d5f14e254f9
HEAD is now at 345a655... SOLR-12973: Admin UI Nodes view support for replica* replica names. (Daniel Collins, Christine Poerschke, janhoy)
[repro] Exiting with code 256
Archiving artifacts
[Fast Archiver] No artifacts from Lucene-Solr-repro Repro-Lucene-Solr-Tests-master#3098 to compare, so performing full copy of artifacts
Recording test results
Build step 'Publish JUnit test result report' changed build result to UNSTABLE
Email was triggered for: Unstable (Test Failures)
Sending email for trigger: Unstable (Test Failures)

[JENKINS] Lucene-Solr-repro - Build # 2584 - Still Unstable

Posted by Apache Jenkins Server <je...@builds.apache.org>.
Build: https://builds.apache.org/job/Lucene-Solr-repro/2584/

[...truncated 28 lines...]
[repro] Jenkins log URL: https://builds.apache.org/job/Lucene-Solr-NightlyTests-7.x/416/consoleText

[repro] Revision: 9aa15a2accd47214dc2d76a035fa31450a079f62

[repro] Ant options: -Dtests.multiplier=2 -Dtests.linedocsfile=/home/jenkins/jenkins-slave/workspace/Lucene-Solr-NightlyTests-7.x/test-data/enwiki.random.lines.txt
[repro] Repro line:  ant test  -Dtestcase=ForceLeaderTest -Dtests.method=testReplicasInLIRNoLeader -Dtests.seed=7A776A947A0461A0 -Dtests.multiplier=2 -Dtests.nightly=true -Dtests.slow=true -Dtests.linedocsfile=/home/jenkins/jenkins-slave/workspace/Lucene-Solr-NightlyTests-7.x/test-data/enwiki.random.lines.txt -Dtests.locale=en-NZ -Dtests.timezone=Pacific/Ponape -Dtests.asserts=true -Dtests.file.encoding=UTF-8

[repro] git rev-parse --abbrev-ref HEAD
[repro] git rev-parse HEAD
[repro] Initial local git branch/revision: 345a655f216258c406c384ada9aa6d5f14e254f9
[repro] git fetch
[repro] git checkout 9aa15a2accd47214dc2d76a035fa31450a079f62

[...truncated 2 lines...]
[repro] git merge --ff-only

[...truncated 1 lines...]
[repro] ant clean

[...truncated 6 lines...]
[repro] Test suites by module:
[repro]    solr/core
[repro]       ForceLeaderTest
[repro] ant compile-test

[...truncated 3605 lines...]
[repro] ant test-nocompile -Dtests.dups=5 -Dtests.maxfailures=5 -Dtests.class="*.ForceLeaderTest" -Dtests.showOutput=onerror -Dtests.multiplier=2 -Dtests.linedocsfile=/home/jenkins/jenkins-slave/workspace/Lucene-Solr-NightlyTests-7.x/test-data/enwiki.random.lines.txt -Dtests.seed=7A776A947A0461A0 -Dtests.multiplier=2 -Dtests.nightly=true -Dtests.slow=true -Dtests.linedocsfile=/home/jenkins/jenkins-slave/workspace/Lucene-Solr-NightlyTests-7.x/test-data/enwiki.random.lines.txt -Dtests.locale=en-NZ -Dtests.timezone=Pacific/Ponape -Dtests.asserts=true -Dtests.file.encoding=UTF-8

[...truncated 1791 lines...]
   [junit4]   2> 47492 WARN  (TEST-ForceLeaderTest.testReplicasInLIRNoLeader-seed#[7A776A947A0461A0]) [    ] o.a.s.c.AbstractFullDistribZkTestBase ERROR: org.apache.solr.common.SolrException: Could not find a healthy node to handle the request. ... Sleeping for 1 seconds before re-try ...
   [junit4]   2> 47758 ERROR (indexFetcher-80-thread-1) [    ] o.a.s.h.ReplicationHandler Index fetch failed :org.apache.solr.common.SolrException: No registered leader was found after waiting for 4000ms , collection: forceleader_test_collection slice: shard1 saw state=DocCollection(forceleader_test_collection//collections/forceleader_test_collection/state.json/15)={
   [junit4]   2>   "pullReplicas":"0",
   [junit4]   2>   "replicationFactor":"0",
   [junit4]   2>   "shards":{"shard1":{
   [junit4]   2>       "range":"80000000-7fffffff",
   [junit4]   2>       "state":"active",
   [junit4]   2>       "replicas":{
   [junit4]   2>         "core_node2":{
   [junit4]   2>           "core":"forceleader_test_collection_shard1_replica_t1",
   [junit4]   2>           "base_url":"http://127.0.0.1:45355/gsn/uk",
   [junit4]   2>           "node_name":"127.0.0.1:45355_gsn%2Fuk",
   [junit4]   2>           "state":"down",
   [junit4]   2>           "type":"TLOG"},
   [junit4]   2>         "core_node4":{
   [junit4]   2>           "state":"down",
   [junit4]   2>           "base_url":"http://127.0.0.1:43543/gsn/uk",
   [junit4]   2>           "core":"forceleader_test_collection_shard1_replica_t3",
   [junit4]   2>           "node_name":"127.0.0.1:43543_gsn%2Fuk",
   [junit4]   2>           "force_set_state":"false",
   [junit4]   2>           "type":"TLOG"},
   [junit4]   2>         "core_node6":{
   [junit4]   2>           "state":"down",
   [junit4]   2>           "base_url":"http://127.0.0.1:38474/gsn/uk",
   [junit4]   2>           "core":"forceleader_test_collection_shard1_replica_t5",
   [junit4]   2>           "node_name":"127.0.0.1:38474_gsn%2Fuk",
   [junit4]   2>           "force_set_state":"false",
   [junit4]   2>           "type":"TLOG"}}}},
   [junit4]   2>   "router":{"name":"compositeId"},
   [junit4]   2>   "maxShardsPerNode":"1",
   [junit4]   2>   "autoAddReplicas":"false",
   [junit4]   2>   "nrtReplicas":"0",
   [junit4]   2>   "tlogReplicas":"3"} with live_nodes=[127.0.0.1:43543_gsn%2Fuk, 127.0.0.1:36990_gsn%2Fuk, 127.0.0.1:38474_gsn%2Fuk]
   [junit4]   2> 	at org.apache.solr.common.cloud.ZkStateReader.getLeaderRetry(ZkStateReader.java:902)
   [junit4]   2> 	at org.apache.solr.common.cloud.ZkStateReader.getLeaderRetry(ZkStateReader.java:879)
   [junit4]   2> 	at org.apache.solr.handler.IndexFetcher.getLeaderReplica(IndexFetcher.java:688)
   [junit4]   2> 	at org.apache.solr.handler.IndexFetcher.fetchLatestIndex(IndexFetcher.java:381)
   [junit4]   2> 	at org.apache.solr.handler.IndexFetcher.fetchLatestIndex(IndexFetcher.java:346)
   [junit4]   2> 	at org.apache.solr.handler.ReplicationHandler.doFetch(ReplicationHandler.java:425)
   [junit4]   2> 	at org.apache.solr.handler.ReplicationHandler.lambda$setupPolling$13(ReplicationHandler.java:1171)
   [junit4]   2> 	at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:511)
   [junit4]   2> 	at java.util.concurrent.FutureTask.runAndReset(FutureTask.java:308)
   [junit4]   2> 	at java.util.concurrent.ScheduledThreadPoolExecutor$ScheduledFutureTask.access$301(ScheduledThreadPoolExecutor.java:180)
   [junit4]   2> 	at java.util.concurrent.ScheduledThreadPoolExecutor$ScheduledFutureTask.run(ScheduledThreadPoolExecutor.java:294)
   [junit4]   2> 	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
   [junit4]   2> 	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
   [junit4]   2> 	at java.lang.Thread.run(Thread.java:748)
   [junit4]   2> 
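The "Index fetch failed ... No registered leader was found after waiting for 4000ms" error above (it recurs below for the other replica) bottoms out in ZkStateReader.getLeaderRetry, which simply blocks until an active leader for the shard appears in ZK or the timeout elapses; with all three TLOG replicas "down" in the cluster state, no leader can register, so every replication poll fails. A sketch of that lookup, assuming a ZkStateReader already connected to the cluster:

    import org.apache.solr.common.cloud.Replica;
    import org.apache.solr.common.cloud.ZkStateReader;

    public class LeaderLookupDemo {
      static Replica waitForLeader(ZkStateReader zkStateReader) throws InterruptedException {
        // blocks until an active leader of shard1 registers, or throws a
        // SolrException after 4000 ms -- the timeout quoted above
        return zkStateReader.getLeaderRetry("forceleader_test_collection", "shard1", 4000);
      }
    }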
   [junit4]   2> 47758 INFO  (recoveryExecutor-66-thread-1-processing-n:127.0.0.1:38474_gsn%2Fuk x:forceleader_test_collection_shard1_replica_t5 c:forceleader_test_collection s:shard1 r:core_node6) [n:127.0.0.1:38474_gsn%2Fuk c:forceleader_test_collection s:shard1 r:core_node6 x:forceleader_test_collection_shard1_replica_t5] o.a.s.c.RecoveryStrategy Finished recovery process, successful=[false]
   [junit4]   2> 48171 ERROR (indexFetcher-73-thread-1) [    ] o.a.s.h.ReplicationHandler Index fetch failed :org.apache.solr.common.SolrException: No registered leader was found after waiting for 4000ms , collection: forceleader_test_collection slice: shard1 saw state=DocCollection(forceleader_test_collection//collections/forceleader_test_collection/state.json/15)={
   [junit4]   2>   "pullReplicas":"0",
   [junit4]   2>   "replicationFactor":"0",
   [junit4]   2>   "shards":{"shard1":{
   [junit4]   2>       "range":"80000000-7fffffff",
   [junit4]   2>       "state":"active",
   [junit4]   2>       "replicas":{
   [junit4]   2>         "core_node2":{
   [junit4]   2>           "core":"forceleader_test_collection_shard1_replica_t1",
   [junit4]   2>           "base_url":"http://127.0.0.1:45355/gsn/uk",
   [junit4]   2>           "node_name":"127.0.0.1:45355_gsn%2Fuk",
   [junit4]   2>           "state":"down",
   [junit4]   2>           "type":"TLOG"},
   [junit4]   2>         "core_node4":{
   [junit4]   2>           "state":"down",
   [junit4]   2>           "base_url":"http://127.0.0.1:43543/gsn/uk",
   [junit4]   2>           "core":"forceleader_test_collection_shard1_replica_t3",
   [junit4]   2>           "node_name":"127.0.0.1:43543_gsn%2Fuk",
   [junit4]   2>           "force_set_state":"false",
   [junit4]   2>           "type":"TLOG"},
   [junit4]   2>         "core_node6":{
   [junit4]   2>           "state":"down",
   [junit4]   2>           "base_url":"http://127.0.0.1:38474/gsn/uk",
   [junit4]   2>           "core":"forceleader_test_collection_shard1_replica_t5",
   [junit4]   2>           "node_name":"127.0.0.1:38474_gsn%2Fuk",
   [junit4]   2>           "force_set_state":"false",
   [junit4]   2>           "type":"TLOG"}}}},
   [junit4]   2>   "router":{"name":"compositeId"},
   [junit4]   2>   "maxShardsPerNode":"1",
   [junit4]   2>   "autoAddReplicas":"false",
   [junit4]   2>   "nrtReplicas":"0",
   [junit4]   2>   "tlogReplicas":"3"} with live_nodes=[127.0.0.1:43543_gsn%2Fuk, 127.0.0.1:36990_gsn%2Fuk, 127.0.0.1:38474_gsn%2Fuk]
   [junit4]   2> 	at org.apache.solr.common.cloud.ZkStateReader.getLeaderRetry(ZkStateReader.java:902)
   [junit4]   2> 	at org.apache.solr.common.cloud.ZkStateReader.getLeaderRetry(ZkStateReader.java:879)
   [junit4]   2> 	at org.apache.solr.handler.IndexFetcher.getLeaderReplica(IndexFetcher.java:688)
   [junit4]   2> 	at org.apache.solr.handler.IndexFetcher.fetchLatestIndex(IndexFetcher.java:381)
   [junit4]   2> 	at org.apache.solr.handler.IndexFetcher.fetchLatestIndex(IndexFetcher.java:346)
   [junit4]   2> 	at org.apache.solr.handler.ReplicationHandler.doFetch(ReplicationHandler.java:425)
   [junit4]   2> 	at org.apache.solr.handler.ReplicationHandler.lambda$setupPolling$13(ReplicationHandler.java:1171)
   [junit4]   2> 	at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:511)
   [junit4]   2> 	at java.util.concurrent.FutureTask.runAndReset(FutureTask.java:308)
   [junit4]   2> 	at java.util.concurrent.ScheduledThreadPoolExecutor$ScheduledFutureTask.access$301(ScheduledThreadPoolExecutor.java:180)
   [junit4]   2> 	at java.util.concurrent.ScheduledThreadPoolExecutor$ScheduledFutureTask.run(ScheduledThreadPoolExecutor.java:294)
   [junit4]   2> 	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
   [junit4]   2> 	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
   [junit4]   2> 	at java.lang.Thread.run(Thread.java:748)
   [junit4]   2> 
   [junit4]   2> 48172 INFO  (recoveryExecutor-38-thread-1-processing-n:127.0.0.1:43543_gsn%2Fuk x:forceleader_test_collection_shard1_replica_t3 c:forceleader_test_collection s:shard1 r:core_node4) [n:127.0.0.1:43543_gsn%2Fuk c:forceleader_test_collection s:shard1 r:core_node4 x:forceleader_test_collection_shard1_replica_t3] o.a.s.c.RecoveryStrategy Finished recovery process, successful=[false]
   [junit4]   2> 48172 INFO  (updateExecutor-37-thread-2-processing-n:127.0.0.1:43543_gsn%2Fuk x:forceleader_test_collection_shard1_replica_t3 c:forceleader_test_collection s:shard1 r:core_node4) [n:127.0.0.1:43543_gsn%2Fuk c:forceleader_test_collection s:shard1 r:core_node4 x:forceleader_test_collection_shard1_replica_t3] o.a.s.u.DefaultSolrCoreState Running recovery
   [junit4]   2> 48172 INFO  (updateExecutor-37-thread-2-processing-n:127.0.0.1:43543_gsn%2Fuk x:forceleader_test_collection_shard1_replica_t3 c:forceleader_test_collection s:shard1 r:core_node4) [n:127.0.0.1:43543_gsn%2Fuk c:forceleader_test_collection s:shard1 r:core_node4 x:forceleader_test_collection_shard1_replica_t3] o.a.s.c.ActionThrottle Throttling recovery attempts - waiting for 6207ms
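The "Throttling recovery attempts - waiting for 6207ms" line above comes from ActionThrottle, which enforces a minimum gap between successive recovery (or leader-election) attempts so a flapping replica cannot spin. Illustrative use, with a made-up 10s window rather than the test's configured value:

    import org.apache.solr.cloud.ActionThrottle;

    public class RecoveryThrottleDemo {
      public static void main(String[] args) {
        ActionThrottle throttle = new ActionThrottle("recovery", 10_000);
        throttle.minimumWaitBetweenActions(); // no-op on the first attempt
        throttle.markAttemptingAction();      // record when the attempt started
        throttle.minimumWaitBetweenActions(); // sleeps out the rest of the 10s
                                              // window, logging "Throttling ..."
        throttle.markAttemptingAction();
      }
    }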
   [junit4]   2> 48493 ERROR (TEST-ForceLeaderTest.testReplicasInLIRNoLeader-seed#[7A776A947A0461A0]) [    ] o.a.s.c.s.i.CloudSolrClient Request to collection [forceleader_test_collection] failed due to (510) org.apache.solr.common.SolrException: Could not find a healthy node to handle the request., retry=0 commError=false errorCode=510 
   [junit4]   2> 48493 INFO  (TEST-ForceLeaderTest.testReplicasInLIRNoLeader-seed#[7A776A947A0461A0]) [    ] o.a.s.c.s.i.CloudSolrClient request was not communication error it seems
   [junit4]   2> 48493 WARN  (TEST-ForceLeaderTest.testReplicasInLIRNoLeader-seed#[7A776A947A0461A0]) [    ] o.a.s.c.s.i.CloudSolrClient Re-trying request to collection(s) [forceleader_test_collection] after stale state error from server.
   [junit4]   2> 48494 ERROR (TEST-ForceLeaderTest.testReplicasInLIRNoLeader-seed#[7A776A947A0461A0]) [    ] o.a.s.c.s.i.CloudSolrClient Request to collection [forceleader_test_collection] failed due to (510) org.apache.solr.common.SolrException: Could not find a healthy node to handle the request., retry=1 commError=false errorCode=510 
   [junit4]   2> 48494 INFO  (TEST-ForceLeaderTest.testReplicasInLIRNoLeader-seed#[7A776A947A0461A0]) [    ] o.a.s.c.s.i.CloudSolrClient request was not communication error it seems
   [junit4]   2> 48494 WARN  (TEST-ForceLeaderTest.testReplicasInLIRNoLeader-seed#[7A776A947A0461A0]) [    ] o.a.s.c.s.i.CloudSolrClient Re-trying request to collection(s) [forceleader_test_collection] after stale state error from server.
   [junit4]   2> 48495 ERROR (TEST-ForceLeaderTest.testReplicasInLIRNoLeader-seed#[7A776A947A0461A0]) [    ] o.a.s.c.s.i.CloudSolrClient Request to collection [forceleader_test_collection] failed due to (510) org.apache.solr.common.SolrException: Could not find a healthy node to handle the request., retry=2 commError=false errorCode=510 
   [junit4]   2> 48495 INFO  (TEST-ForceLeaderTest.testReplicasInLIRNoLeader-seed#[7A776A947A0461A0]) [    ] o.a.s.c.s.i.CloudSolrClient request was not communication error it seems
   [junit4]   2> 48495 WARN  (TEST-ForceLeaderTest.testReplicasInLIRNoLeader-seed#[7A776A947A0461A0]) [    ] o.a.s.c.s.i.CloudSolrClient Re-trying request to collection(s) [forceleader_test_collection] after stale state error from server.
   [junit4]   2> 48496 ERROR (TEST-ForceLeaderTest.testReplicasInLIRNoLeader-seed#[7A776A947A0461A0]) [    ] o.a.s.c.s.i.CloudSolrClient Request to collection [forceleader_test_collection] failed due to (510) org.apache.solr.common.SolrException: Could not find a healthy node to handle the request., retry=3 commError=false errorCode=510 
   [junit4]   2> 48496 INFO  (TEST-ForceLeaderTest.testReplicasInLIRNoLeader-seed#[7A776A947A0461A0]) [    ] o.a.s.c.s.i.CloudSolrClient request was not communication error it seems
   [junit4]   2> 48496 WARN  (TEST-ForceLeaderTest.testReplicasInLIRNoLeader-seed#[7A776A947A0461A0]) [    ] o.a.s.c.s.i.CloudSolrClient Re-trying request to collection(s) [forceleader_test_collection] after stale state error from server.
   [junit4]   2> 48496 ERROR (TEST-ForceLeaderTest.testReplicasInLIRNoLeader-seed#[7A776A947A0461A0]) [    ] o.a.s.c.s.i.CloudSolrClient Request to collection [forceleader_test_collection] failed due to (510) org.apache.solr.common.SolrException: Could not find a healthy node to handle the request., retry=4 commError=false errorCode=510 
   [junit4]   2> 48497 INFO  (TEST-ForceLeaderTest.testReplicasInLIRNoLeader-seed#[7A776A947A0461A0]) [    ] o.a.s.c.s.i.CloudSolrClient request was not communication error it seems
   [junit4]   2> 48497 WARN  (TEST-ForceLeaderTest.testReplicasInLIRNoLeader-seed#[7A776A947A0461A0]) [    ] o.a.s.c.s.i.CloudSolrClient Re-trying request to collection(s) [forceleader_test_collection] after stale state error from server.
   [junit4]   2> 48497 ERROR (TEST-ForceLeaderTest.testReplicasInLIRNoLeader-seed#[7A776A947A0461A0]) [    ] o.a.s.c.s.i.CloudSolrClient Request to collection [forceleader_test_collection] failed due to (510) org.apache.solr.common.SolrException: Could not find a healthy node to handle the request., retry=5 commError=false errorCode=510 
   [junit4]   2> 48497 INFO  (TEST-ForceLeaderTest.testReplicasInLIRNoLeader-seed#[7A776A947A0461A0]) [    ] o.a.s.c.s.i.CloudSolrClient request was not communication error it seems
   [junit4]   2> 48497 WARN  (TEST-ForceLeaderTest.testReplicasInLIRNoLeader-seed#[7A776A947A0461A0]) [    ] o.a.s.c.AbstractFullDistribZkTestBase ERROR: org.apache.solr.common.SolrException: Could not find a healthy node to handle the request. ... Sleeping for 1 seconds before re-try ...
   [junit4]   2> 49498 ERROR (TEST-ForceLeaderTest.testReplicasInLIRNoLeader-seed#[7A776A947A0461A0]) [    ] o.a.s.c.s.i.CloudSolrClient Request to collection [forceleader_test_collection] failed due to (510) org.apache.solr.common.SolrException: Could not find a healthy node to handle the request., retry=0 commError=false errorCode=510 
   [junit4]   2> 49499 INFO  (TEST-ForceLeaderTest.testReplicasInLIRNoLeader-seed#[7A776A947A0461A0]) [    ] o.a.s.c.s.i.CloudSolrClient request was not communication error it seems
   [junit4]   2> 49499 WARN  (TEST-ForceLeaderTest.testReplicasInLIRNoLeader-seed#[7A776A947A0461A0]) [    ] o.a.s.c.s.i.CloudSolrClient Re-trying request to collection(s) [forceleader_test_collection] after stale state error from server.
   [junit4]   2> 49499 ERROR (TEST-ForceLeaderTest.testReplicasInLIRNoLeader-seed#[7A776A947A0461A0]) [    ] o.a.s.c.s.i.CloudSolrClient Request to collection [forceleader_test_collection] failed due to (510) org.apache.solr.common.SolrException: Could not find a healthy node to handle the request., retry=1 commError=false errorCode=510 
   [junit4]   2> 49499 INFO  (TEST-ForceLeaderTest.testReplicasInLIRNoLeader-seed#[7A776A947A0461A0]) [    ] o.a.s.c.s.i.CloudSolrClient request was not communication error it seems
   [junit4]   2> 49499 WARN  (TEST-ForceLeaderTest.testReplicasInLIRNoLeader-seed#[7A776A947A0461A0]) [    ] o.a.s.c.s.i.CloudSolrClient Re-trying request to collection(s) [forceleader_test_collection] after stale state error from server.
   [junit4]   2> 49500 ERROR (TEST-ForceLeaderTest.testReplicasInLIRNoLeader-seed#[7A776A947A0461A0]) [    ] o.a.s.c.s.i.CloudSolrClient Request to collection [forceleader_test_collection] failed due to (510) org.apache.solr.common.SolrException: Could not find a healthy node to handle the request., retry=2 commError=false errorCode=510 
   [junit4]   2> 49500 INFO  (TEST-ForceLeaderTest.testReplicasInLIRNoLeader-seed#[7A776A947A0461A0]) [    ] o.a.s.c.s.i.CloudSolrClient request was not communication error it seems
   [junit4]   2> 49500 WARN  (TEST-ForceLeaderTest.testReplicasInLIRNoLeader-seed#[7A776A947A0461A0]) [    ] o.a.s.c.s.i.CloudSolrClient Re-trying request to collection(s) [forceleader_test_collection] after stale state error from server.
   [junit4]   2> 49501 ERROR (TEST-ForceLeaderTest.testReplicasInLIRNoLeader-seed#[7A776A947A0461A0]) [    ] o.a.s.c.s.i.CloudSolrClient Request to collection [forceleader_test_collection] failed due to (510) org.apache.solr.common.SolrException: Could not find a healthy node to handle the request., retry=3 commError=false errorCode=510 
   [junit4]   2> 49501 INFO  (TEST-ForceLeaderTest.testReplicasInLIRNoLeader-seed#[7A776A947A0461A0]) [    ] o.a.s.c.s.i.CloudSolrClient request was not communication error it seems
   [junit4]   2> 49501 WARN  (TEST-ForceLeaderTest.testReplicasInLIRNoLeader-seed#[7A776A947A0461A0]) [    ] o.a.s.c.s.i.CloudSolrClient Re-trying request to collection(s) [forceleader_test_collection] after stale state error from server.
   [junit4]   2> 49501 ERROR (TEST-ForceLeaderTest.testReplicasInLIRNoLeader-seed#[7A776A947A0461A0]) [    ] o.a.s.c.s.i.CloudSolrClient Request to collection [forceleader_test_collection] failed due to (510) org.apache.solr.common.SolrException: Could not find a healthy node to handle the request., retry=4 commError=false errorCode=510 
   [junit4]   2> 49501 INFO  (TEST-ForceLeaderTest.testReplicasInLIRNoLeader-seed#[7A776A947A0461A0]) [    ] o.a.s.c.s.i.CloudSolrClient request was not communication error it seems
   [junit4]   2> 49502 WARN  (TEST-ForceLeaderTest.testReplicasInLIRNoLeader-seed#[7A776A947A0461A0]) [    ] o.a.s.c.s.i.CloudSolrClient Re-trying request to collection(s) [forceleader_test_collection] after stale state error from server.
   [junit4]   2> 49502 ERROR (TEST-ForceLeaderTest.testReplicasInLIRNoLeader-seed#[7A776A947A0461A0]) [    ] o.a.s.c.s.i.CloudSolrClient Request to collection [forceleader_test_collection] failed due to (510) org.apache.solr.common.SolrException: Could not find a healthy node to handle the request., retry=5 commError=false errorCode=510 
   [junit4]   2> 49502 INFO  (TEST-ForceLeaderTest.testReplicasInLIRNoLeader-seed#[7A776A947A0461A0]) [    ] o.a.s.c.s.i.CloudSolrClient request was not communication error it seems
   [junit4]   2> 49502 WARN  (TEST-ForceLeaderTest.testReplicasInLIRNoLeader-seed#[7A776A947A0461A0]) [    ] o.a.s.c.AbstractFullDistribZkTestBase ERROR: org.apache.solr.common.SolrException: Could not find a healthy node to handle the request. ... Sleeping for 1 seconds before re-try ...
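
The ERROR/WARN runs above are CloudSolrClient's internal retries (retry=0..5) on "Could not find a healthy node", with the test base adding its own 1-second sleep between batches. A hedged SolrJ sketch of the equivalent client-side pattern; the ZK address and /solr chroot are the ones this log reports later ("Cluster at 127.0.0.1:41246/solr ready"), and the builder signature assumes a recent 7.x SolrJ:

    import java.util.Collections;
    import java.util.Optional;
    import org.apache.solr.client.solrj.impl.CloudSolrClient;
    import org.apache.solr.common.SolrException;
    import org.apache.solr.common.SolrInputDocument;

    public class RetryingAdd {
      public static void main(String[] args) throws Exception {
        try (CloudSolrClient client = new CloudSolrClient.Builder(
            Collections.singletonList("127.0.0.1:41246"), Optional.of("/solr")).build()) {
          client.setDefaultCollection("forceleader_test_collection");

          SolrInputDocument doc = new SolrInputDocument();
          doc.addField("id", "1");

          // Outer attempts with a 1s pause, mirroring the test harness; CloudSolrClient
          // already retries internally (the retry=0..5 lines above) before throwing.
          for (int attempt = 0; attempt < 3; attempt++) {
            try {
              client.add(doc);
              client.commit();
              return; // success
            } catch (SolrException e) {
              // code 510 in this log: no healthy replica could take the request
              System.err.println("Add failed (" + e.code() + "): " + e.getMessage());
              Thread.sleep(1000);
            }
          }
          System.err.println("No more retries available");
        }
      }
    }
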
   [junit4]   2> 49759 INFO  (ScheduledTrigger-8-thread-3) [    ] o.a.s.c.a.SystemLogListener Collection .system does not exist, disabling logging.
   [junit4]   2> 49780 INFO  (qtp639435276-33) [n:127.0.0.1:36990_gsn%2Fuk    ] o.a.s.s.HttpSolrCall [admin] webapp=null path=/admin/metrics params={wt=javabin&version=2&key=solr.core.control_collection.shard1.replica_n1:INDEX.sizeInBytes} status=0 QTime=0
   [junit4]   2> 49782 INFO  (qtp639435276-29) [n:127.0.0.1:36990_gsn%2Fuk    ] o.a.s.s.HttpSolrCall [admin] webapp=null path=/admin/metrics params={wt=javabin&version=2&key=solr.core.control_collection.shard1.replica_n1:INDEX.sizeInBytes} status=0 QTime=0
   [junit4]   2> 49784 INFO  (qtp639435276-30) [n:127.0.0.1:36990_gsn%2Fuk    ] o.a.s.s.HttpSolrCall [admin] webapp=null path=/admin/metrics params={wt=javabin&version=2&key=solr.core.control_collection.shard1.replica_n1:INDEX.sizeInBytes} status=0 QTime=0
   [junit4]   2> 49787 INFO  (qtp639435276-32) [n:127.0.0.1:36990_gsn%2Fuk    ] o.a.s.s.HttpSolrCall [admin] webapp=null path=/admin/metrics params={prefix=CONTAINER.fs.usableSpace,CONTAINER.fs.totalSpace,CORE.coreName&wt=javabin&version=2&group=solr.node,solr.core} status=0 QTime=1
   [junit4]   2> 49789 INFO  (qtp639435276-31) [n:127.0.0.1:36990_gsn%2Fuk    ] o.a.s.s.HttpSolrCall [admin] webapp=null path=/admin/metrics params={prefix=CONTAINER.fs.usableSpace,CONTAINER.fs.totalSpace,CORE.coreName&wt=javabin&version=2&group=solr.node,solr.core} status=0 QTime=0
   [junit4]   2> 49793 INFO  (qtp639435276-33) [n:127.0.0.1:36990_gsn%2Fuk    ] o.a.s.s.HttpSolrCall [admin] webapp=null path=/admin/metrics params={prefix=CONTAINER.fs.usableSpace,CONTAINER.fs.totalSpace,CORE.coreName&wt=javabin&version=2&group=solr.node,solr.core} status=0 QTime=0
   [junit4]   2> 49796 INFO  (qtp639435276-29) [n:127.0.0.1:36990_gsn%2Fuk    ] o.a.s.s.HttpSolrCall [admin] webapp=null path=/admin/metrics params={prefix=CONTAINER.fs.usableSpace,CONTAINER.fs.totalSpace,CORE.coreName&wt=javabin&version=2&group=solr.node,solr.core} status=0 QTime=0
   [junit4]   2> 49798 INFO  (qtp639435276-30) [n:127.0.0.1:36990_gsn%2Fuk    ] o.a.s.s.HttpSolrCall [admin] webapp=null path=/admin/metrics params={prefix=CONTAINER.fs.usableSpace,CONTAINER.fs.totalSpace,CORE.coreName&wt=javabin&version=2&group=solr.node,solr.core} status=0 QTime=0
   [junit4]   2> 49800 INFO  (SocketProxy-Acceptor-38474) [    ] o.a.s.c.s.c.SocketProxy accepted Socket[addr=/127.0.0.1,port=48848,localport=38474], receiveBufferSize:531000
   [junit4]   2> 49801 INFO  (SocketProxy-Acceptor-38474) [    ] o.a.s.c.s.c.SocketProxy proxy connection Socket[addr=/127.0.0.1,port=46202,localport=44678], receiveBufferSize=530904
   [junit4]   2> 49803 INFO  (qtp1309884849-132) [n:127.0.0.1:38474_gsn%2Fuk    ] o.a.s.s.HttpSolrCall [admin] webapp=null path=/admin/metrics params={wt=javabin&version=2&key=solr.core.collection1.shard2.replica_n5:INDEX.sizeInBytes&key=solr.core.forceleader_test_collection.shard1.replica_t5:INDEX.sizeInBytes} status=0 QTime=1
   [junit4]   2> 49805 INFO  (qtp1309884849-134) [n:127.0.0.1:38474_gsn%2Fuk    ] o.a.s.s.HttpSolrCall [admin] webapp=null path=/admin/metrics params={wt=javabin&version=2&key=solr.core.collection1.shard2.replica_n5:INDEX.sizeInBytes&key=solr.core.forceleader_test_collection.shard1.replica_t5:INDEX.sizeInBytes} status=0 QTime=0
   [junit4]   2> 49807 INFO  (qtp1309884849-133) [n:127.0.0.1:38474_gsn%2Fuk    ] o.a.s.s.HttpSolrCall [admin] webapp=null path=/admin/metrics params={wt=javabin&version=2&key=solr.core.collection1.shard2.replica_n5:INDEX.sizeInBytes&key=solr.core.forceleader_test_collection.shard1.replica_t5:INDEX.sizeInBytes} status=0 QTime=0
   [junit4]   2> 49810 INFO  (qtp1309884849-135) [n:127.0.0.1:38474_gsn%2Fuk    ] o.a.s.s.HttpSolrCall [admin] webapp=null path=/admin/metrics params={prefix=CONTAINER.fs.usableSpace,CONTAINER.fs.totalSpace,CORE.coreName&wt=javabin&version=2&group=solr.node,solr.core} status=0 QTime=1
   [junit4]   2> 49814 INFO  (qtp1309884849-131) [n:127.0.0.1:38474_gsn%2Fuk    ] o.a.s.s.HttpSolrCall [admin] webapp=null path=/admin/metrics params={prefix=CONTAINER.fs.usableSpace,CONTAINER.fs.totalSpace,CORE.coreName&wt=javabin&version=2&group=solr.node,solr.core} status=0 QTime=1
   [junit4]   2> 49816 INFO  (qtp1309884849-132) [n:127.0.0.1:38474_gsn%2Fuk    ] o.a.s.s.HttpSolrCall [admin] webapp=null path=/admin/metrics params={prefix=CONTAINER.fs.usableSpace,CONTAINER.fs.totalSpace,CORE.coreName&wt=javabin&version=2&group=solr.node,solr.core} status=0 QTime=1
   [junit4]   2> 49819 INFO  (qtp1309884849-134) [n:127.0.0.1:38474_gsn%2Fuk    ] o.a.s.s.HttpSolrCall [admin] webapp=null path=/admin/metrics params={prefix=CONTAINER.fs.usableSpace,CONTAINER.fs.totalSpace,CORE.coreName&wt=javabin&version=2&group=solr.node,solr.core} status=0 QTime=1
   [junit4]   2> 49822 INFO  (qtp1309884849-133) [n:127.0.0.1:38474_gsn%2Fuk    ] o.a.s.s.HttpSolrCall [admin] webapp=null path=/admin/metrics params={prefix=CONTAINER.fs.usableSpace,CONTAINER.fs.totalSpace,CORE.coreName&wt=javabin&version=2&group=solr.node,solr.core} status=0 QTime=1
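
The /admin/metrics requests above (key=...INDEX.sizeInBytes, prefix=CONTAINER.fs...) are the autoscaling framework polling index size and disk space on each node. One of them can be reproduced with SolrJ's GenericSolrRequest; the node URL and metric key are copied from the log lines, everything else is a sketch:

    import org.apache.solr.client.solrj.SolrClient;
    import org.apache.solr.client.solrj.SolrRequest;
    import org.apache.solr.client.solrj.impl.HttpSolrClient;
    import org.apache.solr.client.solrj.request.GenericSolrRequest;
    import org.apache.solr.client.solrj.response.SimpleSolrResponse;
    import org.apache.solr.common.params.ModifiableSolrParams;

    public class MetricsPoll {
      public static void main(String[] args) throws Exception {
        // Node and webapp context as logged: 127.0.0.1:36990 under /gsn/uk
        try (SolrClient client =
            new HttpSolrClient.Builder("http://127.0.0.1:36990/gsn/uk").build()) {
          ModifiableSolrParams params = new ModifiableSolrParams();
          // Same key as the requests above: one core's index size in bytes
          params.set("key", "solr.core.control_collection.shard1.replica_n1:INDEX.sizeInBytes");
          SimpleSolrResponse rsp =
              new GenericSolrRequest(SolrRequest.METHOD.GET, "/admin/metrics", params)
                  .process(client);
          System.out.println(rsp.getResponse());
        }
      }
    }
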
   [junit4]   2> 49823 INFO  (zkCallback-71-thread-1) [n:127.0.0.1:38474_gsn%2Fuk c:forceleader_test_collection s:shard1 r:core_node6 x:forceleader_test_collection_shard1_replica_t5] o.a.s.c.SyncStrategy Sync replicas to http://127.0.0.1:38474/gsn/uk/forceleader_test_collection_shard1_replica_t5/
   [junit4]   2> 49823 INFO  (zkCallback-71-thread-1) [n:127.0.0.1:38474_gsn%2Fuk c:forceleader_test_collection s:shard1 r:core_node6 x:forceleader_test_collection_shard1_replica_t5] o.a.s.u.PeerSync PeerSync: core=forceleader_test_collection_shard1_replica_t5 url=http://127.0.0.1:38474/gsn/uk START replicas=[http://127.0.0.1:43543/gsn/uk/forceleader_test_collection_shard1_replica_t3/] nUpdates=100
   [junit4]   2> 49824 INFO  (SocketProxy-Acceptor-43543) [    ] o.a.s.c.s.c.SocketProxy accepted Socket[addr=/127.0.0.1,port=48100,localport=43543], receiveBufferSize:531000
   [junit4]   2> 49825 INFO  (qtp756160782-98) [n:127.0.0.1:43543_gsn%2Fuk c:forceleader_test_collection s:shard1 r:core_node4 x:forceleader_test_collection_shard1_replica_t3] o.a.s.c.S.Request [forceleader_test_collection_shard1_replica_t3]  webapp=/gsn/uk path=/get params={distrib=false&qt=/get&fingerprint=false&getVersions=100&wt=javabin&version=2} status=0 QTime=0
   [junit4]   2> 49826 INFO  (zkCallback-71-thread-1) [n:127.0.0.1:38474_gsn%2Fuk c:forceleader_test_collection s:shard1 r:core_node6 x:forceleader_test_collection_shard1_replica_t5] o.a.s.u.PeerSync PeerSync: core=forceleader_test_collection_shard1_replica_t5 url=http://127.0.0.1:38474/gsn/uk  Received 1 versions from http://127.0.0.1:43543/gsn/uk/forceleader_test_collection_shard1_replica_t3/ fingerprint:null
   [junit4]   2> 49827 INFO  (qtp756160782-97) [n:127.0.0.1:43543_gsn%2Fuk c:forceleader_test_collection s:shard1 r:core_node4 x:forceleader_test_collection_shard1_replica_t3] o.a.s.c.S.Request [forceleader_test_collection_shard1_replica_t3]  webapp=/gsn/uk path=/get params={distrib=false&qt=/get&checkCanHandleVersionRanges=false&wt=javabin&version=2} status=0 QTime=0
   [junit4]   2> 49828 INFO  (zkCallback-71-thread-1) [n:127.0.0.1:38474_gsn%2Fuk c:forceleader_test_collection s:shard1 r:core_node6 x:forceleader_test_collection_shard1_replica_t5] o.a.s.u.PeerSync PeerSync: core=forceleader_test_collection_shard1_replica_t5 url=http://127.0.0.1:38474/gsn/uk  No additional versions requested. ourHighThreshold=1621216223095160832 otherLowThreshold=1621216223095160832 ourHighest=1621216223095160832 otherHighest=1621216223095160832
   [junit4]   2> 49828 INFO  (zkCallback-71-thread-1) [n:127.0.0.1:38474_gsn%2Fuk c:forceleader_test_collection s:shard1 r:core_node6 x:forceleader_test_collection_shard1_replica_t5] o.a.s.u.PeerSync PeerSync: core=forceleader_test_collection_shard1_replica_t5 url=http://127.0.0.1:38474/gsn/uk DONE. sync succeeded
   [junit4]   2> 49828 INFO  (zkCallback-71-thread-1) [n:127.0.0.1:38474_gsn%2Fuk c:forceleader_test_collection s:shard1 r:core_node6 x:forceleader_test_collection_shard1_replica_t5] o.a.s.c.SyncStrategy Sync Success - now sync replicas to me
   [junit4]   2> 49828 INFO  (zkCallback-71-thread-1) [n:127.0.0.1:38474_gsn%2Fuk c:forceleader_test_collection s:shard1 r:core_node6 x:forceleader_test_collection_shard1_replica_t5] o.a.s.c.SyncStrategy http://127.0.0.1:38474/gsn/uk/forceleader_test_collection_shard1_replica_t5/: try and ask http://127.0.0.1:43543/gsn/uk/forceleader_test_collection_shard1_replica_t3/ to sync
   [junit4]   2> 49829 INFO  (qtp756160782-99) [n:127.0.0.1:43543_gsn%2Fuk c:forceleader_test_collection s:shard1 r:core_node4 x:forceleader_test_collection_shard1_replica_t3] o.a.s.u.PeerSync PeerSync: core=forceleader_test_collection_shard1_replica_t3 url=http://127.0.0.1:43543/gsn/uk START replicas=[http://127.0.0.1:38474/gsn/uk/forceleader_test_collection_shard1_replica_t5/] nUpdates=100
   [junit4]   2> 49831 INFO  (qtp1309884849-135) [n:127.0.0.1:38474_gsn%2Fuk c:forceleader_test_collection s:shard1 r:core_node6 x:forceleader_test_collection_shard1_replica_t5] o.a.s.u.IndexFingerprint IndexFingerprint millis:0.0 result:{maxVersionSpecified=9223372036854775807, maxVersionEncountered=0, maxInHash=0, versionsHash=0, numVersions=0, numDocs=0, maxDoc=0}
   [junit4]   2> 49831 INFO  (qtp1309884849-135) [n:127.0.0.1:38474_gsn%2Fuk c:forceleader_test_collection s:shard1 r:core_node6 x:forceleader_test_collection_shard1_replica_t5] o.a.s.c.S.Request [forceleader_test_collection_shard1_replica_t5]  webapp=/gsn/uk path=/get params={distrib=false&qt=/get&getFingerprint=9223372036854775807&wt=javabin&version=2} status=0 QTime=0
   [junit4]   2> 49831 INFO  (SocketProxy-Acceptor-43543) [    ] o.a.s.c.s.c.SocketProxy proxy connection Socket[addr=/127.0.0.1,port=40831,localport=53104], receiveBufferSize=530904
   [junit4]   2> 49832 INFO  (qtp756160782-99) [n:127.0.0.1:43543_gsn%2Fuk c:forceleader_test_collection s:shard1 r:core_node4 x:forceleader_test_collection_shard1_replica_t3] o.a.s.u.IndexFingerprint IndexFingerprint millis:0.0 result:{maxVersionSpecified=9223372036854775807, maxVersionEncountered=0, maxInHash=0, versionsHash=0, numVersions=0, numDocs=0, maxDoc=0}
   [junit4]   2> 49832 INFO  (qtp756160782-99) [n:127.0.0.1:43543_gsn%2Fuk c:forceleader_test_collection s:shard1 r:core_node4 x:forceleader_test_collection_shard1_replica_t3] o.a.s.u.PeerSync We are already in sync. No need to do a PeerSync 
   [junit4]   2> 49832 INFO  (qtp756160782-99) [n:127.0.0.1:43543_gsn%2Fuk c:forceleader_test_collection s:shard1 r:core_node4 x:forceleader_test_collection_shard1_replica_t3] o.a.s.c.S.Request [forceleader_test_collection_shard1_replica_t3]  webapp=/gsn/uk path=/get params={distrib=false&qt=/get&getVersions=100&sync=http://127.0.0.1:38474/gsn/uk/forceleader_test_collection_shard1_replica_t5/&wt=javabin&version=2} status=0 QTime=3
   [junit4]   2> 49832 INFO  (qtp756160782-100) [n:127.0.0.1:43543_gsn%2Fuk    ] o.a.s.s.HttpSolrCall [admin] webapp=null path=/admin/metrics params={wt=javabin&version=2&key=solr.core.forceleader_test_collection.shard1.replica_t3:INDEX.sizeInBytes&key=solr.core.collection1.shard2.replica_n1:INDEX.sizeInBytes} status=0 QTime=0
   [junit4]   2> 49833 INFO  (zkCallback-71-thread-1) [n:127.0.0.1:38474_gsn%2Fuk c:forceleader_test_collection s:shard1 r:core_node6 x:forceleader_test_collection_shard1_replica_t5] o.a.s.c.SyncStrategy http://127.0.0.1:38474/gsn/uk/forceleader_test_collection_shard1_replica_t5/:  sync completed with http://127.0.0.1:43543/gsn/uk/forceleader_test_collection_shard1_replica_t3/
   [junit4]   2> 49834 WARN  (zkCallback-71-thread-1) [n:127.0.0.1:38474_gsn%2Fuk c:forceleader_test_collection s:shard1 r:core_node6 x:forceleader_test_collection_shard1_replica_t5] o.a.s.c.ShardLeaderElectionContext The previous leader marked me forceleader_test_collection_shard1_replica_t5 as down and I haven't recovered yet, so I shouldn't be the leader.
   [junit4]   2> 49834 ERROR (zkCallback-71-thread-1) [n:127.0.0.1:38474_gsn%2Fuk c:forceleader_test_collection s:shard1 r:core_node6 x:forceleader_test_collection_shard1_replica_t5] o.a.s.c.ShardLeaderElectionContext There was a problem trying to register as the leader:org.apache.solr.common.SolrException: Leader Initiated Recovery prevented leadership
   [junit4]   2> 	at org.apache.solr.cloud.ShardLeaderElectionContext.checkLIR(ElectionContext.java:631)
   [junit4]   2> 	at org.apache.solr.cloud.ShardLeaderElectionContext.runLeaderProcess(ElectionContext.java:460)
   [junit4]   2> 	at org.apache.solr.cloud.LeaderElector.runIamLeaderProcess(LeaderElector.java:171)
   [junit4]   2> 	at org.apache.solr.cloud.LeaderElector.checkIfIamLeader(LeaderElector.java:136)
   [junit4]   2> 	at org.apache.solr.cloud.LeaderElector.access$200(LeaderElector.java:57)
   [junit4]   2> 	at org.apache.solr.cloud.LeaderElector$ElectionWatcher.process(LeaderElector.java:349)
   [junit4]   2> 	at org.apache.solr.common.cloud.SolrZkClient$3.lambda$process$1(SolrZkClient.java:287)
   [junit4]   2> 	at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:511)
   [junit4]   2> 	at java.util.concurrent.FutureTask.run(FutureTask.java:266)
   [junit4]   2> 	at org.apache.solr.common.util.ExecutorUtil$MDCAwareThreadPoolExecutor.lambda$execute$0(ExecutorUtil.java:209)
   [junit4]   2> 	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
   [junit4]   2> 	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
   [junit4]   2> 	at java.lang.Thread.run(Thread.java:748)
   [junit4]   2> 
   [junit4]   2> 49834 INFO  (zkCallback-71-thread-1) [n:127.0.0.1:38474_gsn%2Fuk c:forceleader_test_collection s:shard1 r:core_node6 x:forceleader_test_collection_shard1_replica_t5] o.a.s.c.ShardLeaderElectionContext There may be a better leader candidate than us - going back into recovery
   [junit4]   2> 49834 INFO  (qtp756160782-96) [n:127.0.0.1:43543_gsn%2Fuk    ] o.a.s.s.HttpSolrCall [admin] webapp=null path=/admin/metrics params={wt=javabin&version=2&key=solr.core.forceleader_test_collection.shard1.replica_t3:INDEX.sizeInBytes&key=solr.core.collection1.shard2.replica_n1:INDEX.sizeInBytes} status=0 QTime=0
   [junit4]   2> 49835 INFO  (zkCallback-71-thread-1) [n:127.0.0.1:38474_gsn%2Fuk c:forceleader_test_collection s:shard1 r:core_node6 x:forceleader_test_collection_shard1_replica_t5] o.a.s.c.ShardLeaderElectionContextBase No version found for ephemeral leader parent node, won't remove previous leader registration.
   [junit4]   2> 49835 WARN  (updateExecutor-65-thread-1-processing-n:127.0.0.1:38474_gsn%2Fuk x:forceleader_test_collection_shard1_replica_t5 c:forceleader_test_collection s:shard1 r:core_node6) [n:127.0.0.1:38474_gsn%2Fuk c:forceleader_test_collection s:shard1 r:core_node6 x:forceleader_test_collection_shard1_replica_t5] o.a.s.c.RecoveryStrategy Stopping recovery for core=[forceleader_test_collection_shard1_replica_t5] coreNodeName=[core_node6]
   [junit4]   2> 49835 INFO  (updateExecutor-65-thread-1-processing-n:127.0.0.1:38474_gsn%2Fuk x:forceleader_test_collection_shard1_replica_t5 c:forceleader_test_collection s:shard1 r:core_node6) [n:127.0.0.1:38474_gsn%2Fuk c:forceleader_test_collection s:shard1 r:core_node6 x:forceleader_test_collection_shard1_replica_t5] o.a.s.u.DefaultSolrCoreState Running recovery
   [junit4]   2> 49835 INFO  (updateExecutor-65-thread-1-processing-n:127.0.0.1:38474_gsn%2Fuk x:forceleader_test_collection_shard1_replica_t5 c:forceleader_test_collection s:shard1 r:core_node6) [n:127.0.0.1:38474_gsn%2Fuk c:forceleader_test_collection s:shard1 r:core_node6 x:forceleader_test_collection_shard1_replica_t5] o.a.s.c.ActionThrottle Throttling recovery attempts - waiting for 6195ms
   [junit4]   2> 49840 INFO  (zkCallback-47-thread-1) [n:127.0.0.1:43543_gsn%2Fuk c:forceleader_test_collection s:shard1 r:core_node4 x:forceleader_test_collection_shard1_replica_t3] o.a.s.c.ShardLeaderElectionContext I may be the new leader - try and sync
   [junit4]   2> 49840 WARN  (zkCallback-47-thread-1) [n:127.0.0.1:43543_gsn%2Fuk c:forceleader_test_collection s:shard1 r:core_node4 x:forceleader_test_collection_shard1_replica_t3] o.a.s.c.RecoveryStrategy Stopping recovery for core=[forceleader_test_collection_shard1_replica_t3] coreNodeName=[core_node4]
   [junit4]   2> 49840 INFO  (qtp756160782-98) [n:127.0.0.1:43543_gsn%2Fuk    ] o.a.s.s.HttpSolrCall [admin] webapp=null path=/admin/metrics params={wt=javabin&version=2&key=solr.core.forceleader_test_collection.shard1.replica_t3:INDEX.sizeInBytes&key=solr.core.collection1.shard2.replica_n1:INDEX.sizeInBytes} status=0 QTime=1
   [junit4]   2> 49844 INFO  (qtp756160782-97) [n:127.0.0.1:43543_gsn%2Fuk    ] o.a.s.s.HttpSolrCall [admin] webapp=null path=/admin/metrics params={prefix=CONTAINER.fs.usableSpace,CONTAINER.fs.totalSpace,CORE.coreName&wt=javabin&version=2&group=solr.node,solr.core} status=0 QTime=1
   [junit4]   2> 49844 INFO  (zkCallback-71-thread-1) [    ] o.a.s.c.c.ZkStateReader A cluster state change: [WatchedEvent state:SyncConnected type:NodeDataChanged path:/collections/forceleader_test_collection/state.json] for collection [forceleader_test_collection] has occurred - updating... (live nodes size: [3])
   [junit4]   2> 49844 INFO  (zkCallback-71-thread-2) [    ] o.a.s.c.c.ZkStateReader A cluster state change: [WatchedEvent state:SyncConnected type:NodeDataChanged path:/collections/forceleader_test_collection/state.json] for collection [forceleader_test_collection] has occurred - updating... (live nodes size: [3])
   [junit4]   2> 49844 INFO  (zkCallback-71-thread-4) [    ] o.a.s.c.c.ZkStateReader A cluster state change: [WatchedEvent state:SyncConnected type:NodeDataChanged path:/collections/forceleader_test_collection/state.json] for collection [forceleader_test_collection] has occurred - updating... (live nodes size: [3])
   [junit4]   2> 49845 INFO  (zkCallback-47-thread-4) [    ] o.a.s.c.c.ZkStateReader A cluster state change: [WatchedEvent state:SyncConnected type:NodeDataChanged path:/collections/forceleader_test_collection/state.json] for collection [forceleader_test_collection] has occurred - updating... (live nodes size: [3])
   [junit4]   2> 49845 INFO  (zkCallback-47-thread-2) [    ] o.a.s.c.c.ZkStateReader A cluster state change: [WatchedEvent state:SyncConnected type:NodeDataChanged path:/collections/forceleader_test_collection/state.json] for collection [forceleader_test_collection] has occurred - updating... (live nodes size: [3])
   [junit4]   2> 49845 INFO  (zkCallback-47-thread-3) [    ] o.a.s.c.c.ZkStateReader A cluster state change: [WatchedEvent state:SyncConnected type:NodeDataChanged path:/collections/forceleader_test_collection/state.json] for collection [forceleader_test_collection] has occurred - updating... (live nodes size: [3])
   [junit4]   2> 49849 INFO  (qtp756160782-99) [n:127.0.0.1:43543_gsn%2Fuk    ] o.a.s.s.HttpSolrCall [admin] webapp=null path=/admin/metrics params={prefix=CONTAINER.fs.usableSpace,CONTAINER.fs.totalSpace,CORE.coreName&wt=javabin&version=2&group=solr.node,solr.core} status=0 QTime=1
   [junit4]   2> 49852 INFO  (qtp756160782-100) [n:127.0.0.1:43543_gsn%2Fuk    ] o.a.s.s.HttpSolrCall [admin] webapp=null path=/admin/metrics params={prefix=CONTAINER.fs.usableSpace,CONTAINER.fs.totalSpace,CORE.coreName&wt=javabin&version=2&group=solr.node,solr.core} status=0 QTime=1
   [junit4]   2> 49855 INFO  (qtp756160782-96) [n:127.0.0.1:43543_gsn%2Fuk    ] o.a.s.s.HttpSolrCall [admin] webapp=null path=/admin/metrics params={prefix=CONTAINER.fs.usableSpace,CONTAINER.fs.totalSpace,CORE.coreName&wt=javabin&version=2&group=solr.node,solr.core} status=0 QTime=1
   [junit4]   2> 49857 INFO  (qtp756160782-98) [n:127.0.0.1:43543_gsn%2Fuk    ] o.a.s.s.HttpSolrCall [admin] webapp=null path=/admin/metrics params={prefix=CONTAINER.fs.usableSpace,CONTAINER.fs.totalSpace,CORE.coreName&wt=javabin&version=2&group=solr.node,solr.core} status=0 QTime=1
   [junit4]   2> 49867 INFO  (AutoscalingActionExecutor-9-thread-1) [    ] o.a.s.c.a.ExecutePlanAction No operations to execute for event: {
   [junit4]   2>   "id":"19bf27b251dc2dT2a8ggqdqlrzufiespe694ucg6",
   [junit4]   2>   "source":".auto_add_replicas",
   [junit4]   2>   "eventTime":7247051634105389,
   [junit4]   2>   "eventType":"NODELOST",
   [junit4]   2>   "properties":{
   [junit4]   2>     "eventTimes":[7247051634105389],
   [junit4]   2>     "preferredOperation":"movereplica",
   [junit4]   2>     "_enqueue_time_":7247061644222564,
   [junit4]   2>     "nodeNames":["127.0.0.1:45355_gsn%2Fuk"]}}
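
The NODELOST event above is printed as JSON, so its fields can be pulled apart with Solr's own Utils helper. A small, hypothetical sketch; the payload below is an abbreviated copy of the event as logged:

    import java.util.List;
    import java.util.Map;
    import org.apache.solr.common.util.Utils;

    public class EventPayload {
      @SuppressWarnings("unchecked")
      public static void main(String[] args) {
        // Abbreviated copy of the .auto_add_replicas NODELOST event above
        String json = "{\"source\":\".auto_add_replicas\",\"eventType\":\"NODELOST\","
            + "\"properties\":{\"preferredOperation\":\"movereplica\","
            + "\"nodeNames\":[\"127.0.0.1:45355_gsn%2Fuk\"]}}";
        Map<String, Object> event = (Map<String, Object>) Utils.fromJSONString(json);
        Map<String, Object> props = (Map<String, Object>) event.get("properties");
        List<String> lost = (List<String>) props.get("nodeNames");
        System.out.println("Lost nodes: " + lost); // [127.0.0.1:45355_gsn%2Fuk]
      }
    }
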
   [junit4]   2> 50504 ERROR (TEST-ForceLeaderTest.testReplicasInLIRNoLeader-seed#[7A776A947A0461A0]) [    ] o.a.s.c.s.i.CloudSolrClient Request to collection [forceleader_test_collection] failed due to (510) org.apache.solr.common.SolrException: Could not find a healthy node to handle the request., retry=0 commError=false errorCode=510 
   [junit4]   2> 50504 INFO  (TEST-ForceLeaderTest.testReplicasInLIRNoLeader-seed#[7A776A947A0461A0]) [    ] o.a.s.c.s.i.CloudSolrClient request was not communication error it seems
   [junit4]   2> 50505 WARN  (TEST-ForceLeaderTest.testReplicasInLIRNoLeader-seed#[7A776A947A0461A0]) [    ] o.a.s.c.s.i.CloudSolrClient Re-trying request to collection(s) [forceleader_test_collection] after stale state error from server.
   [junit4]   2> 50507 ERROR (TEST-ForceLeaderTest.testReplicasInLIRNoLeader-seed#[7A776A947A0461A0]) [    ] o.a.s.c.s.i.CloudSolrClient Request to collection [forceleader_test_collection] failed due to (510) org.apache.solr.common.SolrException: Could not find a healthy node to handle the request., retry=1 commError=false errorCode=510 
   [junit4]   2> 50507 INFO  (TEST-ForceLeaderTest.testReplicasInLIRNoLeader-seed#[7A776A947A0461A0]) [    ] o.a.s.c.s.i.CloudSolrClient request was not communication error it seems
   [junit4]   2> 50507 WARN  (TEST-ForceLeaderTest.testReplicasInLIRNoLeader-seed#[7A776A947A0461A0]) [    ] o.a.s.c.s.i.CloudSolrClient Re-trying request to collection(s) [forceleader_test_collection] after stale state error from server.
   [junit4]   2> 50507 ERROR (TEST-ForceLeaderTest.testReplicasInLIRNoLeader-seed#[7A776A947A0461A0]) [    ] o.a.s.c.s.i.CloudSolrClient Request to collection [forceleader_test_collection] failed due to (510) org.apache.solr.common.SolrException: Could not find a healthy node to handle the request., retry=2 commError=false errorCode=510 
   [junit4]   2> 50507 INFO  (TEST-ForceLeaderTest.testReplicasInLIRNoLeader-seed#[7A776A947A0461A0]) [    ] o.a.s.c.s.i.CloudSolrClient request was not communication error it seems
   [junit4]   2> 50507 WARN  (TEST-ForceLeaderTest.testReplicasInLIRNoLeader-seed#[7A776A947A0461A0]) [    ] o.a.s.c.s.i.CloudSolrClient Re-trying request to collection(s) [forceleader_test_collection] after stale state error from server.
   [junit4]   2> 50508 ERROR (TEST-ForceLeaderTest.testReplicasInLIRNoLeader-seed#[7A776A947A0461A0]) [    ] o.a.s.c.s.i.CloudSolrClient Request to collection [forceleader_test_collection] failed due to (510) org.apache.solr.common.SolrException: Could not find a healthy node to handle the request., retry=3 commError=false errorCode=510 
   [junit4]   2> 50508 INFO  (TEST-ForceLeaderTest.testReplicasInLIRNoLeader-seed#[7A776A947A0461A0]) [    ] o.a.s.c.s.i.CloudSolrClient request was not communication error it seems
   [junit4]   2> 50508 WARN  (TEST-ForceLeaderTest.testReplicasInLIRNoLeader-seed#[7A776A947A0461A0]) [    ] o.a.s.c.s.i.CloudSolrClient Re-trying request to collection(s) [forceleader_test_collection] after stale state error from server.
   [junit4]   2> 50509 ERROR (TEST-ForceLeaderTest.testReplicasInLIRNoLeader-seed#[7A776A947A0461A0]) [    ] o.a.s.c.s.i.CloudSolrClient Request to collection [forceleader_test_collection] failed due to (510) org.apache.solr.common.SolrException: Could not find a healthy node to handle the request., retry=4 commError=false errorCode=510 
   [junit4]   2> 50509 INFO  (TEST-ForceLeaderTest.testReplicasInLIRNoLeader-seed#[7A776A947A0461A0]) [    ] o.a.s.c.s.i.CloudSolrClient request was not communication error it seems
   [junit4]   2> 50509 WARN  (TEST-ForceLeaderTest.testReplicasInLIRNoLeader-seed#[7A776A947A0461A0]) [    ] o.a.s.c.s.i.CloudSolrClient Re-trying request to collection(s) [forceleader_test_collection] after stale state error from server.
   [junit4]   2> 50509 ERROR (TEST-ForceLeaderTest.testReplicasInLIRNoLeader-seed#[7A776A947A0461A0]) [    ] o.a.s.c.s.i.CloudSolrClient Request to collection [forceleader_test_collection] failed due to (510) org.apache.solr.common.SolrException: Could not find a healthy node to handle the request., retry=5 commError=false errorCode=510 
   [junit4]   2> 50509 INFO  (TEST-ForceLeaderTest.testReplicasInLIRNoLeader-seed#[7A776A947A0461A0]) [    ] o.a.s.c.s.i.CloudSolrClient request was not communication error it seems
   [junit4]   2> 50510 WARN  (TEST-ForceLeaderTest.testReplicasInLIRNoLeader-seed#[7A776A947A0461A0]) [    ] o.a.s.c.AbstractFullDistribZkTestBase ERROR: org.apache.solr.common.SolrException: Could not find a healthy node to handle the request. ... Sleeping for 1 seconds before re-try ...
   [junit4]   2> 51511 ERROR (TEST-ForceLeaderTest.testReplicasInLIRNoLeader-seed#[7A776A947A0461A0]) [    ] o.a.s.c.s.i.CloudSolrClient Request to collection [forceleader_test_collection] failed due to (510) org.apache.solr.common.SolrException: Could not find a healthy node to handle the request., retry=0 commError=false errorCode=510 
   [junit4]   2> 51511 INFO  (TEST-ForceLeaderTest.testReplicasInLIRNoLeader-seed#[7A776A947A0461A0]) [    ] o.a.s.c.s.i.CloudSolrClient request was not communication error it seems
   [junit4]   2> 51511 WARN  (TEST-ForceLeaderTest.testReplicasInLIRNoLeader-seed#[7A776A947A0461A0]) [    ] o.a.s.c.s.i.CloudSolrClient Re-trying request to collection(s) [forceleader_test_collection] after stale state error from server.
   [junit4]   2> 51512 ERROR (TEST-ForceLeaderTest.testReplicasInLIRNoLeader-seed#[7A776A947A0461A0]) [    ] o.a.s.c.s.i.CloudSolrClient Request to collection [forceleader_test_collection] failed due to (510) org.apache.solr.common.SolrException: Could not find a healthy node to handle the request., retry=1 commError=false errorCode=510 
   [junit4]   2> 51512 INFO  (TEST-ForceLeaderTest.testReplicasInLIRNoLeader-seed#[7A776A947A0461A0]) [    ] o.a.s.c.s.i.CloudSolrClient request was not communication error it seems
   [junit4]   2> 51512 WARN  (TEST-ForceLeaderTest.testReplicasInLIRNoLeader-seed#[7A776A947A0461A0]) [    ] o.a.s.c.s.i.CloudSolrClient Re-trying request to collection(s) [forceleader_test_collection] after stale state error from server.
   [junit4]   2> 51512 ERROR (TEST-ForceLeaderTest.testReplicasInLIRNoLeader-seed#[7A776A947A0461A0]) [    ] o.a.s.c.s.i.CloudSolrClient Request to collection [forceleader_test_collection] failed due to (510) org.apache.solr.common.SolrException: Could not find a healthy node to handle the request., retry=2 commError=false errorCode=510 
   [junit4]   2> 51512 INFO  (TEST-ForceLeaderTest.testReplicasInLIRNoLeader-seed#[7A776A947A0461A0]) [    ] o.a.s.c.s.i.CloudSolrClient request was not communication error it seems
   [junit4]   2> 51512 WARN  (TEST-ForceLeaderTest.testReplicasInLIRNoLeader-seed#[7A776A947A0461A0]) [    ] o.a.s.c.s.i.CloudSolrClient Re-trying request to collection(s) [forceleader_test_collection] after stale state error from server.
   [junit4]   2> 51513 ERROR (TEST-ForceLeaderTest.testReplicasInLIRNoLeader-seed#[7A776A947A0461A0]) [    ] o.a.s.c.s.i.CloudSolrClient Request to collection [forceleader_test_collection] failed due to (510) org.apache.solr.common.SolrException: Could not find a healthy node to handle the request., retry=3 commError=false errorCode=510 
   [junit4]   2> 51513 INFO  (TEST-ForceLeaderTest.testReplicasInLIRNoLeader-seed#[7A776A947A0461A0]) [    ] o.a.s.c.s.i.CloudSolrClient request was not communication error it seems
   [junit4]   2> 51513 WARN  (TEST-ForceLeaderTest.testReplicasInLIRNoLeader-seed#[7A776A947A0461A0]) [    ] o.a.s.c.s.i.CloudSolrClient Re-trying request to collection(s) [forceleader_test_collection] after stale state error from server.
   [junit4]   2> 51514 ERROR (TEST-ForceLeaderTest.testReplicasInLIRNoLeader-seed#[7A776A947A0461A0]) [    ] o.a.s.c.s.i.CloudSolrClient Request to collection [forceleader_test_collection] failed due to (510) org.apache.solr.common.SolrException: Could not find a healthy node to handle the request., retry=4 commError=false errorCode=510 
   [junit4]   2> 51514 INFO  (TEST-ForceLeaderTest.testReplicasInLIRNoLeader-seed#[7A776A947A0461A0]) [    ] o.a.s.c.s.i.CloudSolrClient request was not communication error it seems
   [junit4]   2> 51514 WARN  (TEST-ForceLeaderTest.testReplicasInLIRNoLeader-seed#[7A776A947A0461A0]) [    ] o.a.s.c.s.i.CloudSolrClient Re-trying request to collection(s) [forceleader_test_collection] after stale state error from server.
   [junit4]   2> 51514 ERROR (TEST-ForceLeaderTest.testReplicasInLIRNoLeader-seed#[7A776A947A0461A0]) [    ] o.a.s.c.s.i.CloudSolrClient Request to collection [forceleader_test_collection] failed due to (510) org.apache.solr.common.SolrException: Could not find a healthy node to handle the request., retry=5 commError=false errorCode=510 
   [junit4]   2> 51514 INFO  (TEST-ForceLeaderTest.testReplicasInLIRNoLeader-seed#[7A776A947A0461A0]) [    ] o.a.s.c.s.i.CloudSolrClient request was not communication error it seems
   [junit4]   2> 51515 WARN  (TEST-ForceLeaderTest.testReplicasInLIRNoLeader-seed#[7A776A947A0461A0]) [    ] o.a.s.c.AbstractFullDistribZkTestBase ERROR: org.apache.solr.common.SolrException: Could not find a healthy node to handle the request. ... Sleeping for 1 seconds before re-try ...
   [junit4]   2> 52340 INFO  (zkCallback-47-thread-1) [n:127.0.0.1:43543_gsn%2Fuk c:forceleader_test_collection s:shard1 r:core_node4 x:forceleader_test_collection_shard1_replica_t3] o.a.s.c.SyncStrategy Sync replicas to http://127.0.0.1:43543/gsn/uk/forceleader_test_collection_shard1_replica_t3/
   [junit4]   2> 52341 INFO  (zkCallback-47-thread-1) [n:127.0.0.1:43543_gsn%2Fuk c:forceleader_test_collection s:shard1 r:core_node4 x:forceleader_test_collection_shard1_replica_t3] o.a.s.u.PeerSync PeerSync: core=forceleader_test_collection_shard1_replica_t3 url=http://127.0.0.1:43543/gsn/uk START replicas=[http://127.0.0.1:38474/gsn/uk/forceleader_test_collection_shard1_replica_t5/] nUpdates=100
   [junit4]   2> 52343 INFO  (qtp1309884849-131) [n:127.0.0.1:38474_gsn%2Fuk c:forceleader_test_collection s:shard1 r:core_node6 x:forceleader_test_collection_shard1_replica_t5] o.a.s.c.S.Request [forceleader_test_collection_shard1_replica_t5]  webapp=/gsn/uk path=/get params={distrib=false&qt=/get&fingerprint=false&getVersions=100&wt=javabin&version=2} status=0 QTime=0
   [junit4]   2> 52344 INFO  (zkCallback-47-thread-1) [n:127.0.0.1:43543_gsn%2Fuk c:forceleader_test_collection s:shard1 r:core_node4 x:forceleader_test_collection_shard1_replica_t3] o.a.s.u.PeerSync PeerSync: core=forceleader_test_collection_shard1_replica_t3 url=http://127.0.0.1:43543/gsn/uk  Received 1 versions from http://127.0.0.1:38474/gsn/uk/forceleader_test_collection_shard1_replica_t5/ fingerprint:null
   [junit4]   2> 52346 INFO  (qtp1309884849-132) [n:127.0.0.1:38474_gsn%2Fuk c:forceleader_test_collection s:shard1 r:core_node6 x:forceleader_test_collection_shard1_replica_t5] o.a.s.c.S.Request [forceleader_test_collection_shard1_replica_t5]  webapp=/gsn/uk path=/get params={distrib=false&qt=/get&checkCanHandleVersionRanges=false&wt=javabin&version=2} status=0 QTime=0
   [junit4]   2> 52347 INFO  (zkCallback-47-thread-1) [n:127.0.0.1:43543_gsn%2Fuk c:forceleader_test_collection s:shard1 r:core_node4 x:forceleader_test_collection_shard1_replica_t3] o.a.s.u.PeerSync PeerSync: core=forceleader_test_collection_shard1_replica_t3 url=http://127.0.0.1:43543/gsn/uk  No additional versions requested. ourHighThreshold=1621216223095160832 otherLowThreshold=1621216223095160832 ourHighest=1621216223095160832 otherHighest=1621216223095160832
   [junit4]   2> 52347 INFO  (zkCallback-47-thread-1) [n:127.0.0.1:43543_gsn%2Fuk c:forceleader_test_collection s:shard1 r:core_node4 x:forceleader_test_collection_shard1_replica_t3] o.a.s.u.PeerSync PeerSync: core=forceleader_test_collection_shard1_replica_t3 url=http://127.0.0.1:43543/gsn/uk DONE. sync succeeded
   [junit4]   2> 52347 INFO  (zkCallback-47-thread-1) [n:127.0.0.1:43543_gsn%2Fuk c:forceleader_test_collection s:shard1 r:core_node4 x:forceleader_test_collection_shard1_replica_t3] o.a.s.c.SyncStrategy Sync Success - now sync replicas to me
   [junit4]   2> 52347 INFO  (zkCallback-47-thread-1) [n:127.0.0.1:43543_gsn%2Fuk c:forceleader_test_collection s:shard1 r:core_node4 x:forceleader_test_collection_shard1_replica_t3] o.a.s.c.SyncStrategy http://127.0.0.1:43543/gsn/uk/forceleader_test_collection_shard1_replica_t3/: try and ask http://127.0.0.1:38474/gsn/uk/forceleader_test_collection_shard1_replica_t5/ to sync
   [junit4]   2> 52349 INFO  (qtp1309884849-134) [n:127.0.0.1:38474_gsn%2Fuk c:forceleader_test_collection s:shard1 r:core_node6 x:forceleader_test_collection_shard1_replica_t5] o.a.s.u.PeerSync PeerSync: core=forceleader_test_collection_shard1_replica_t5 url=http://127.0.0.1:38474/gsn/uk START replicas=[http://127.0.0.1:43543/gsn/uk/forceleader_test_collection_shard1_replica_t3/] nUpdates=100
   [junit4]   2> 52355 INFO  (qtp756160782-97) [n:127.0.0.1:43543_gsn%2Fuk c:forceleader_test_collection s:shard1 r:core_node4 x:forceleader_test_collection_shard1_replica_t3] o.a.s.u.IndexFingerprint IndexFingerprint millis:4.0 result:{maxVersionSpecified=9223372036854775807, maxVersionEncountered=0, maxInHash=0, versionsHash=0, numVersions=0, numDocs=0, maxDoc=0}
   [junit4]   2> 52355 INFO  (qtp756160782-97) [n:127.0.0.1:43543_gsn%2Fuk c:forceleader_test_collection s:shard1 r:core_node4 x:forceleader_test_collection_shard1_replica_t3] o.a.s.c.S.Request [forceleader_test_collection_shard1_replica_t3]  webapp=/gsn/uk path=/get params={distrib=false&qt=/get&getFingerprint=9223372036854775807&wt=javabin&version=2} status=0 QTime=5
   [junit4]   2> 52356 INFO  (qtp1309884849-134) [n:127.0.0.1:38474_gsn%2Fuk c:forceleader_test_collection s:shard1 r:core_node6 x:forceleader_test_collection_shard1_replica_t5] o.a.s.u.IndexFingerprint IndexFingerprint millis:0.0 result:{maxVersionSpecified=9223372036854775807, maxVersionEncountered=0, maxInHash=0, versionsHash=0, numVersions=0, numDocs=0, maxDoc=0}
   [junit4]   2> 52356 INFO  (qtp1309884849-134) [n:127.0.0.1:38474_gsn%2Fuk c:forceleader_test_collection s:shard1 r:core_node6 x:forceleader_test_collection_shard1_replica_t5] o.a.s.u.PeerSync We are already in sync. No need to do a PeerSync 
   [junit4]   2> 52356 INFO  (qtp1309884849-134) [n:127.0.0.1:38474_gsn%2Fuk c:forceleader_test_collection s:shard1 r:core_node6 x:forceleader_test_collection_shard1_replica_t5] o.a.s.c.S.Request [forceleader_test_collection_shard1_replica_t5]  webapp=/gsn/uk path=/get params={distrib=false&qt=/get&getVersions=100&sync=http://127.0.0.1:43543/gsn/uk/forceleader_test_collection_shard1_replica_t3/&wt=javabin&version=2} status=0 QTime=8
   [junit4]   2> 52357 INFO  (zkCallback-47-thread-1) [n:127.0.0.1:43543_gsn%2Fuk c:forceleader_test_collection s:shard1 r:core_node4 x:forceleader_test_collection_shard1_replica_t3] o.a.s.c.SyncStrategy http://127.0.0.1:43543/gsn/uk/forceleader_test_collection_shard1_replica_t3/:  sync completed with http://127.0.0.1:38474/gsn/uk/forceleader_test_collection_shard1_replica_t5/
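
The PeerSync exchange above rides on plain /get requests: each leader candidate asks its peers for their recent update versions (getVersions=100, the nUpdates=100 window) and compares fingerprints before declaring itself in sync. The same request can be issued by hand with SolrJ; the replica URL below is taken from the log, and the rest is a sketch:

    import org.apache.solr.client.solrj.SolrClient;
    import org.apache.solr.client.solrj.SolrRequest;
    import org.apache.solr.client.solrj.impl.HttpSolrClient;
    import org.apache.solr.client.solrj.request.GenericSolrRequest;
    import org.apache.solr.common.params.ModifiableSolrParams;

    public class PeerVersions {
      public static void main(String[] args) throws Exception {
        // One of the replicas PeerSync queried above
        String replicaUrl =
            "http://127.0.0.1:38474/gsn/uk/forceleader_test_collection_shard1_replica_t5";
        try (SolrClient client = new HttpSolrClient.Builder(replicaUrl).build()) {
          ModifiableSolrParams params = new ModifiableSolrParams();
          params.set("distrib", "false");    // ask only this replica, not the whole shard
          params.set("getVersions", "100");  // the nUpdates=100 window in the log
          params.set("fingerprint", "false");
          System.out.println(new GenericSolrRequest(SolrRequest.METHOD.GET, "/get", params)
              .process(client).getResponse());
        }
      }
    }
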
   [junit4]   2> 52358 WARN  (zkCallback-47-thread-1) [n:127.0.0.1:43543_gsn%2Fuk c:forceleader_test_collection s:shard1 r:core_node4 x:forceleader_test_collection_shard1_replica_t3] o.a.s.c.ShardLeaderElectionContext The previous leader marked me forceleader_test_collection_shard1_replica_t3 as down and I haven't recovered yet, so I shouldn't be the leader.
   [junit4]   2> 52358 ERROR (zkCallback-47-thread-1) [n:127.0.0.1:43543_gsn%2Fuk c:forceleader_test_collection s:shard1 r:core_node4 x:forceleader_test_collection_shard1_replica_t3] o.a.s.c.ShardLeaderElectionContext There was a problem trying to register as the leader:org.apache.solr.common.SolrException: Leader Initiated Recovery prevented leadership
   [junit4]   2> 	at org.apache.solr.cloud.ShardLeaderElectionContext.checkLIR(ElectionContext.java:631)
   [junit4]   2> 	at org.apache.solr.cloud.ShardLeaderElectionContext.runLeaderProcess(ElectionContext.java:460)
   [junit4]   2> 	at org.apache.solr.cloud.LeaderElector.runIamLeaderProcess(LeaderElector.java:171)
   [junit4]   2> 	at org.apache.solr.cloud.LeaderElector.checkIfIamLeader(LeaderElector.java:136)
   [junit4]   2> 	at org.apache.solr.cloud.LeaderElector.access$200(LeaderElector.java:57)
   [junit4]   2> 	at org.apache.solr.cloud.LeaderElector$ElectionWatcher.process(LeaderElector.java:349)
   [junit4]   2> 	at org.apache.solr.common.cloud.SolrZkClient$3.lambda$process$1(SolrZkClient.java:287)
   [junit4]   2> 	at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:511)
   [junit4]   2> 	at java.util.concurrent.FutureTask.run(FutureTask.java:266)
   [junit4]   2> 	at org.apache.solr.common.util.ExecutorUtil$MDCAwareThreadPoolExecutor.lambda$execute$0(ExecutorUtil.java:209)
   [junit4]   2> 	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
   [junit4]   2> 	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
   [junit4]   2> 	at java.lang.Thread.run(Thread.java:748)
   [junit4]   2> 
   [junit4]   2> 52358 INFO  (zkCallback-47-thread-1) [n:127.0.0.1:43543_gsn%2Fuk c:forceleader_test_collection s:shard1 r:core_node4 x:forceleader_test_collection_shard1_replica_t3] o.a.s.c.ShardLeaderElectionContext There may be a better leader candidate than us - going back into recovery
   [junit4]   2> 52359 INFO  (zkCallback-47-thread-1) [n:127.0.0.1:43543_gsn%2Fuk c:forceleader_test_collection s:shard1 r:core_node4 x:forceleader_test_collection_shard1_replica_t3] o.a.s.c.ShardLeaderElectionContextBase No version found for ephemeral leader parent node, won't remove previous leader registration.
   [junit4]   2> 52359 WARN  (updateExecutor-37-thread-1-processing-n:127.0.0.1:43543_gsn%2Fuk x:forceleader_test_collection_shard1_replica_t3 c:forceleader_test_collection s:shard1 r:core_node4) [n:127.0.0.1:43543_gsn%2Fuk c:forceleader_test_collection s:shard1 r:core_node4 x:forceleader_test_collection_shard1_replica_t3] o.a.s.c.RecoveryStrategy Stopping recovery for core=[forceleader_test_collection_shard1_replica_t3] coreNodeName=[core_node4]
   [junit4]   2> 52362 INFO  (zkCallback-71-thread-4) [n:127.0.0.1:38474_gsn%2Fuk c:forceleader_test_collection s:shard1 r:core_node6 x:forceleader_test_collection_shard1_replica_t5] o.a.s.c.ShardLeaderElectionContext I may be the new leader - try and sync
   [junit4]   2> 52362 WARN  (zkCallback-71-thread-4) [n:127.0.0.1:38474_gsn%2Fuk c:forceleader_test_collection s:shard1 r:core_node6 x:forceleader_test_collection_shard1_replica_t5] o.a.s.c.RecoveryStrategy Stopping recovery for core=[forceleader_test_collection_shard1_replica_t5] coreNodeName=[core_node6]
   [junit4]   2> 52363 INFO  (zkCallback-47-thread-1) [    ] o.a.s.c.c.ZkStateReader A cluster state change: [WatchedEvent state:SyncConnected type:NodeDataChanged path:/collections/forceleader_test_collection/state.json] for collection [forceleader_test_collection] has occurred - updating... (live nodes size: [3])
   [junit4]   2> 52363 INFO  (zkCallback-47-thread-4) [    ] o.a.s.c.c.ZkStateReader A cluster state change: [WatchedEvent state:SyncConnected type:NodeDataChanged path:/collections/forceleader_test_collection/state.json] for collection [forceleader_test_collection] has occurred - updating... (live nodes size: [3])
   [junit4]   2> 52363 INFO  (zkCallback-71-thread-2) [    ] o.a.s.c.c.ZkStateReader A cluster state change: [WatchedEvent state:SyncConnected type:NodeDataChanged path:/collections/forceleader_test_collection/state.json] for collection [forceleader_test_collection] has occurred - updating... (live nodes size: [3])
   [junit4]   2> 52363 INFO  (zkCallback-71-thread-1) [    ] o.a.s.c.c.ZkStateReader A cluster state change: [WatchedEvent state:SyncConnected type:NodeDataChanged path:/collections/forceleader_test_collection/state.json] for collection [forceleader_test_collection] has occurred - updating... (live nodes size: [3])
   [junit4]   2> 52363 INFO  (zkCallback-47-thread-3) [    ] o.a.s.c.c.ZkStateReader A cluster state change: [WatchedEvent state:SyncConnected type:NodeDataChanged path:/collections/forceleader_test_collection/state.json] for collection [forceleader_test_collection] has occurred - updating... (live nodes size: [3])
   [junit4]   2> 52363 INFO  (zkCallback-71-thread-3) [    ] o.a.s.c.c.ZkStateReader A cluster state change: [WatchedEvent state:SyncConnected type:NodeDataChanged path:/collections/forceleader_test_collection/state.json] for collection [forceleader_test_collection] has occurred - updating... (live nodes size: [3])
   [junit4]   2> 52516 ERROR (TEST-ForceLeaderTest.testReplicasInLIRNoLeader-seed#[7A776A947A0461A0]) [    ] o.a.s.c.s.i.CloudSolrClient Request to collection [forceleader_test_collection] failed due to (510) org.apache.solr.common.SolrException: Could not find a healthy node to handle the request., retry=0 commError=false errorCode=510 
   [junit4]   2> 52516 INFO  (TEST-ForceLeaderTest.testReplicasInLIRNoLeader-seed#[7A776A947A0461A0]) [    ] o.a.s.c.s.i.CloudSolrClient request was not communication error it seems
   [junit4]   2> 52516 WARN  (TEST-ForceLeaderTest.testReplicasInLIRNoLeader-seed#[7A776A947A0461A0]) [    ] o.a.s.c.s.i.CloudSolrClient Re-trying request to collection(s) [forceleader_test_collection] after stale state error from server.
   [junit4]   2> 52517 ERROR (TEST-ForceLeaderTest.testReplicasInLIRNoLeader-seed#[7A776A947A0461A0]) [    ] o.a.s.c.s.i.CloudSolrClient Request to collection [forceleader_test_collection] failed due to (510) org.apache.solr.common.SolrException: Could not find a healthy node to handle the request., retry=1 commError=false errorCode=510 
   [junit4]   2> 52517 INFO  (TEST-ForceLeaderTest.testReplicasInLIRNoLeader-seed#[7A776A947A0461A0]) [    ] o.a.s.c.s.i.CloudSolrClient request was not communication error it seems
   [junit4]   2> 52517 WARN  (TEST-ForceLeaderTest.testReplicasInLIRNoLeader-seed#[7A776A947A0461A0]) [    ] o.a.s.c.s.i.CloudSolrClient Re-trying request to collection(s) [forceleader_test_collection] after stale state error from server.
   [junit4]   2> 52517 ERROR (TEST-ForceLeaderTest.testReplicasInLIRNoLeader-seed#[7A776A947A0461A0]) [    ] o.a.s.c.s.i.CloudSolrClient Request to collection [forceleader_test_collection] failed due to (510) org.apache.solr.common.SolrException: Could not find a healthy node to handle the request., retry=2 commError=false errorCode=510 
   [junit4]   2> 52518 INFO  (TEST-ForceLeaderTest.testReplicasInLIRNoLeader-seed#[7A776A947A0461A0]) [    ] o.a.s.c.s.i.CloudSolrClient request was not communication error it seems
   [junit4]   2> 52518 WARN  (TEST-ForceLeaderTest.testReplicasInLIRNoLeader-seed#[7A776A947A0461A0]) [    ] o.a.s.c.s.i.CloudSolrClient Re-trying request to collection(s) [forceleader_test_collection] after stale state error from server.
   [junit4]   2> 52518 ERROR (TEST-ForceLeaderTest.testReplicasInLIRNoLeader-seed#[7A776A947A0461A0]) [    ] o.a.s.c.s.i.CloudSolrClient Request to collection [forceleader_test_collection] failed due to (510) org.apache.solr.common.SolrException: Could not find a healthy node to handle the request., retry=3 commError=false errorCode=510 
   [junit4]   2> 52518 INFO  (TEST-ForceLeaderTest.testReplicasInLIRNoLeader-seed#[7A776A947A0461A0]) [    ] o.a.s.c.s.i.CloudSolrClient request was not communication error it seems
   [junit4]   2> 52518 WARN  (TEST-ForceLeaderTest.testReplicasInLIRNoLeader-seed#[7A776A947A0461A0]) [    ] o.a.s.c.s.i.CloudSolrClient Re-trying request to collection(s) [forceleader_test_collection] after stale state error from server.
   [junit4]   2> 52519 ERROR (TEST-ForceLeaderTest.testReplicasInLIRNoLeader-seed#[7A776A947A0461A0]) [    ] o.a.s.c.s.i.CloudSolrClient Request to collection [forceleader_test_collection] failed due to (510) org.apache.solr.common.SolrException: Could not find a healthy node to handle the request., retry=4 commError=false errorCode=510 
   [junit4]   2> 52519 INFO  (TEST-ForceLeaderTest.testReplicasInLIRNoLeader-seed#[7A776A947A0461A0]) [    ] o.a.s.c.s.i.CloudSolrClient request was not communication error it seems
   [junit4]   2> 52519 WARN  (TEST-ForceLeaderTest.testReplicasInLIRNoLeader-seed#[7A776A947A0461A0]) [    ] o.a.s.c.s.i.CloudSolrClient Re-trying request to collection(s) [forceleader_test_collection] after stale state error from server.
   [junit4]   2> 52519 ERROR (TEST-ForceLeaderTest.testReplicasInLIRNoLeader-seed#[7A776A947A0461A0]) [    ] o.a.s.c.s.i.CloudSolrClient Request to collection [forceleader_test_collection] failed due to (510) org.apache.solr.common.SolrException: Could not find a healthy node to handle the request., retry=5 commError=false errorCode=510 
   [junit4]   2> 52520 INFO  (TEST-ForceLeaderTest.testReplicasInLIRNoLeader-seed#[7A776A947A0461A0]) [    ] o.a.s.c.s.i.CloudSolrClient request was not communication error it seems
   [junit4]   2> 52520 ERROR (TEST-ForceLeaderTest.testReplicasInLIRNoLeader-seed#[7A776A947A0461A0]) [    ] o.a.s.c.AbstractFullDistribZkTestBase No more retries available! Add batch failed due to: org.apache.solr.common.SolrException: Could not find a healthy node to handle the request.
   [junit4]   2> 52520 INFO  (TEST-ForceLeaderTest.testReplicasInLIRNoLeader-seed#[7A776A947A0461A0]) [    ] o.a.s.c.ForceLeaderTest Document couldn't be sent, which is expected.
   [junit4]   2> 52533 INFO  (zkConnectionManagerCallback-94-thread-1) [    ] o.a.s.c.c.ConnectionManager zkClient has connected
   [junit4]   2> 52536 INFO  (TEST-ForceLeaderTest.testReplicasInLIRNoLeader-seed#[7A776A947A0461A0]) [    ] o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (0) -> (3)
   [junit4]   2> 52538 INFO  (TEST-ForceLeaderTest.testReplicasInLIRNoLeader-seed#[7A776A947A0461A0]) [    ] o.a.s.c.s.i.ZkClientClusterStateProvider Cluster at 127.0.0.1:41246/solr ready
   [junit4]   2> 52539 INFO  (SocketProxy-Acceptor-43543) [    ] o.a.s.c.s.c.SocketProxy accepted Socket[addr=/127.0.0.1,port=48120,localport=43543], receiveBufferSize:531000
   [junit4]   2> 52540 INFO  (SocketProxy-Acceptor-43543) [    ] o.a.s.c.s.c.SocketProxy proxy connection Socket[addr=/127.0.0.1,port=40831,localport=53124], receiveBufferSize=530904
   [junit4]   2> 52541 INFO  (qtp756160782-100) [n:127.0.0.1:43543_gsn%2Fuk    ] o.a.s.h.a.CollectionsHandler Invoked Collection Action :forceleader with params action=FORCELEADER&collection=forceleader_test_collection&shard=shard1&wt=javabin&version=2 and sendToOCPQueue=true
   [junit4]   2> 52541 INFO  (qtp756160782-100) [n:127.0.0.1:43543_gsn%2Fuk c:forceleader_test_collection   ] o.a.s.h.a.CollectionsHandler Force leader invoked, state: znodeVersion: 10
   [junit4]   2> live nodes:[127.0.0.1:43543_gsn%2Fuk, 127.0.0.1:36990_gsn%2Fuk, 127.0.0.1:38474_gsn%2Fuk]
   [junit4]   2> collections:{collection1=DocCollection(collection1//clusterstate.json/10)={
   [junit4]   2>   "pullReplicas":"0",
   [junit4]   2>   "replicationFactor":"1",
   [junit4]   2>   "shards":{
   [junit4]   2>     "shard1":{
   [junit4]   2>       "range":"80000000-ffffffff",
   [junit4]   2>       "state":"active",
   [junit4]   2>       "replicas":{"core_node4":{
   [junit4]   2>           "core":"collection1_shard1_replica_n2",
   [junit4]   2>           "base_url":"http://127.0.0.1:45355/gsn/uk",
   [junit4]   2>           "node_name":"127.0.0.1:45355_gsn%2Fuk",
   [junit4]   2>           "state":"down",
   [junit4]   2>           "type":"NRT",
   [junit4]   2>           "leader":"true"}}},
   [junit4]   2>     "shard2":{
   [junit4]   2>       "range":"0-7fffffff",
   [junit4]   2>       "state":"active",
   [junit4]   2>       "replicas":{
   [junit4]   2>         "core_node3":{
   [junit4]   2>           "core":"collection1_shard2_replica_n1",
   [junit4]   2>           "base_url":"http://127.0.0.1:43543/gsn/uk",
   [junit4]   2>           "node_name":"127.0.0.1:43543_gsn%2Fuk",
   [junit4]   2>           "state":"active",
   [junit4]   2>           "type":"NRT",
   [junit4]   2>           "leader":"true"},
   [junit4]   2>         "core_node6":{
   [junit4]   2>           "core":"collection1_shard2_replica_n5",
   [junit4]   2>           "base_url":"http://127.0.0.1:38474/gsn/uk",
   [junit4]   2>           "node_name":"127.0.0.1:38474_gsn%2Fuk",
   [junit4]   2>           "state":"active",
   [junit4]   2>           "type":"NRT"}}}},
   [junit4]   2>   "router":{"name":"compositeId"},
   [junit4]   2>   "maxShardsPerNode":"1",
   [junit4]   2>   "autoAddReplicas":"false",
   [junit4]   2>   "nrtReplicas":"1",
   [junit4]   2>   "tlogReplicas":"0"}, forceleader_test_collection=DocCollection(forceleader_test_collection//collections/forceleader_test_collection/state.json/17)={
   [junit4]   2>   "pullReplicas":"0",
   [junit4]   2>   "replicationFactor":"0",
   [junit4]   2>   "shards":{"shard1":{
   [junit4]   2>       "range":"80000000-7fffffff",
   [junit4]   2>       "state":"active",
   [junit4]   2>       "replicas":{
   [junit4]   2>         "core_node2":{
   [junit4]   2>           "core":"forceleader_test_collection_shard1_replica_t1",
   [junit4]   2>           "base_url":"http://127.0.0.1:45355/gsn/uk",
   [junit4]   2>           "node_name":"127.0.0.1:45355_gsn%2Fuk",
   [junit4]   2>           "state":"down",
   [junit4]   2>           "type":"TLOG"},
   [junit4]   2>         "core_node4":{
   [junit4]   2>           "state":"down",
   [junit4]   2>           "base_url":"http://127.0.0.1:43543/gsn/uk",
   [junit4]   2>           "core":"forceleader_test_collection_shard1_replica_t3",
   [junit4]   2>           "node_name":"127.0.0.1:43543_gsn%2Fuk",
   [junit4]   2>           "force_set_state":"false",
   [junit4]   2>           "type":"TLOG"},
   [junit4]   2>         "core_node6":{
   [junit4]   2>           "state":"down",
   [junit4]   2>           "base_url":"http://127.0.0.1:38474/gsn/uk",
   [junit4]   2>           "core":"forceleader_test_collection_shard1_replica_t5",
   [junit4]   2>           "node_name":"127.0.0.1:38474_gsn%2Fuk",
   [junit4]   2>           "force_set_state":"false",
   [junit4]   2>           "type":"TLOG"}}}},
   [junit4]   2>   "router":{"name":"compositeId"},
   [junit4]   2>   "maxShardsPerNode":"1",
   [junit4]   2>   "autoAddReplicas":"false",
   [junit4]   2>   "nrtReplicas":"0",
   [junit4]   2>   "tlogReplicas":"3"}, control_collection=LazyCollectionRef(control_collection)}
   [junit4]   2> 52547 INFO  (qtp756160782-100) [n:127.0.0.1:43543_gsn%2Fuk c:forceleader_test_collection   ] o.a.s.h.a.CollectionsHandler Cleaning out LIR data, which was:     /collections/forceleader_test_collection/leader_initiated_recovery/shard1 (2)
   [junit4]   2>      /collections/forceleader_test_collection/leader_initiated_recovery/shard1/core_node6 (0)
   [junit4]   2>      DATA:
   [junit4]   2>          {
   [junit4]   2>            "state":"down",
   [junit4]   2>            "createdByNodeName":"127.0.0.1:45355_gsn%2Fuk",
   [junit4]   2>            "createdByCoreNodeName":"core_node2"}
   [junit4]   2>      /collections/forceleader_test_collection/leader_initiated_recovery/shard1/core_node4 (0)
   [junit4]   2>      DATA:
   [junit4]   2>          {
   [junit4]   2>            "state":"down",
   [junit4]   2>            "createdByNodeName":"127.0.0.1:45355_gsn%2Fuk",
   [junit4]   2>            "createdByCoreNodeName":"core_node2"}
   [junit4]   2> 
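
The LIR ("leader-initiated recovery") entries being cleaned out here are ordinary ZooKeeper nodes under the collection. A minimal sketch of listing them with the stock ZooKeeper client, assuming an already-connected ZooKeeper handle and using the paths shown in the log:

    import java.nio.charset.StandardCharsets;
    import org.apache.zookeeper.ZooKeeper;

    class LirDumpSketch {
      // Prints each LIR node and its JSON payload, e.g.
      // {"state":"down","createdByNodeName":"...","createdByCoreNodeName":"core_node2"}
      static void dump(ZooKeeper zk) throws Exception {
        String shard =
            "/collections/forceleader_test_collection/leader_initiated_recovery/shard1";
        for (String child : zk.getChildren(shard, false)) { // core_node4, core_node6
          byte[] data = zk.getData(shard + "/" + child, false, null);
          System.out.println(child + " -> " + new String(data, StandardCharsets.UTF_8));
        }
      }
    }
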
   [junit4]   2> 54380 INFO  (recoveryExecutor-38-thread-1-processing-n:127.0.0.1:43543_gsn%2Fuk x:forceleader_test_collection_shard1_replica_t3 c:forceleader_test_collection s:shard1 r:core_node4) [n:127.0.0.1:43543_gsn%2Fuk c:forceleader_test_collection s:shard1 r:core_node4 x:forceleader_test_collection_shard1_replica_t3] o.a.s.c.RecoveryStrategy Starting recovery process. recoveringAfterStartup=false
   [junit4]   2> 54380 INFO  (recoveryExecutor-38-thread-1-processing-n:127.0.0.1:43543_gsn%2Fuk x:forceleader_test_collection_shard1_replica_t3 c:forceleader_test_collection s:shard1 r:core_node4) [n:127.0.0.1:43543_gsn%2Fuk c:forceleader_test_collection s:shard1 r:core_node4 x:forceleader_test_collection_shard1_replica_t3] o.a.s.c.ZkController forceleader_test_collection_shard1_replica_t3 stopping background replication from leader
   [junit4]   2> 54862 INFO  (zkCallback-71-thread-4) [n:127.0.0.1:38474_gsn%2Fuk c:forceleader_test_collection s:shard1 r:core_node6 x:forceleader_test_collection_shard1_replica_t5] o.a.s.c.SyncStrategy Sync replicas to http://127.0.0.1:38474/gsn/uk/forceleader_test_collection_shard1_replica_t5/
   [junit4]   2> 54862 INFO  (zkCallback-71-thread-4) [n:127.0.0.1:38474_gsn%2Fuk c:forceleader_test_collection s:shard1 r:core_node6 x:forceleader_test_collection_shard1_replica_t5] o.a.s.u.PeerSync PeerSync: core=forceleader_test_collection_shard1_replica_t5 url=http://127.0.0.1:38474/gsn/uk START replicas=[http://127.0.0.1:43543/gsn/uk/forceleader_test_collection_shard1_replica_t3/] nUpdates=100
   [junit4]   2> 54865 INFO  (qtp756160782-96) [n:127.0.0.1:43543_gsn%2Fuk c:forceleader_test_collection s:shard1 r:core_node4 x:forceleader_test_collection_shard1_replica_t3] o.a.s.c.S.Request [forceleader_test_collection_shard1_replica_t3]  webapp=/gsn/uk path=/get params={distrib=false&qt=/get&fingerprint=false&getVersions=100&wt=javabin&version=2} status=0 QTime=0
   [junit4]   2> 54865 INFO  (zkCallback-71-thread-4) [n:127.0.0.1:38474_gsn%2Fuk c:forceleader_test_collection s:shard1 r:core_node6 x:forceleader_test_collection_shard1_replica_t5] o.a.s.u.PeerSync PeerSync: core=forceleader_test_collection_shard1_replica_t5 url=http://127.0.0.1:38474/gsn/uk  Received 1 versions from http://127.0.0.1:43543/gsn/uk/forceleader_test_collection_shard1_replica_t3/ fingerprint:null
   [junit4]   2> 54867 INFO  (qtp756160782-98) [n:127.0.0.1:43543_gsn%2Fuk c:forceleader_test_collection s:shard1 r:core_node4 x:forceleader_test_collection_shard1_replica_t3] o.a.s.c.S.Request [forceleader_test_collection_shard1_replica_t3]  webapp=/gsn/uk path=/get params={distrib=false&qt=/get&checkCanHandleVersionRanges=false&wt=javabin&version=2} status=0 QTime=0
   [junit4]   2> 54870 INFO  (zkCallback-71-thread-4) [n:127.0.0.1:38474_gsn%2Fuk c:forceleader_test_collection s:shard1 r:core_node6 x:forceleader_test_collection_shard1_replica_t5] o.a.s.u.PeerSync PeerSync: core=forceleader_test_collection_shard1_replica_t5 url=http://127.0.0.1:38474/gsn/uk  No additional versions requested. ourHighThreshold=1621216223095160832 otherLowThreshold=1621216223095160832 ourHighest=1621216223095160832 otherHighest=1621216223095160832
   [junit4]   2> 54870 INFO  (zkCallback-71-thread-4) [n:127.0.0.1:38474_gsn%2Fuk c:forceleader_test_collection s:shard1 r:core_node6 x:forceleader_test_collection_shard1_replica_t5] o.a.s.u.PeerSync PeerSync: core=forceleader_test_collection_shard1_replica_t5 url=http://127.0.0.1:38474/gsn/uk DONE. sync succeeded
   [junit4]   2> 54870 INFO  (zkCallback-71-thread-4) [n:127.0.0.1:38474_gsn%2Fuk c:forceleader_test_collection s:shard1 r:core_node6 x:forceleader_test_collection_shard1_replica_t5] o.a.s.c.SyncStrategy Sync Success - now sync replicas to me
   [junit4]   2> 54871 INFO  (zkCallback-71-thread-4) [n:127.0.0.1:38474_gsn%2Fuk c:forceleader_test_collection s:shard1 r:core_node6 x:forceleader_test_collection_shard1_replica_t5] o.a.s.c.SyncStrategy http://127.0.0.1:38474/gsn/uk/forceleader_test_collection_shard1_replica_t5/: try and ask http://127.0.0.1:43543/gsn/uk/forceleader_test_collection_shard1_replica_t3/ to sync
   [junit4]   2> 54872 INFO  (qtp756160782-97) [n:127.0.0.1:43543_gsn%2Fuk c:forceleader_test_collection s:shard1 r:core_node4 x:forceleader_test_collection_shard1_replica_t3] o.a.s.u.PeerSync PeerSync: core=forceleader_test_collection_shard1_replica_t3 url=http://127.0.0.1:43543/gsn/uk START replicas=[http://127.0.0.1:38474/gsn/uk/forceleader_test_collection_shard1_replica_t5/] nUpdates=100
   [junit4]   2> 54874 INFO  (qtp1309884849-133) [n:127.0.0.1:38474_gsn%2Fuk c:forceleader_test_collection s:shard1 r:core_node6 x:forceleader_test_collection_shard1_replica_t5] o.a.s.u.IndexFingerprint IndexFingerprint millis:0.0 result:{maxVersionSpecified=9223372036854775807, maxVersionEncountered=0, maxInHash=0, versionsHash=0, numVersions=0, numDocs=0, maxDoc=0}
   [junit4]   2> 54874 INFO  (qtp1309884849-133) [n:127.0.0.1:38474_gsn%2Fuk c:forceleader_test_collection s:shard1 r:core_node6 x:forceleader_test_collection_shard1_replica_t5] o.a.s.c.S.Request [forceleader_test_collection_shard1_replica_t5]  webapp=/gsn/uk path=/get params={distrib=false&qt=/get&getFingerprint=9223372036854775807&wt=javabin&version=2} status=0 QTime=0
   [junit4]   2> 54875 INFO  (qtp756160782-97) [n:127.0.0.1:43543_gsn%2Fuk c:forceleader_test_collection s:shard1 r:core_node4 x:forceleader_test_collection_shard1_replica_t3] o.a.s.u.IndexFingerprint IndexFingerprint millis:0.0 result:{maxVersionSpecified=9223372036854775807, maxVersionEncountered=0, maxInHash=0, versionsHash=0, numVersions=0, numDocs=0, maxDoc=0}
   [junit4]   2> 54875 INFO  (qtp756160782-97) [n:127.0.0.1:43543_gsn%2Fuk c:forceleader_test_collection s:shard1 r:core_node4 x:forceleader_test_collection_shard1_replica_t3] o.a.s.u.PeerSync We are already in sync. No need to do a PeerSync 
   [junit4]   2> 54876 INFO  (qtp756160782-97) [n:127.0.0.1:43543_gsn%2Fuk c:forceleader_test_collection s:shard1 r:core_node4 x:forceleader_test_collection_shard1_replica_t3] o.a.s.c.S.Request [forceleader_test_collection_shard1_replica_t3]  webapp=/gsn/uk path=/get params={distrib=false&qt=/get&getVersions=100&sync=http://127.0.0.1:38474/gsn/uk/forceleader_test_collection_shard1_replica_t5/&wt=javabin&version=2} status=0 QTime=3
   [junit4]   2> 54876 INFO  (zkCallback-71-thread-4) [n:127.0.0.1:38474_gsn%2Fuk c:forceleader_test_collection s:shard1 r:core_node6 x:forceleader_test_collection_shard1_replica_t5] o.a.s.c.SyncStrategy http://127.0.0.1:38474/gsn/uk/forceleader_test_collection_shard1_replica_t5/:  sync completed with http://127.0.0.1:43543/gsn/uk/forceleader_test_collection_shard1_replica_t3/
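
The PeerSync exchange above succeeds without transferring a single document: both cores report the same lone version (1621216223095160832), so neither side is missing updates. Conceptually the check reduces to filtering the peer's reported versions against what is already present locally. The following is a simplified illustration of that idea, not Solr's actual PeerSync implementation:

    import java.util.List;
    import java.util.Set;
    import java.util.stream.Collectors;

    class PeerSyncSketch {
      // Returns the subset of the peer's recent update versions that still
      // need to be fetched. An empty result corresponds to the logged
      // "No additional versions requested ... sync succeeded".
      static List<Long> versionsToRequest(Set<Long> ourVersions,
                                          List<Long> peerVersions,
                                          long ourLowThreshold) {
        return peerVersions.stream()
            .filter(v -> Math.abs(v) >= ourLowThreshold) // inside our update window
            .filter(v -> !ourVersions.contains(v))       // not already applied
            .collect(Collectors.toList());
      }
    }
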
   [junit4]   2> 54877 INFO  (zkCallback-71-thread-4) [n:127.0.0.1:38474_gsn%2Fuk c:forceleader_test_collection s:shard1 r:core_node6 x:forceleader_test_collection_shard1_replica_t5] o.a.s.c.ZkController forceleader_test_collection_shard1_replica_t5 stopping background replication from leader
   [junit4]   2> 54880 INFO  (zkCallback-71-thread-4) [n:127.0.0.1:38474_gsn%2Fuk c:forceleader_test_collection s:shard1 r:core_node6 x:forceleader_test_collection_shard1_replica_t5] o.a.s.c.ShardLeaderElectionContext Replaying tlog before become new leader
   [junit4]   2> 54880 WARN  (recoveryExecutor-76-thread-1-processing-n:127.0.0.1:38474_gsn%2Fuk x:forceleader_test_collection_shard1_replica_t5 c:forceleader_test_collection s:shard1 r:core_node6) [n:127.0.0.1:38474_gsn%2Fuk c:forceleader_test_collection s:shard1 r:core_node6 x:forceleader_test_collection_shard1_replica_t5] o.a.s.u.UpdateLog Starting log replay tlog{file=/x1/jenkins/jenkins-slave/workspace/Lucene-Solr-repro/solr/build/solr-core/test/J2/temp/solr.cloud.ForceLeaderTest_7A776A947A0461A0-001/shard-3-001/cores/forceleader_test_collection_shard1_replica_t5/data/tlog/tlog.0000000000000000000 refcount=2} active=false starting pos=0 inSortedOrder=true
   [junit4]   2> 54886 INFO  (recoveryExecutor-76-thread-1-processing-n:127.0.0.1:38474_gsn%2Fuk x:forceleader_test_collection_shard1_replica_t5 c:forceleader_test_collection s:shard1 r:core_node6) [n:127.0.0.1:38474_gsn%2Fuk c:forceleader_test_collection s:shard1 r:core_node6 x:forceleader_test_collection_shard1_replica_t5] o.a.s.u.DirectUpdateHandler2 start commit{flags=2,optimize=false,openSearcher=true,waitSearcher=true,expungeDeletes=false,softCommit=false,prepareCommit=false}
   [junit4]   2> 54886 INFO  (recoveryExecutor-76-thread-1-processing-n:127.0.0.1:38474_gsn%2Fuk x:forceleader_test_collection_shard1_replica_t5 c:forceleader_test_collection s:shard1 r:core_node6) [n:127.0.0.1:38474_gsn%2Fuk c:forceleader_test_collection s:shard1 r:core_node6 x:forceleader_test_collection_shard1_replica_t5] o.a.s.u.SolrIndexWriter Calling setCommitData with IW:org.apache.solr.update.SolrIndexWriter@1903b417 commitCommandVersion:0
   [junit4]   2> 55032 INFO  (recoveryExecutor-76-thread-1-processing-n:127.0.0.1:38474_gsn%2Fuk x:forceleader_test_collection_shard1_replica_t5 c:forceleader_test_collection s:shard1 r:core_node6) [n:127.0.0.1:38474_gsn%2Fuk c:forceleader_test_collection s:shard1 r:core_node6 x:forceleader_test_collection_shard1_replica_t5] o.a.s.s.SolrIndexSearcher Opening [Searcher@75944417[forceleader_test_collection_shard1_replica_t5] main]
   [junit4]   2> 55040 INFO  (searcherExecutor-74-thread-1-processing-n:127.0.0.1:38474_gsn%2Fuk x:forceleader_test_collection_shard1_replica_t5 c:forceleader_test_collection s:shard1 r:core_node6) [n:127.0.0.1:38474_gsn%2Fuk c:forceleader_test_collection s:shard1 r:core_node6 x:forceleader_test_collection_shard1_replica_t5] o.a.s.c.SolrCore [forceleader_test_collection_shard1_replica_t5] Registered new searcher Searcher@75944417[forceleader_test_collection_shard1_replica_t5] main{ExitableDirectoryReader(UninvertingDirectoryReader(Uninverting(_0(7.7.0):C1:[diagnostics={os=Linux, java.vendor=Oracle Corporation, java.version=1.8.0_191, java.vm.version=25.191-b12, lucene.version=7.7.0, os.arch=amd64, java.runtime.version=1.8.0_191-b12, source=flush, os.version=4.4.0-137-generic, timestamp=1546112291091}]:[attributes={Lucene50StoredFieldsFormat.mode=BEST_SPEED}])))}
   [junit4]   2> 55042 INFO  (recoveryExecutor-76-thread-1-processing-n:127.0.0.1:38474_gsn%2Fuk x:forceleader_test_collection_shard1_replica_t5 c:forceleader_test_collection s:shard1 r:core_node6) [n:127.0.0.1:38474_gsn%2Fuk c:forceleader_test_collection s:shard1 r:core_node6 x:forceleader_test_collection_shard1_replica_t5] o.a.s.u.DirectUpdateHandler2 end_commit_flush
   [junit4]   2> 55043 INFO  (recoveryExecutor-76-thread-1-processing-n:127.0.0.1:38474_gsn%2Fuk x:forceleader_test_collection_shard1_replica_t5 c:forceleader_test_collection s:shard1 r:core_node6) [n:127.0.0.1:38474_gsn%2Fuk c:forceleader_test_collection s:shard1 r:core_node6 x:forceleader_test_collection_shard1_replica_t5] o.a.s.u.p.LogUpdateProcessorFactory [forceleader_test_collection_shard1_replica_t5] {add=[1 (1621216223095160832)]} 0 162
   [junit4]   2> 55043 WARN  (recoveryExecutor-76-thread-1-processing-n:127.0.0.1:38474_gsn%2Fuk x:forceleader_test_collection_shard1_replica_t5 c:forceleader_test_collection s:shard1 r:core_node6) [n:127.0.0.1:38474_gsn%2Fuk c:forceleader_test_collection s:shard1 r:core_node6 x:forceleader_test_collection_shard1_replica_t5] o.a.s.u.UpdateLog Log replay finished. recoveryInfo=RecoveryInfo{adds=1 deletes=0 deleteByQuery=0 errors=0 positionOfStart=0}
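
Before core_node6 registers as leader it replays its transaction log, and the RecoveryInfo summary above shows exactly one buffered add (document 1) applied, followed by the hard commit logged by DirectUpdateHandler2. A schematic Java 8 sketch of such a replay loop, with hypothetical Op/Entry types standing in for Solr's internal tlog structures:

    import java.util.List;

    class TlogReplaySketch {
      enum Op { ADD, DELETE, DELETE_BY_QUERY }
      static class Entry {
        final Op op;               // payload (document, id, query) omitted
        Entry(Op op) { this.op = op; }
      }

      // Apply every buffered operation in log order, then report counters
      // mirroring the logged RecoveryInfo{adds=1 deletes=0 deleteByQuery=0}.
      static void replay(List<Entry> tlog) {
        int adds = 0, deletes = 0, dbq = 0;
        for (Entry e : tlog) {
          switch (e.op) {
            case ADD:             adds++;    break; // re-apply the add
            case DELETE:          deletes++; break; // re-apply the delete
            case DELETE_BY_QUERY: dbq++;     break; // re-apply the DBQ
          }
        }
        // a hard commit and new searcher would follow, as logged above
        System.out.printf("RecoveryInfo{adds=%d deletes=%d deleteByQuery=%d}%n",
            adds, deletes, dbq);
      }
    }
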
   [junit4]   2> 55045 INFO  (zkCallback-71-thread-4) [n:127.0.0.1:38474_gsn%2Fuk c:forceleader_test_collection s:shard1 r:core_node6 x:forceleader_test_collection_shard1_replica_t5] o.a.s.c.ShardLeaderElectionContextBase Creating leader registration node /collections/forceleader_test_collection/leaders/shard1/leader after winning as /collections/forceleader_test_collection/leader_elect/shard1/election/72532534578118674-core_node6-n_0000000006
   [junit4]   2> 55056 INFO  (zkCallback-47-thread-1) [    ] o.a.s.c.c.ZkStateReader A cluster state change: [WatchedEvent state:SyncConnected type:NodeDataChanged path:/collections/forceleader_test_collection/state.json] for collection [forceleader_test_collection] has occurred - updating... (live nodes size: [3])
   [junit4]   2> 55056 INFO  (zkCallback-47-thread-3) [    ] o.a.s.c.c.ZkStateReader A cluster state change: [WatchedEvent state:SyncConnected type:NodeDataChanged path:/collections/forceleader_test_collection/state.json] for collection [forceleader_test_collection] has occurred - updating... (live nodes size: [3])
   [junit4]   2> 55056 INFO  (zkCallback-47-thread-4) [    ] o.a.s.c.c.ZkStateReader A cluster state change: [WatchedEvent state:SyncConnected type:NodeDataChanged path:/collections/forceleader_test_collection/state.json] for collection [forceleader_test_collection] has occurred - updating... (live nodes size: [3])
   [junit4]   2> 55057 INFO  (zkCallback-71-thread-1) [    ] o.a.s.c.c.ZkStateReader A cluster state change: [WatchedEvent state:SyncConnected type:NodeDataChanged path:/collections/forceleader_test_collection/state.json] for collection [forceleader_test_collection] has occurred - updating... (live nodes size: [3])
   [junit4]   2> 55057 INFO  (zkCallback-71-thread-2) [    ] o.a.s.c.c.ZkStateReader A cluster state change: [WatchedEvent state:SyncConnected type:NodeDataChanged path:/collections/forceleader_test_collection/state.json] for collection [forceleader_test_collection] has occurred - updating... (live nodes size: [3])
   [junit4]   2> 55057 INFO  (zkCallback-71-thread-3) [    ] o.a.s.c.c.ZkStateReader A cluster state change: [WatchedEvent state:SyncConnected type:NodeDataChanged path:/collections/forceleader_test_collection/state.json] for collection [forceleader_test_collection] has occurred - updating... (live nodes size: [3])
   [junit4]   2> 55059 INFO  (SocketProxy-Acceptor-38474) [    ] o.a.s.c.s.c.SocketProxy accepted Socket[addr=/127.0.0.1,port=48882,localport=38474], receiveBufferSize:531000
   [junit4]   2> 55061 INFO  (zkCallback-71-thread-4) [n:127.0.0.1:38474_gsn%2Fuk c:forceleader_test_collection s:shard1 r:core_node6 x:forceleader_test_collection_shard1_replica_t5] o.a.s.c.ShardLeaderElectionContext I am the new leader: http://127.0.0.1:38474/gsn/uk/forceleader_test_collection_shard1_replica_t5/ shard1
   [junit4]   2> 55067 INFO  (SocketProxy-Acceptor-38474) [    ] o.a.s.c.s.c.SocketProxy proxy connection Socket[addr=/127.0.0.1,port=46202,localport=44712], receiveBufferSize=530904
   [junit4]   2> 55090 INFO  (qtp1309884849-131) [n:127.0.0.1:38474_gsn%2Fuk c:forceleader_test_collection s:shard1 r:core_node6 x:forceleader_test_collection_shard1_replica_t5] o.a.s.c.S.Request [forceleader_test_collection_shard1_replica_t5]  webapp=/gsn/uk path=/admin/ping params={wt=javabin&version=2} hits=1 status=0 QTime=22
   [junit4]   2> 55091 INFO  (qtp1309884849-131) [n:127.0.0.1:38474_gsn%2Fuk c:forceleader_test_collection s:shard1 r:core_node6 x:forceleader_test_collection_shard1_replica_t5] o.a.s.c.S.Request [forceleader_test_collection_shard1_replica_t5]  webapp=/gsn/uk path=/admin/ping params={wt=javabin&version=2} status=0 QTime=22
   [junit4]   2> 55092 INFO  (recoveryExecutor-38-thread-1-processing-n:127.0.0.1:43543_gsn%2Fuk x:forceleader_test_collection_shard1_replica_t3 c:forceleader_test_collection s:shard1 r:core_node4) [n:127.0.0.1:43543_gsn%2Fuk c:forceleader_test_collection s:shard1 r:core_node4 x:forceleader_test_collection_shard1_replica_t3] o.a.s.c.RecoveryStrategy Begin buffering updates. core=[forceleader_test_collection_shard1_replica_t3]
   [junit4]   2> 55093 INFO  (recoveryExecutor-38-thread-1-processing-n:127.0.0.1:43543_gsn%2Fuk x:forceleader_test_collection_shard1_replica_t3 c:forceleader_test_collection s:shard1 r:core_node4) [n:127.0.0.1:43543_gsn%2Fuk c:forceleader_test_collection s:shard1 r:core_node4 x:forceleader_test_collection_shard1_replica_t3] o.a.s.u.UpdateLog Starting to buffer updates. FSUpdateLog{state=ACTIVE, tlog=tlog{file=/x1/jenkins/jenkins-slave/workspace/Lucene-Solr-repro/solr/build/solr-core/test/J2/temp/solr.cloud.ForceLeaderTest_7A776A947A0461A0-001/shard-1-001/cores/forceleader_test_collection_shard1_replica_t3/data/tlog/tlog.0000000000000000000 refcount=1}}
   [junit4]   2> 55093 INFO  (recoveryExecutor-38-thread-1-processing-n:127.0.0.1:43543_gsn%2Fuk x:forceleader_test_collection_shard1_replica_t3 c:forceleader_test_collection s:shard1 r:core_node4) [n:127.0.0.1:43543_gsn%2Fuk c:forceleader_test_collection s:shard1 r:core_node4 x:forceleader_test_collection_shard1_replica_t3] o.a.s.c.RecoveryStrategy Publishing state of core [forceleader_test_collection_shard1_replica_t3] as recovering, leader is [http://127.0.0.1:38474/gsn/uk/forceleader_test_collection_shard1_replica_t5/] and I am [http://127.0.0.1:43543/gsn/uk/forceleader_test_collection_shard1_replica_t3/]
   [junit4]   2> 55096 INFO  (recoveryExecutor-38-thread-1-processing-n:127.0.0.1:43543_gsn%2Fuk x:forceleader_test_collection_shard1_replica_t3 c:forceleader_test_collection s:shard1 r:core_node4) [n:127.0.0.1:43543_gsn%2Fuk c:forceleader_test_collection s:shard1 r:core_node4 x:forceleader_test_collection_shard1_replica_t3] o.a.s.c.RecoveryStrategy Sending prep recovery command to [http://127.0.0.1:38474/gsn/uk]; [WaitForState: action=PREPRECOVERY&core=forceleader_test_collection_shard1_replica_t5&nodeName=127.0.0.1:43543_gsn%252Fuk&coreNodeName=core_node4&state=recovering&checkLive=true&onlyIfLeader=true&onlyIfLeaderActive=true]
   [junit4]   2> 55097 INFO  (qtp1309884849-132) [n:127.0.0.1:38474_gsn%2Fuk    x:forceleader_test_collection_shard1_replica_t5] o.a.s.h.a.PrepRecoveryOp Going to wait for coreNodeName: core_node4, state: recovering, checkLive: true, onlyIfLeader: true, onlyIfLeaderActive: true
   [junit4]   2> 55097 INFO  (qtp1309884849-132) [n:127.0.0.1:38474_gsn%2Fuk    x:forceleader_test_collection_shard1_

[...truncated too long message...]

   [junit4]   2> 433775 INFO  (closeThreadPool-466-thread-2) [    ] o.a.s.m.SolrMetricManager Closing metric reporters for registry=solr.jetty, tag=null
   [junit4]   2> 433775 INFO  (closeThreadPool-466-thread-2) [    ] o.a.s.m.r.SolrJmxReporter Closing reporter [org.apache.solr.metrics.reporters.SolrJmxReporter@50e9739e: rootName = null, domain = solr.jetty, service url = null, agent id = null] for registry solr.jetty / com.codahale.metrics.MetricRegistry@77159a3f
   [junit4]   2> 433776 INFO  (closeThreadPool-466-thread-2) [    ] o.a.s.m.SolrMetricManager Closing metric reporters for registry=solr.cluster, tag=null
   [junit4]   2> 433780 INFO  (OverseerAutoScalingTriggerThread-72532560122281996-127.0.0.1:39195_gsn%2Fuk-n_0000000001) [    ] o.a.s.c.a.OverseerTriggerThread OverseerTriggerThread woken up but we are closed, exiting.
   [junit4]   2> 433784 WARN  (NIOServerCxn.Factory:0.0.0.0/0.0.0.0:0) [    ] o.a.z.s.NIOServerCnxn Unable to read additional data from client sessionid 0x101affaa8bd000f, likely client has closed socket
   [junit4]   2> 433795 INFO  (closeThreadPool-475-thread-2) [    ] o.a.s.c.Overseer Overseer (id=72532560122281996-127.0.0.1:39195_gsn%2Fuk-n_0000000001) closing
   [junit4]   2> 433824 INFO  (zkCallback-454-thread-4) [    ] o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (4) -> (3)
   [junit4]   2> 433825 INFO  (closeThreadPool-466-thread-6) [    ] o.a.s.c.Overseer Overseer (id=72532560122281996-127.0.0.1:39195_gsn%2Fuk-n_0000000001) closing
   [junit4]   2> 433826 WARN  (NIOServerCxn.Factory:0.0.0.0/0.0.0.0:0) [    ] o.a.z.s.NIOServerCnxn Unable to read additional data from client sessionid 0x101affaa8bd000e, likely client has closed socket
   [junit4]   2> 433827 WARN  (NIOServerCxn.Factory:0.0.0.0/0.0.0.0:0) [    ] o.a.z.s.NIOServerCnxn Unable to read additional data from client sessionid 0x101affaa8bd0013, likely client has closed socket
   [junit4]   2> 433827 INFO  (zkCallback-454-thread-3) [    ] o.a.s.c.OverseerElectionContext I am going to be the leader 127.0.0.1:39367_gsn%2Fuk
   [junit4]   2> 433836 INFO  (closeThreadPool-466-thread-6) [    ] o.e.j.s.AbstractConnector Stopped ServerConnector@39f556b5{HTTP/1.1,[http/1.1]}{127.0.0.1:0}
   [junit4]   2> 433837 INFO  (closeThreadPool-466-thread-8) [    ] o.e.j.s.AbstractConnector Stopped ServerConnector@26f3605a{HTTP/1.1,[http/1.1]}{127.0.0.1:0}
   [junit4]   2> 433837 INFO  (closeThreadPool-466-thread-8) [    ] o.e.j.s.h.ContextHandler Stopped o.e.j.s.ServletContextHandler@e3b1aa7{/gsn/uk,null,UNAVAILABLE}
   [junit4]   2> 433837 INFO  (closeThreadPool-466-thread-8) [    ] o.e.j.s.session node0 Stopped scavenging
   [junit4]   2> 433842 INFO  (closeThreadPool-466-thread-6) [    ] o.e.j.s.h.ContextHandler Stopped o.e.j.s.ServletContextHandler@5c483dc4{/gsn/uk,null,UNAVAILABLE}
   [junit4]   2> 433842 WARN  (closeThreadPool-466-thread-8) [    ] o.a.s.c.s.c.SocketProxy Closing 8 connections to: http://127.0.0.1:34716/gsn/uk, target: http://127.0.0.1:39229/gsn/uk
   [junit4]   2> 433843 INFO  (closeThreadPool-466-thread-7) [    ] o.e.j.s.AbstractConnector Stopped ServerConnector@662b7f5f{HTTP/1.1,[http/1.1]}{127.0.0.1:0}
   [junit4]   2> 433844 INFO  (closeThreadPool-466-thread-6) [    ] o.e.j.s.session node0 Stopped scavenging
   [junit4]   2> 433845 INFO  (closeThreadPool-466-thread-7) [    ] o.e.j.s.h.ContextHandler Stopped o.e.j.s.ServletContextHandler@46994a09{/gsn/uk,null,UNAVAILABLE}
   [junit4]   2> 433846 INFO  (closeThreadPool-466-thread-7) [    ] o.e.j.s.session node0 Stopped scavenging
   [junit4]   2> 433846 WARN  (closeThreadPool-466-thread-7) [    ] o.a.s.c.s.c.SocketProxy Closing 11 connections to: http://127.0.0.1:43494/gsn/uk, target: http://127.0.0.1:32785/gsn/uk
   [junit4]   2> 433849 WARN  (closeThreadPool-466-thread-6) [    ] o.a.s.c.s.c.SocketProxy Closing 7 connections to: http://127.0.0.1:39195/gsn/uk, target: http://127.0.0.1:43515/gsn/uk
   [junit4]   2> 433854 INFO  (closeThreadPool-466-thread-2) [    ] o.e.j.s.AbstractConnector Stopped ServerConnector@60371782{HTTP/1.1,[http/1.1]}{127.0.0.1:36439}
   [junit4]   2> 433854 INFO  (closeThreadPool-466-thread-2) [    ] o.e.j.s.h.ContextHandler Stopped o.e.j.s.ServletContextHandler@4841fef{/gsn/uk,null,UNAVAILABLE}
   [junit4]   2> 433854 INFO  (closeThreadPool-466-thread-2) [    ] o.e.j.s.session node0 Stopped scavenging
   [junit4]   2> 433862 WARN  (closeThreadPool-466-thread-2) [    ] o.a.s.c.s.c.SocketProxy Closing 3 connections to: http://127.0.0.1:39367/gsn/uk, target: http://127.0.0.1:36439/gsn/uk
   [junit4]   2> 433862 INFO  (TEST-ForceLeaderTest.testReplicasInLowerTerms-seed#[7A776A947A0461A0]) [    ] o.a.s.c.ZkTestServer Shutting down ZkTestServer.
   [junit4]   2> 433868 INFO  (TEST-ForceLeaderTest.testReplicasInLowerTerms-seed#[7A776A947A0461A0]) [    ] o.a.s.c.ZkTestServer parse host and port list: 127.0.0.1:46013
   [junit4]   2> 433868 INFO  (TEST-ForceLeaderTest.testReplicasInLowerTerms-seed#[7A776A947A0461A0]) [    ] o.a.s.c.ZkTestServer connecting to 127.0.0.1 46013
   [junit4]   2> 433871 WARN  (ZkTestServer Run Thread) [    ] o.a.s.c.ZkTestServer Watch limit violations: 
   [junit4]   2> Maximum concurrent create/delete watches above limit:
   [junit4]   2> 
   [junit4]   2> 	46	/solr/collections/forceleader_lower_terms_collection/terms/shard1
   [junit4]   2> 	13	/solr/aliases.json
   [junit4]   2> 	9	/solr/collections/collection1/terms/shard2
   [junit4]   2> 	5	/solr/security.json
   [junit4]   2> 	5	/solr/configs/conf1
   [junit4]   2> 	3	/solr/collections/forceleader_lower_terms_collection/state.json
   [junit4]   2> 	3	/solr/collections/control_collection/terms/shard1
   [junit4]   2> 	2	/solr/collections/collection1/terms/shard1
   [junit4]   2> 
   [junit4]   2> Maximum concurrent data watches above limit:
   [junit4]   2> 
   [junit4]   2> 	44	/solr/collections/forceleader_lower_terms_collection/state.json
   [junit4]   2> 	30	/solr/collections/collection1/state.json
   [junit4]   2> 	14	/solr/collections/control_collection/state.json
   [junit4]   2> 	13	/solr/clusterprops.json
   [junit4]   2> 	13	/solr/clusterstate.json
   [junit4]   2> 	2	/solr/autoscaling.json
   [junit4]   2> 	2	/solr/collections/forceleader_lower_terms_collection/leader_elect/shard1/election/72532560122281998-core_node3-n_0000000001
   [junit4]   2> 
   [junit4]   2> Maximum concurrent children watches above limit:
   [junit4]   2> 
   [junit4]   2> 	13	/solr/collections
   [junit4]   2> 	12	/solr/live_nodes
   [junit4]   2> 	2	/solr/overseer/queue
   [junit4]   2> 	2	/solr/autoscaling/events/.scheduled_maintenance
   [junit4]   2> 	2	/solr/autoscaling/events/.auto_add_replicas
   [junit4]   2> 	2	/solr/overseer/queue-work
   [junit4]   2> 	2	/solr/overseer/collection-queue-work
   [junit4]   2> 
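
The "watch limit violations" report counts how many concurrent one-shot ZooKeeper watches the test had registered on each path; the state.json nodes dominate because every cluster state change fires the watch and the client immediately re-registers it. A minimal sketch of setting such a data watch with the plain ZooKeeper client (path taken from the report, re-registration elided):

    import org.apache.zookeeper.ZooKeeper;

    class StateWatchSketch {
      static void watchState(ZooKeeper zk) throws Exception {
        // ZooKeeper watches are one-shot: after NodeDataChanged fires, the
        // client must call getData again to keep watching, which is why
        // concurrent-watch counts climb during a busy test.
        zk.getData("/solr/collections/forceleader_lower_terms_collection/state.json",
            event -> System.out.println("state changed: " + event.getPath()),
            null);
      }
    }
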
   [junit4] OK      38.4s J0 | ForceLeaderTest.testReplicasInLowerTerms
   [junit4]   2> NOTE: leaving temporary files on disk at: /x1/jenkins/jenkins-slave/workspace/Lucene-Solr-repro/solr/build/solr-core/test/J0/temp/solr.cloud.ForceLeaderTest_7A776A947A0461A0-001
   [junit4]   2> Dec 29, 2018 7:44:30 PM com.carrotsearch.randomizedtesting.ThreadLeakControl checkThreadLeaks
   [junit4]   2> WARNING: Will linger awaiting termination of 1 leaked thread(s).
   [junit4]   2> NOTE: test params are: codec=Asserting(Lucene70): {multiDefault=PostingsFormat(name=Memory), a_t=PostingsFormat(name=Memory), id=Lucene50(blocksize=128), text=FST50}, docValues:{range_facet_l_dv=DocValuesFormat(name=Lucene70), _version_=DocValuesFormat(name=Asserting), intDefault=DocValuesFormat(name=Asserting), id_i1=DocValuesFormat(name=Asserting), range_facet_i_dv=DocValuesFormat(name=Lucene70), intDvoDefault=DocValuesFormat(name=Lucene70), range_facet_l=DocValuesFormat(name=Lucene70), timestamp=DocValuesFormat(name=Lucene70)}, maxPointsInLeafNode=459, maxMBSortInHeap=6.114907444039353, sim=RandomSimilarity(queryNorm=true): {}, locale=en-NZ, timezone=Pacific/Ponape
   [junit4]   2> NOTE: Linux 4.4.0-137-generic amd64/Oracle Corporation 1.8.0_191 (64-bit)/cpus=4,threads=1,free=242019128,total=504365056
   [junit4]   2> NOTE: All tests run in this JVM: [ForceLeaderTest, ForceLeaderTest]
   [junit4] Completed [5/5 (3!)] on J0 in 208.23s, 3 tests, 1 error, 1 skipped <<< FAILURES!
   [junit4] 
   [junit4] 
   [junit4] Tests with failures [seed: 7A776A947A0461A0]:
   [junit4]   - org.apache.solr.cloud.ForceLeaderTest.testReplicasInLIRNoLeader
   [junit4]   - org.apache.solr.cloud.ForceLeaderTest.testReplicasInLIRNoLeader
   [junit4]   - org.apache.solr.cloud.ForceLeaderTest.testReplicasInLIRNoLeader
   [junit4] 
   [junit4] 
   [junit4] JVM J0:     1.01 ..  4573.00 =  4571.99s
   [junit4] JVM J1:     0.86 ..  3325.50 =  3324.64s
   [junit4] JVM J2:     0.86 ..  1879.17 =  1878.31s
   [junit4] Execution time total: 1 hour 16 minutes 13 seconds
   [junit4] Tests summary: 5 suites, 15 tests, 3 errors, 5 ignored

BUILD FAILED
/x1/jenkins/jenkins-slave/workspace/Lucene-Solr-repro/lucene/common-build.xml:1572: The following error occurred while executing this line:
/x1/jenkins/jenkins-slave/workspace/Lucene-Solr-repro/lucene/common-build.xml:1099: There were test failures: 5 suites, 15 tests, 3 errors, 5 ignored [seed: 7A776A947A0461A0]

Total time: 76 minutes 16 seconds

[repro] Setting last failure code to 256

[repro] Failures:
[repro]   3/5 failed: org.apache.solr.cloud.ForceLeaderTest
[repro] git checkout 345a655f216258c406c384ada9aa6d5f14e254f9
Previous HEAD position was 9aa15a2... SOLR-12973: Admin UI Nodes view support for replica* replica names. (Daniel Collins, Christine Poerschke, janhoy)
HEAD is now at 345a655... SOLR-12973: Admin UI Nodes view support for replica* replica names. (Daniel Collins, Christine Poerschke, janhoy)
[repro] Exiting with code 256
Archiving artifacts
Recording test results
Build step 'Publish JUnit test result report' changed build result to UNSTABLE
Email was triggered for: Unstable (Test Failures)
Sending email for trigger: Unstable (Test Failures)

[JENKINS] Lucene-Solr-repro - Build # 2583 - Still Unstable

Posted by Apache Jenkins Server <je...@builds.apache.org>.
Build: https://builds.apache.org/job/Lucene-Solr-repro/2583/

[...truncated 28 lines...]
[repro] Jenkins log URL: https://builds.apache.org/job/Lucene-Solr-BadApples-Tests-master/247/consoleText

[repro] Revision: 345a655f216258c406c384ada9aa6d5f14e254f9

[repro] Repro line:  ant test  -Dtestcase=TestSimTriggerIntegration -Dtests.method=testNodeMarkersRegistration -Dtests.seed=21F7DF3C0DA239EF -Dtests.multiplier=2 -Dtests.slow=true -Dtests.badapples=true -Dtests.locale=en-CA -Dtests.timezone=Pacific/Pago_Pago -Dtests.asserts=true -Dtests.file.encoding=UTF-8

[repro] Repro line:  ant test  -Dtestcase=TestSimTriggerIntegration -Dtests.method=testNodeAddedTriggerRestoreState -Dtests.seed=21F7DF3C0DA239EF -Dtests.multiplier=2 -Dtests.slow=true -Dtests.badapples=true -Dtests.locale=en-CA -Dtests.timezone=Pacific/Pago_Pago -Dtests.asserts=true -Dtests.file.encoding=UTF-8

[repro] git rev-parse --abbrev-ref HEAD
[repro] git rev-parse HEAD
[repro] Initial local git branch/revision: 345a655f216258c406c384ada9aa6d5f14e254f9
[repro] git fetch
[repro] git checkout 345a655f216258c406c384ada9aa6d5f14e254f9

[...truncated 1 lines...]
[repro] git merge --ff-only

[...truncated 1 lines...]
[repro] ant clean

[...truncated 6 lines...]
[repro] Test suites by module:
[repro]    solr/core
[repro]       TestSimTriggerIntegration
[repro] ant compile-test

[...truncated 3592 lines...]
[repro] ant test-nocompile -Dtests.dups=5 -Dtests.maxfailures=5 -Dtests.class="*.TestSimTriggerIntegration" -Dtests.showOutput=onerror  -Dtests.seed=21F7DF3C0DA239EF -Dtests.multiplier=2 -Dtests.slow=true -Dtests.badapples=true -Dtests.locale=en-CA -Dtests.timezone=Pacific/Pago_Pago -Dtests.asserts=true -Dtests.file.encoding=UTF-8

[...truncated 7004 lines...]
[repro] Setting last failure code to 256

[repro] Failures:
[repro]   5/5 failed: org.apache.solr.cloud.autoscaling.sim.TestSimTriggerIntegration

[repro] Re-testing 100% failures at the tip of master
[repro] git fetch
[repro] git checkout master

[...truncated 4 lines...]
[repro] git merge --ff-only

[...truncated 86 lines...]
[repro] ant clean

[...truncated 8 lines...]
[repro] Test suites by module:
[repro]    solr/core
[repro]       TestSimTriggerIntegration
[repro] ant compile-test

[...truncated 3592 lines...]
[repro] ant test-nocompile -Dtests.dups=5 -Dtests.maxfailures=5 -Dtests.class="*.TestSimTriggerIntegration" -Dtests.showOutput=onerror  -Dtests.seed=21F7DF3C0DA239EF -Dtests.multiplier=2 -Dtests.slow=true -Dtests.badapples=true -Dtests.locale=en-CA -Dtests.timezone=Pacific/Pago_Pago -Dtests.asserts=true -Dtests.file.encoding=UTF-8

[...truncated 5281 lines...]
[repro] Setting last failure code to 256

[repro] Failures at the tip of master:
[repro]   4/5 failed: org.apache.solr.cloud.autoscaling.sim.TestSimTriggerIntegration
[repro] git checkout 345a655f216258c406c384ada9aa6d5f14e254f9

[...truncated 8 lines...]
[repro] Exiting with code 256

[...truncated 5 lines...]