Posted to dev@lucene.apache.org by Policeman Jenkins Server <je...@thetaphi.de> on 2017/01/08 05:44:36 UTC

[JENKINS] Lucene-Solr-master-Linux (64bit/jdk1.8.0_112) - Build # 18724 - Unstable!

Build: https://jenkins.thetaphi.de/job/Lucene-Solr-master-Linux/18724/
Java: 64bit/jdk1.8.0_112 -XX:+UseCompressedOops -XX:+UseConcMarkSweepGC

1 tests failed.
FAILED:  org.apache.solr.cloud.LeaderFailoverAfterPartitionTest.test

Error Message:
Expected 2 of 3 replicas to be active but only found 1; [core_node3:{"core":"c8n_1x3_lf_shard1_replica1","base_url":"http://127.0.0.1:39961","node_name":"127.0.0.1:39961_","state":"active","leader":"true"}]; clusterState: DocCollection(c8n_1x3_lf//clusterstate.json/30)={   "replicationFactor":"3",   "shards":{"shard1":{       "range":"80000000-7fffffff",       "state":"active",       "replicas":{         "core_node1":{           "state":"down",           "base_url":"http://127.0.0.1:39236",           "core":"c8n_1x3_lf_shard1_replica3",           "node_name":"127.0.0.1:39236_"},         "core_node2":{           "core":"c8n_1x3_lf_shard1_replica2",           "base_url":"http://127.0.0.1:41282",           "node_name":"127.0.0.1:41282_",           "state":"down"},         "core_node3":{           "core":"c8n_1x3_lf_shard1_replica1",           "base_url":"http://127.0.0.1:39961",           "node_name":"127.0.0.1:39961_",           "state":"active",           "leader":"true"}}}},   "router":{"name":"compositeId"},   "maxShardsPerNode":"1",   "autoAddReplicas":"false"}

Stack Trace:
java.lang.AssertionError: Expected 2 of 3 replicas to be active but only found 1; [core_node3:{"core":"c8n_1x3_lf_shard1_replica1","base_url":"http://127.0.0.1:39961","node_name":"127.0.0.1:39961_","state":"active","leader":"true"}]; clusterState: DocCollection(c8n_1x3_lf//clusterstate.json/30)={
  "replicationFactor":"3",
  "shards":{"shard1":{
      "range":"80000000-7fffffff",
      "state":"active",
      "replicas":{
        "core_node1":{
          "state":"down",
          "base_url":"http://127.0.0.1:39236",
          "core":"c8n_1x3_lf_shard1_replica3",
          "node_name":"127.0.0.1:39236_"},
        "core_node2":{
          "core":"c8n_1x3_lf_shard1_replica2",
          "base_url":"http://127.0.0.1:41282",
          "node_name":"127.0.0.1:41282_",
          "state":"down"},
        "core_node3":{
          "core":"c8n_1x3_lf_shard1_replica1",
          "base_url":"http://127.0.0.1:39961",
          "node_name":"127.0.0.1:39961_",
          "state":"active",
          "leader":"true"}}}},
  "router":{"name":"compositeId"},
  "maxShardsPerNode":"1",
  "autoAddReplicas":"false"}
	at __randomizedtesting.SeedInfo.seed([749AE4B6842C4694:FCCEDB6C2AD02B6C]:0)
	at org.junit.Assert.fail(Assert.java:93)
	at org.junit.Assert.assertTrue(Assert.java:43)
	at org.apache.solr.cloud.LeaderFailoverAfterPartitionTest.testRf3WithLeaderFailover(LeaderFailoverAfterPartitionTest.java:170)
	at org.apache.solr.cloud.LeaderFailoverAfterPartitionTest.test(LeaderFailoverAfterPartitionTest.java:57)
	at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
	at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)
	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
	at java.lang.reflect.Method.invoke(Method.java:498)
	at com.carrotsearch.randomizedtesting.RandomizedRunner.invoke(RandomizedRunner.java:1713)
	at com.carrotsearch.randomizedtesting.RandomizedRunner$8.evaluate(RandomizedRunner.java:907)
	at com.carrotsearch.randomizedtesting.RandomizedRunner$9.evaluate(RandomizedRunner.java:943)
	at com.carrotsearch.randomizedtesting.RandomizedRunner$10.evaluate(RandomizedRunner.java:957)
	at org.apache.solr.BaseDistributedSearchTestCase$ShardsRepeatRule$ShardsFixedStatement.callStatement(BaseDistributedSearchTestCase.java:985)
	at org.apache.solr.BaseDistributedSearchTestCase$ShardsRepeatRule$ShardsStatement.evaluate(BaseDistributedSearchTestCase.java:960)
	at com.carrotsearch.randomizedtesting.rules.SystemPropertiesRestoreRule$1.evaluate(SystemPropertiesRestoreRule.java:57)
	at org.apache.lucene.util.TestRuleSetupTeardownChained$1.evaluate(TestRuleSetupTeardownChained.java:49)
	at org.apache.lucene.util.AbstractBeforeAfterRule$1.evaluate(AbstractBeforeAfterRule.java:45)
	at org.apache.lucene.util.TestRuleThreadAndTestName$1.evaluate(TestRuleThreadAndTestName.java:48)
	at org.apache.lucene.util.TestRuleIgnoreAfterMaxFailures$1.evaluate(TestRuleIgnoreAfterMaxFailures.java:64)
	at org.apache.lucene.util.TestRuleMarkFailure$1.evaluate(TestRuleMarkFailure.java:47)
	at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
	at com.carrotsearch.randomizedtesting.ThreadLeakControl$StatementRunner.run(ThreadLeakControl.java:367)
	at com.carrotsearch.randomizedtesting.ThreadLeakControl.forkTimeoutingTask(ThreadLeakControl.java:811)
	at com.carrotsearch.randomizedtesting.ThreadLeakControl$3.evaluate(ThreadLeakControl.java:462)
	at com.carrotsearch.randomizedtesting.RandomizedRunner.runSingleTest(RandomizedRunner.java:916)
	at com.carrotsearch.randomizedtesting.RandomizedRunner$5.evaluate(RandomizedRunner.java:802)
	at com.carrotsearch.randomizedtesting.RandomizedRunner$6.evaluate(RandomizedRunner.java:852)
	at com.carrotsearch.randomizedtesting.RandomizedRunner$7.evaluate(RandomizedRunner.java:863)
	at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
	at com.carrotsearch.randomizedtesting.rules.SystemPropertiesRestoreRule$1.evaluate(SystemPropertiesRestoreRule.java:57)
	at org.apache.lucene.util.AbstractBeforeAfterRule$1.evaluate(AbstractBeforeAfterRule.java:45)
	at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
	at org.apache.lucene.util.TestRuleStoreClassName$1.evaluate(TestRuleStoreClassName.java:41)
	at com.carrotsearch.randomizedtesting.rules.NoShadowingOrOverridesOnMethodsRule$1.evaluate(NoShadowingOrOverridesOnMethodsRule.java:40)
	at com.carrotsearch.randomizedtesting.rules.NoShadowingOrOverridesOnMethodsRule$1.evaluate(NoShadowingOrOverridesOnMethodsRule.java:40)
	at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
	at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
	at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
	at org.apache.lucene.util.TestRuleAssertionsRequired$1.evaluate(TestRuleAssertionsRequired.java:53)
	at org.apache.lucene.util.TestRuleMarkFailure$1.evaluate(TestRuleMarkFailure.java:47)
	at org.apache.lucene.util.TestRuleIgnoreAfterMaxFailures$1.evaluate(TestRuleIgnoreAfterMaxFailures.java:64)
	at org.apache.lucene.util.TestRuleIgnoreTestSuites$1.evaluate(TestRuleIgnoreTestSuites.java:54)
	at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
	at com.carrotsearch.randomizedtesting.ThreadLeakControl$StatementRunner.run(ThreadLeakControl.java:367)
	at java.lang.Thread.run(Thread.java:745)
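
For context, the failed assertion amounts to counting the replicas of shard1 that are both in state ACTIVE and hosted on a node still listed under live_nodes; here only core_node3 qualified. Below is a minimal SolrJ sketch of that kind of check. It is not the test's actual code: the ZooKeeper address and collection name are placeholders copied from the report above, and the client setup assumes a SolrJ 6.x/7.0-style CloudSolrClient.Builder.

import org.apache.solr.client.solrj.impl.CloudSolrClient;
import org.apache.solr.common.cloud.ClusterState;
import org.apache.solr.common.cloud.DocCollection;
import org.apache.solr.common.cloud.Replica;
import org.apache.solr.common.cloud.Slice;

public class ActiveReplicaCheck {
  public static void main(String[] args) throws Exception {
    // Placeholders taken from the log above; adjust for a real cluster.
    String zkHost = "127.0.0.1:40734/solr";
    String collection = "c8n_1x3_lf";

    try (CloudSolrClient client = new CloudSolrClient.Builder()
        .withZkHost(zkHost).build()) {
      client.connect();
      ClusterState cs = client.getZkStateReader().getClusterState();
      DocCollection coll = cs.getCollection(collection);
      Slice shard1 = coll.getSlice("shard1");

      int active = 0;
      for (Replica r : shard1.getReplicas()) {
        // A replica only counts if its state is ACTIVE and its node is live.
        if (r.getState() == Replica.State.ACTIVE
            && cs.getLiveNodes().contains(r.getNodeName())) {
          active++;
        }
      }
      System.out.println("active replicas on live nodes: " + active);
    }
  }
}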




Build Log:
[...truncated 11771 lines...]
   [junit4] Suite: org.apache.solr.cloud.LeaderFailoverAfterPartitionTest
   [junit4]   2> Creating dataDir: /home/jenkins/workspace/Lucene-Solr-master-Linux/solr/build/solr-core/test/J2/temp/solr.cloud.LeaderFailoverAfterPartitionTest_749AE4B6842C4694-001/init-core-data-001
   [junit4]   2> 683307 INFO  (SUITE-LeaderFailoverAfterPartitionTest-seed#[749AE4B6842C4694]-worker) [    ] o.a.s.SolrTestCaseJ4 Randomized ssl (false) and clientAuth (false) via: @org.apache.solr.SolrTestCaseJ4$SuppressSSL(bugUrl=https://issues.apache.org/jira/browse/SOLR-5776)
   [junit4]   2> 683307 INFO  (SUITE-LeaderFailoverAfterPartitionTest-seed#[749AE4B6842C4694]-worker) [    ] o.a.s.BaseDistributedSearchTestCase Setting hostContext system property: /
   [junit4]   2> 683309 INFO  (TEST-LeaderFailoverAfterPartitionTest.test-seed#[749AE4B6842C4694]) [    ] o.a.s.c.ZkTestServer STARTING ZK TEST SERVER
   [junit4]   2> 683309 INFO  (Thread-1639) [    ] o.a.s.c.ZkTestServer client port:0.0.0.0/0.0.0.0:0
   [junit4]   2> 683309 INFO  (Thread-1639) [    ] o.a.s.c.ZkTestServer Starting server
   [junit4]   2> 683409 INFO  (TEST-LeaderFailoverAfterPartitionTest.test-seed#[749AE4B6842C4694]) [    ] o.a.s.c.ZkTestServer start zk server on port:40734
   [junit4]   2> 683419 INFO  (TEST-LeaderFailoverAfterPartitionTest.test-seed#[749AE4B6842C4694]) [    ] o.a.s.c.AbstractZkTestCase put /home/jenkins/workspace/Lucene-Solr-master-Linux/solr/core/src/test-files/solr/collection1/conf/solrconfig-tlog.xml to /configs/conf1/solrconfig.xml
   [junit4]   2> 683422 INFO  (TEST-LeaderFailoverAfterPartitionTest.test-seed#[749AE4B6842C4694]) [    ] o.a.s.c.AbstractZkTestCase put /home/jenkins/workspace/Lucene-Solr-master-Linux/solr/core/src/test-files/solr/collection1/conf/schema.xml to /configs/conf1/schema.xml
   [junit4]   2> 683424 INFO  (TEST-LeaderFailoverAfterPartitionTest.test-seed#[749AE4B6842C4694]) [    ] o.a.s.c.AbstractZkTestCase put /home/jenkins/workspace/Lucene-Solr-master-Linux/solr/core/src/test-files/solr/collection1/conf/solrconfig.snippet.randomindexconfig.xml to /configs/conf1/solrconfig.snippet.randomindexconfig.xml
   [junit4]   2> 683425 INFO  (TEST-LeaderFailoverAfterPartitionTest.test-seed#[749AE4B6842C4694]) [    ] o.a.s.c.AbstractZkTestCase put /home/jenkins/workspace/Lucene-Solr-master-Linux/solr/core/src/test-files/solr/collection1/conf/stopwords.txt to /configs/conf1/stopwords.txt
   [junit4]   2> 683426 INFO  (TEST-LeaderFailoverAfterPartitionTest.test-seed#[749AE4B6842C4694]) [    ] o.a.s.c.AbstractZkTestCase put /home/jenkins/workspace/Lucene-Solr-master-Linux/solr/core/src/test-files/solr/collection1/conf/protwords.txt to /configs/conf1/protwords.txt
   [junit4]   2> 683427 INFO  (TEST-LeaderFailoverAfterPartitionTest.test-seed#[749AE4B6842C4694]) [    ] o.a.s.c.AbstractZkTestCase put /home/jenkins/workspace/Lucene-Solr-master-Linux/solr/core/src/test-files/solr/collection1/conf/currency.xml to /configs/conf1/currency.xml
   [junit4]   2> 683428 INFO  (TEST-LeaderFailoverAfterPartitionTest.test-seed#[749AE4B6842C4694]) [    ] o.a.s.c.AbstractZkTestCase put /home/jenkins/workspace/Lucene-Solr-master-Linux/solr/core/src/test-files/solr/collection1/conf/enumsConfig.xml to /configs/conf1/enumsConfig.xml
   [junit4]   2> 683428 INFO  (TEST-LeaderFailoverAfterPartitionTest.test-seed#[749AE4B6842C4694]) [    ] o.a.s.c.AbstractZkTestCase put /home/jenkins/workspace/Lucene-Solr-master-Linux/solr/core/src/test-files/solr/collection1/conf/open-exchange-rates.json to /configs/conf1/open-exchange-rates.json
   [junit4]   2> 683429 INFO  (TEST-LeaderFailoverAfterPartitionTest.test-seed#[749AE4B6842C4694]) [    ] o.a.s.c.AbstractZkTestCase put /home/jenkins/workspace/Lucene-Solr-master-Linux/solr/core/src/test-files/solr/collection1/conf/mapping-ISOLatin1Accent.txt to /configs/conf1/mapping-ISOLatin1Accent.txt
   [junit4]   2> 683430 INFO  (TEST-LeaderFailoverAfterPartitionTest.test-seed#[749AE4B6842C4694]) [    ] o.a.s.c.AbstractZkTestCase put /home/jenkins/workspace/Lucene-Solr-master-Linux/solr/core/src/test-files/solr/collection1/conf/old_synonyms.txt to /configs/conf1/old_synonyms.txt
   [junit4]   2> 683432 INFO  (TEST-LeaderFailoverAfterPartitionTest.test-seed#[749AE4B6842C4694]) [    ] o.a.s.c.AbstractZkTestCase put /home/jenkins/workspace/Lucene-Solr-master-Linux/solr/core/src/test-files/solr/collection1/conf/synonyms.txt to /configs/conf1/synonyms.txt
   [junit4]   2> 683570 INFO  (TEST-LeaderFailoverAfterPartitionTest.test-seed#[749AE4B6842C4694]) [    ] o.a.s.SolrTestCaseJ4 Writing core.properties file to /home/jenkins/workspace/Lucene-Solr-master-Linux/solr/build/solr-core/test/J2/temp/solr.cloud.LeaderFailoverAfterPartitionTest_749AE4B6842C4694-001/control-001/cores/collection1
   [junit4]   2> 683575 INFO  (TEST-LeaderFailoverAfterPartitionTest.test-seed#[749AE4B6842C4694]) [    ] o.e.j.s.Server jetty-9.3.14.v20161028
   [junit4]   2> 683577 INFO  (TEST-LeaderFailoverAfterPartitionTest.test-seed#[749AE4B6842C4694]) [    ] o.e.j.s.h.ContextHandler Started o.e.j.s.ServletContextHandler@26cb6f9f{/,null,AVAILABLE}
   [junit4]   2> 683589 INFO  (TEST-LeaderFailoverAfterPartitionTest.test-seed#[749AE4B6842C4694]) [    ] o.e.j.s.AbstractConnector Started ServerConnector@669fcb29{HTTP/1.1,[http/1.1]}{127.0.0.1:43297}
   [junit4]   2> 683589 INFO  (TEST-LeaderFailoverAfterPartitionTest.test-seed#[749AE4B6842C4694]) [    ] o.e.j.s.Server Started @686119ms
   [junit4]   2> 683589 INFO  (TEST-LeaderFailoverAfterPartitionTest.test-seed#[749AE4B6842C4694]) [    ] o.a.s.c.s.e.JettySolrRunner Jetty properties: {solr.data.dir=/home/jenkins/workspace/Lucene-Solr-master-Linux/solr/build/solr-core/test/J2/temp/solr.cloud.LeaderFailoverAfterPartitionTest_749AE4B6842C4694-001/tempDir-001/control/data, hostContext=/, hostPort=39961, coreRootDirectory=/home/jenkins/workspace/Lucene-Solr-master-Linux/solr/build/solr-core/test/J2/temp/solr.cloud.LeaderFailoverAfterPartitionTest_749AE4B6842C4694-001/control-001/cores}
   [junit4]   2> 683590 ERROR (TEST-LeaderFailoverAfterPartitionTest.test-seed#[749AE4B6842C4694]) [    ] o.a.s.s.StartupLoggingUtils Missing Java Option solr.log.dir. Logging may be missing or incomplete.
   [junit4]   2> 683590 INFO  (TEST-LeaderFailoverAfterPartitionTest.test-seed#[749AE4B6842C4694]) [    ] o.a.s.s.SolrDispatchFilter  ___      _       Welcome to Apache Solr™ version 7.0.0
   [junit4]   2> 683590 INFO  (TEST-LeaderFailoverAfterPartitionTest.test-seed#[749AE4B6842C4694]) [    ] o.a.s.s.SolrDispatchFilter / __| ___| |_ _   Starting in cloud mode on port null
   [junit4]   2> 683590 INFO  (TEST-LeaderFailoverAfterPartitionTest.test-seed#[749AE4B6842C4694]) [    ] o.a.s.s.SolrDispatchFilter \__ \/ _ \ | '_|  Install dir: null
   [junit4]   2> 683590 INFO  (TEST-LeaderFailoverAfterPartitionTest.test-seed#[749AE4B6842C4694]) [    ] o.a.s.s.SolrDispatchFilter |___/\___/_|_|    Start time: 2017-01-08T05:08:42.666Z
   [junit4]   2> 683597 INFO  (TEST-LeaderFailoverAfterPartitionTest.test-seed#[749AE4B6842C4694]) [    ] o.a.s.s.SolrDispatchFilter Loading solr.xml from SolrHome (not found in ZooKeeper)
   [junit4]   2> 683597 INFO  (TEST-LeaderFailoverAfterPartitionTest.test-seed#[749AE4B6842C4694]) [    ] o.a.s.c.SolrXmlConfig Loading container configuration from /home/jenkins/workspace/Lucene-Solr-master-Linux/solr/build/solr-core/test/J2/temp/solr.cloud.LeaderFailoverAfterPartitionTest_749AE4B6842C4694-001/control-001/solr.xml
   [junit4]   2> 683606 INFO  (TEST-LeaderFailoverAfterPartitionTest.test-seed#[749AE4B6842C4694]) [    ] o.a.s.c.ZkContainer Zookeeper client=127.0.0.1:40734/solr
   [junit4]   2> 683629 INFO  (TEST-LeaderFailoverAfterPartitionTest.test-seed#[749AE4B6842C4694]) [n:127.0.0.1:39961_    ] o.a.s.c.OverseerElectionContext I am going to be the leader 127.0.0.1:39961_
   [junit4]   2> 683630 INFO  (TEST-LeaderFailoverAfterPartitionTest.test-seed#[749AE4B6842C4694]) [n:127.0.0.1:39961_    ] o.a.s.c.Overseer Overseer (id=97245732692688900-127.0.0.1:39961_-n_0000000000) starting
   [junit4]   2> 683634 INFO  (TEST-LeaderFailoverAfterPartitionTest.test-seed#[749AE4B6842C4694]) [n:127.0.0.1:39961_    ] o.a.s.c.ZkController Register node as live in ZooKeeper:/live_nodes/127.0.0.1:39961_
   [junit4]   2> 683637 INFO  (OverseerStateUpdate-97245732692688900-127.0.0.1:39961_-n_0000000000) [n:127.0.0.1:39961_    ] o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (0) -> (1)
   [junit4]   2> 683709 INFO  (TEST-LeaderFailoverAfterPartitionTest.test-seed#[749AE4B6842C4694]) [n:127.0.0.1:39961_    ] o.a.s.c.CorePropertiesLocator Found 1 core definitions underneath /home/jenkins/workspace/Lucene-Solr-master-Linux/solr/build/solr-core/test/J2/temp/solr.cloud.LeaderFailoverAfterPartitionTest_749AE4B6842C4694-001/control-001/cores
   [junit4]   2> 683709 INFO  (TEST-LeaderFailoverAfterPartitionTest.test-seed#[749AE4B6842C4694]) [n:127.0.0.1:39961_    ] o.a.s.c.CorePropertiesLocator Cores are: [collection1]
   [junit4]   2> 683713 INFO  (OverseerStateUpdate-97245732692688900-127.0.0.1:39961_-n_0000000000) [n:127.0.0.1:39961_    ] o.a.s.c.o.ReplicaMutator Assigning new node to shard shard=shard1
   [junit4]   2> 684722 INFO  (coreLoadExecutor-3796-thread-1-processing-n:127.0.0.1:39961_) [n:127.0.0.1:39961_ c:control_collection   x:collection1] o.a.s.c.SolrConfig Using Lucene MatchVersion: 7.0.0
   [junit4]   2> 684736 INFO  (coreLoadExecutor-3796-thread-1-processing-n:127.0.0.1:39961_) [n:127.0.0.1:39961_ c:control_collection   x:collection1] o.a.s.s.IndexSchema [collection1] Schema name=test
   [junit4]   2> 684808 WARN  (coreLoadExecutor-3796-thread-1-processing-n:127.0.0.1:39961_) [n:127.0.0.1:39961_ c:control_collection   x:collection1] o.a.s.s.IndexSchema [collection1] default search field in schema is text. WARNING: Deprecated, please use 'df' on request instead.
   [junit4]   2> 684810 INFO  (coreLoadExecutor-3796-thread-1-processing-n:127.0.0.1:39961_) [n:127.0.0.1:39961_ c:control_collection   x:collection1] o.a.s.s.IndexSchema Loaded schema test/1.0 with uniqueid field id
   [junit4]   2> 684818 INFO  (coreLoadExecutor-3796-thread-1-processing-n:127.0.0.1:39961_) [n:127.0.0.1:39961_ c:control_collection   x:collection1] o.a.s.c.CoreContainer Creating SolrCore 'collection1' using configuration from collection control_collection
   [junit4]   2> 684819 INFO  (coreLoadExecutor-3796-thread-1-processing-n:127.0.0.1:39961_) [n:127.0.0.1:39961_ c:control_collection   x:collection1] o.a.s.c.SolrCore [[collection1] ] Opening new SolrCore at [/home/jenkins/workspace/Lucene-Solr-master-Linux/solr/build/solr-core/test/J2/temp/solr.cloud.LeaderFailoverAfterPartitionTest_749AE4B6842C4694-001/control-001/cores/collection1], dataDir=[/home/jenkins/workspace/Lucene-Solr-master-Linux/solr/build/solr-core/test/J2/temp/solr.cloud.LeaderFailoverAfterPartitionTest_749AE4B6842C4694-001/control-001/cores/collection1/data/]
   [junit4]   2> 684819 INFO  (coreLoadExecutor-3796-thread-1-processing-n:127.0.0.1:39961_) [n:127.0.0.1:39961_ c:control_collection   x:collection1] o.a.s.c.JmxMonitoredMap JMX monitoring is enabled. Adding Solr mbeans to JMX Server: com.sun.jmx.mbeanserver.JmxMBeanServer@273d805e
   [junit4]   2> 684820 INFO  (coreLoadExecutor-3796-thread-1-processing-n:127.0.0.1:39961_) [n:127.0.0.1:39961_ c:control_collection   x:collection1] o.a.s.u.RandomMergePolicy RandomMergePolicy wrapping class org.apache.lucene.index.LogByteSizeMergePolicy: [LogByteSizeMergePolicy: minMergeSize=1677721, mergeFactor=47, maxMergeSize=2147483648, maxMergeSizeForForcedMerge=9223372036854775807, calibrateSizeByDeletes=false, maxMergeDocs=2147483647, maxCFSSegmentSizeMB=8.796093022207999E12, noCFSRatio=1.0]
   [junit4]   2> 684823 WARN  (coreLoadExecutor-3796-thread-1-processing-n:127.0.0.1:39961_) [n:127.0.0.1:39961_ c:control_collection   x:collection1] o.a.s.c.RequestHandlers INVALID paramSet a in requestHandler {type = requestHandler,name = /dump,class = DumpRequestHandler,attributes = {initParams=a, name=/dump, class=DumpRequestHandler},args = {defaults={a=A,b=B}}}
   [junit4]   2> 684831 INFO  (coreLoadExecutor-3796-thread-1-processing-n:127.0.0.1:39961_) [n:127.0.0.1:39961_ c:control_collection   x:collection1] o.a.s.u.UpdateHandler Using UpdateLog implementation: org.apache.solr.update.UpdateLog
   [junit4]   2> 684831 INFO  (coreLoadExecutor-3796-thread-1-processing-n:127.0.0.1:39961_) [n:127.0.0.1:39961_ c:control_collection   x:collection1] o.a.s.u.UpdateLog Initializing UpdateLog: dataDir= defaultSyncLevel=FLUSH numRecordsToKeep=100 maxNumLogsToKeep=10 numVersionBuckets=65536
   [junit4]   2> 684832 INFO  (coreLoadExecutor-3796-thread-1-processing-n:127.0.0.1:39961_) [n:127.0.0.1:39961_ c:control_collection   x:collection1] o.a.s.u.CommitTracker Hard AutoCommit: disabled
   [junit4]   2> 684832 INFO  (coreLoadExecutor-3796-thread-1-processing-n:127.0.0.1:39961_) [n:127.0.0.1:39961_ c:control_collection   x:collection1] o.a.s.u.CommitTracker Soft AutoCommit: disabled
   [junit4]   2> 684832 INFO  (coreLoadExecutor-3796-thread-1-processing-n:127.0.0.1:39961_) [n:127.0.0.1:39961_ c:control_collection   x:collection1] o.a.s.u.RandomMergePolicy RandomMergePolicy wrapping class org.apache.lucene.index.LogDocMergePolicy: [LogDocMergePolicy: minMergeSize=1000, mergeFactor=20, maxMergeSize=9223372036854775807, maxMergeSizeForForcedMerge=9223372036854775807, calibrateSizeByDeletes=true, maxMergeDocs=2147483647, maxCFSSegmentSizeMB=8.796093022207999E12, noCFSRatio=1.0]
   [junit4]   2> 684833 INFO  (coreLoadExecutor-3796-thread-1-processing-n:127.0.0.1:39961_) [n:127.0.0.1:39961_ c:control_collection   x:collection1] o.a.s.s.SolrIndexSearcher Opening [Searcher@7de97499[collection1] main]
   [junit4]   2> 684833 INFO  (coreLoadExecutor-3796-thread-1-processing-n:127.0.0.1:39961_) [n:127.0.0.1:39961_ c:control_collection   x:collection1] o.a.s.r.ManagedResourceStorage Configured ZooKeeperStorageIO with znodeBase: /configs/conf1
   [junit4]   2> 684834 INFO  (coreLoadExecutor-3796-thread-1-processing-n:127.0.0.1:39961_) [n:127.0.0.1:39961_ c:control_collection   x:collection1] o.a.s.r.ManagedResourceStorage Loaded null at path _rest_managed.json using ZooKeeperStorageIO:path=/configs/conf1
   [junit4]   2> 684834 INFO  (coreLoadExecutor-3796-thread-1-processing-n:127.0.0.1:39961_) [n:127.0.0.1:39961_ c:control_collection   x:collection1] o.a.s.h.ReplicationHandler Commits will be reserved for  10000
   [junit4]   2> 684834 INFO  (searcherExecutor-3797-thread-1-processing-n:127.0.0.1:39961_ x:collection1 c:control_collection) [n:127.0.0.1:39961_ c:control_collection   x:collection1] o.a.s.c.SolrCore [collection1] Registered new searcher Searcher@7de97499[collection1] main{ExitableDirectoryReader(UninvertingDirectoryReader())}
   [junit4]   2> 684835 INFO  (coreLoadExecutor-3796-thread-1-processing-n:127.0.0.1:39961_) [n:127.0.0.1:39961_ c:control_collection   x:collection1] o.a.s.u.UpdateLog Could not find max version in index or recent updates, using new clock 1555931724682100736
   [junit4]   2> 684840 INFO  (coreZkRegister-3789-thread-1-processing-n:127.0.0.1:39961_ x:collection1 c:control_collection) [n:127.0.0.1:39961_ c:control_collection s:shard1 r:core_node1 x:collection1] o.a.s.c.ShardLeaderElectionContext Enough replicas found to continue.
   [junit4]   2> 684840 INFO  (coreZkRegister-3789-thread-1-processing-n:127.0.0.1:39961_ x:collection1 c:control_collection) [n:127.0.0.1:39961_ c:control_collection s:shard1 r:core_node1 x:collection1] o.a.s.c.ShardLeaderElectionContext I may be the new leader - try and sync
   [junit4]   2> 684840 INFO  (coreZkRegister-3789-thread-1-processing-n:127.0.0.1:39961_ x:collection1 c:control_collection) [n:127.0.0.1:39961_ c:control_collection s:shard1 r:core_node1 x:collection1] o.a.s.c.SyncStrategy Sync replicas to http://127.0.0.1:39961/collection1/
   [junit4]   2> 684840 INFO  (coreZkRegister-3789-thread-1-processing-n:127.0.0.1:39961_ x:collection1 c:control_collection) [n:127.0.0.1:39961_ c:control_collection s:shard1 r:core_node1 x:collection1] o.a.s.c.SyncStrategy Sync Success - now sync replicas to me
   [junit4]   2> 684840 INFO  (coreZkRegister-3789-thread-1-processing-n:127.0.0.1:39961_ x:collection1 c:control_collection) [n:127.0.0.1:39961_ c:control_collection s:shard1 r:core_node1 x:collection1] o.a.s.c.SyncStrategy http://127.0.0.1:39961/collection1/ has no replicas
   [junit4]   2> 684842 INFO  (coreZkRegister-3789-thread-1-processing-n:127.0.0.1:39961_ x:collection1 c:control_collection) [n:127.0.0.1:39961_ c:control_collection s:shard1 r:core_node1 x:collection1] o.a.s.c.ShardLeaderElectionContext I am the new leader: http://127.0.0.1:39961/collection1/ shard1
   [junit4]   2> 684993 INFO  (coreZkRegister-3789-thread-1-processing-n:127.0.0.1:39961_ x:collection1 c:control_collection) [n:127.0.0.1:39961_ c:control_collection s:shard1 r:core_node1 x:collection1] o.a.s.c.ZkController I am the leader, no recovery necessary
   [junit4]   2> 685128 INFO  (TEST-LeaderFailoverAfterPartitionTest.test-seed#[749AE4B6842C4694]) [    ] o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (0) -> (1)
   [junit4]   2> 685128 INFO  (TEST-LeaderFailoverAfterPartitionTest.test-seed#[749AE4B6842C4694]) [    ] o.a.s.c.s.i.ZkClientClusterStateProvider Cluster at 127.0.0.1:40734/solr ready
   [junit4]   2> 685129 INFO  (TEST-LeaderFailoverAfterPartitionTest.test-seed#[749AE4B6842C4694]) [    ] o.a.s.c.ChaosMonkey monkey: init - expire sessions:false cause connection loss:false
   [junit4]   2> 685197 INFO  (TEST-LeaderFailoverAfterPartitionTest.test-seed#[749AE4B6842C4694]) [    ] o.a.s.SolrTestCaseJ4 Writing core.properties file to /home/jenkins/workspace/Lucene-Solr-master-Linux/solr/build/solr-core/test/J2/temp/solr.cloud.LeaderFailoverAfterPartitionTest_749AE4B6842C4694-001/shard-1-001/cores/collection1
   [junit4]   2> 685198 INFO  (TEST-LeaderFailoverAfterPartitionTest.test-seed#[749AE4B6842C4694]) [    ] o.a.s.c.AbstractFullDistribZkTestBase create jetty 1 in directory /home/jenkins/workspace/Lucene-Solr-master-Linux/solr/build/solr-core/test/J2/temp/solr.cloud.LeaderFailoverAfterPartitionTest_749AE4B6842C4694-001/shard-1-001
   [junit4]   2> 685198 INFO  (TEST-LeaderFailoverAfterPartitionTest.test-seed#[749AE4B6842C4694]) [    ] o.e.j.s.Server jetty-9.3.14.v20161028
   [junit4]   2> 685199 INFO  (TEST-LeaderFailoverAfterPartitionTest.test-seed#[749AE4B6842C4694]) [    ] o.e.j.s.h.ContextHandler Started o.e.j.s.ServletContextHandler@6f61eb93{/,null,AVAILABLE}
   [junit4]   2> 685199 INFO  (TEST-LeaderFailoverAfterPartitionTest.test-seed#[749AE4B6842C4694]) [    ] o.e.j.s.AbstractConnector Started ServerConnector@1b18ba8f{HTTP/1.1,[http/1.1]}{127.0.0.1:40102}
   [junit4]   2> 685199 INFO  (TEST-LeaderFailoverAfterPartitionTest.test-seed#[749AE4B6842C4694]) [    ] o.e.j.s.Server Started @687729ms
   [junit4]   2> 685199 INFO  (TEST-LeaderFailoverAfterPartitionTest.test-seed#[749AE4B6842C4694]) [    ] o.a.s.c.s.e.JettySolrRunner Jetty properties: {solr.data.dir=/home/jenkins/workspace/Lucene-Solr-master-Linux/solr/build/solr-core/test/J2/temp/solr.cloud.LeaderFailoverAfterPartitionTest_749AE4B6842C4694-001/tempDir-001/jetty1, solrconfig=solrconfig.xml, hostContext=/, hostPort=36407, coreRootDirectory=/home/jenkins/workspace/Lucene-Solr-master-Linux/solr/build/solr-core/test/J2/temp/solr.cloud.LeaderFailoverAfterPartitionTest_749AE4B6842C4694-001/shard-1-001/cores}
   [junit4]   2> 685199 ERROR (TEST-LeaderFailoverAfterPartitionTest.test-seed#[749AE4B6842C4694]) [    ] o.a.s.s.StartupLoggingUtils Missing Java Option solr.log.dir. Logging may be missing or incomplete.
   [junit4]   2> 685200 INFO  (TEST-LeaderFailoverAfterPartitionTest.test-seed#[749AE4B6842C4694]) [    ] o.a.s.s.SolrDispatchFilter  ___      _       Welcome to Apache Solr™ version 7.0.0
   [junit4]   2> 685200 INFO  (TEST-LeaderFailoverAfterPartitionTest.test-seed#[749AE4B6842C4694]) [    ] o.a.s.s.SolrDispatchFilter / __| ___| |_ _   Starting in cloud mode on port null
   [junit4]   2> 685200 INFO  (TEST-LeaderFailoverAfterPartitionTest.test-seed#[749AE4B6842C4694]) [    ] o.a.s.s.SolrDispatchFilter \__ \/ _ \ | '_|  Install dir: null
   [junit4]   2> 685200 INFO  (TEST-LeaderFailoverAfterPartitionTest.test-seed#[749AE4B6842C4694]) [    ] o.a.s.s.SolrDispatchFilter |___/\___/_|_|    Start time: 2017-01-08T05:08:44.276Z
   [junit4]   2> 685201 INFO  (TEST-LeaderFailoverAfterPartitionTest.test-seed#[749AE4B6842C4694]) [    ] o.a.s.s.SolrDispatchFilter Loading solr.xml from SolrHome (not found in ZooKeeper)
   [junit4]   2> 685201 INFO  (TEST-LeaderFailoverAfterPartitionTest.test-seed#[749AE4B6842C4694]) [    ] o.a.s.c.SolrXmlConfig Loading container configuration from /home/jenkins/workspace/Lucene-Solr-master-Linux/solr/build/solr-core/test/J2/temp/solr.cloud.LeaderFailoverAfterPartitionTest_749AE4B6842C4694-001/shard-1-001/solr.xml
   [junit4]   2> 685207 INFO  (TEST-LeaderFailoverAfterPartitionTest.test-seed#[749AE4B6842C4694]) [    ] o.a.s.c.ZkContainer Zookeeper client=127.0.0.1:40734/solr
   [junit4]   2> 685212 INFO  (TEST-LeaderFailoverAfterPartitionTest.test-seed#[749AE4B6842C4694]) [n:127.0.0.1:36407_    ] o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (0) -> (1)
   [junit4]   2> 685215 INFO  (TEST-LeaderFailoverAfterPartitionTest.test-seed#[749AE4B6842C4694]) [n:127.0.0.1:36407_    ] o.a.s.c.ZkController Register node as live in ZooKeeper:/live_nodes/127.0.0.1:36407_
   [junit4]   2> 685216 INFO  (zkCallback-3393-thread-2-processing-n:127.0.0.1:39961_) [n:127.0.0.1:39961_    ] o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (1) -> (2)
   [junit4]   2> 685216 INFO  (zkCallback-3397-thread-1) [    ] o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (1) -> (2)
   [junit4]   2> 685217 INFO  (zkCallback-3402-thread-1-processing-n:127.0.0.1:36407_) [n:127.0.0.1:36407_    ] o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (1) -> (2)
   [junit4]   2> 685302 INFO  (TEST-LeaderFailoverAfterPartitionTest.test-seed#[749AE4B6842C4694]) [n:127.0.0.1:36407_    ] o.a.s.c.CorePropertiesLocator Found 1 core definitions underneath /home/jenkins/workspace/Lucene-Solr-master-Linux/solr/build/solr-core/test/J2/temp/solr.cloud.LeaderFailoverAfterPartitionTest_749AE4B6842C4694-001/shard-1-001/cores
   [junit4]   2> 685303 INFO  (TEST-LeaderFailoverAfterPartitionTest.test-seed#[749AE4B6842C4694]) [n:127.0.0.1:36407_    ] o.a.s.c.CorePropertiesLocator Cores are: [collection1]
   [junit4]   2> 685305 INFO  (OverseerStateUpdate-97245732692688900-127.0.0.1:39961_-n_0000000000) [n:127.0.0.1:39961_    ] o.a.s.c.o.ReplicaMutator Assigning new node to shard shard=shard2
   [junit4]   2> 686315 INFO  (coreLoadExecutor-3807-thread-1-processing-n:127.0.0.1:36407_) [n:127.0.0.1:36407_ c:collection1   x:collection1] o.a.s.c.SolrConfig Using Lucene MatchVersion: 7.0.0
   [junit4]   2> 686327 INFO  (coreLoadExecutor-3807-thread-1-processing-n:127.0.0.1:36407_) [n:127.0.0.1:36407_ c:collection1   x:collection1] o.a.s.s.IndexSchema [collection1] Schema name=test
   [junit4]   2> 686403 WARN  (coreLoadExecutor-3807-thread-1-processing-n:127.0.0.1:36407_) [n:127.0.0.1:36407_ c:collection1   x:collection1] o.a.s.s.IndexSchema [collection1] default search field in schema is text. WARNING: Deprecated, please use 'df' on request instead.
   [junit4]   2> 686405 INFO  (coreLoadExecutor-3807-thread-1-processing-n:127.0.0.1:36407_) [n:127.0.0.1:36407_ c:collection1   x:collection1] o.a.s.s.IndexSchema Loaded schema test/1.0 with uniqueid field id
   [junit4]   2> 686415 INFO  (coreLoadExecutor-3807-thread-1-processing-n:127.0.0.1:36407_) [n:127.0.0.1:36407_ c:collection1   x:collection1] o.a.s.c.CoreContainer Creating SolrCore 'collection1' using configuration from collection collection1
   [junit4]   2> 686415 INFO  (coreLoadExecutor-3807-thread-1-processing-n:127.0.0.1:36407_) [n:127.0.0.1:36407_ c:collection1   x:collection1] o.a.s.c.SolrCore [[collection1] ] Opening new SolrCore at [/home/jenkins/workspace/Lucene-Solr-master-Linux/solr/build/solr-core/test/J2/temp/solr.cloud.LeaderFailoverAfterPartitionTest_749AE4B6842C4694-001/shard-1-001/cores/collection1], dataDir=[/home/jenkins/workspace/Lucene-Solr-master-Linux/solr/build/solr-core/test/J2/temp/solr.cloud.LeaderFailoverAfterPartitionTest_749AE4B6842C4694-001/shard-1-001/cores/collection1/data/]
   [junit4]   2> 686415 INFO  (coreLoadExecutor-3807-thread-1-processing-n:127.0.0.1:36407_) [n:127.0.0.1:36407_ c:collection1   x:collection1] o.a.s.c.JmxMonitoredMap JMX monitoring is enabled. Adding Solr mbeans to JMX Server: com.sun.jmx.mbeanserver.JmxMBeanServer@273d805e
   [junit4]   2> 686417 INFO  (coreLoadExecutor-3807-thread-1-processing-n:127.0.0.1:36407_) [n:127.0.0.1:36407_ c:collection1   x:collection1] o.a.s.u.RandomMergePolicy RandomMergePolicy wrapping class org.apache.lucene.index.LogByteSizeMergePolicy: [LogByteSizeMergePolicy: minMergeSize=1677721, mergeFactor=47, maxMergeSize=2147483648, maxMergeSizeForForcedMerge=9223372036854775807, calibrateSizeByDeletes=false, maxMergeDocs=2147483647, maxCFSSegmentSizeMB=8.796093022207999E12, noCFSRatio=1.0]
   [junit4]   2> 686422 WARN  (coreLoadExecutor-3807-thread-1-processing-n:127.0.0.1:36407_) [n:127.0.0.1:36407_ c:collection1   x:collection1] o.a.s.c.RequestHandlers INVALID paramSet a in requestHandler {type = requestHandler,name = /dump,class = DumpRequestHandler,attributes = {initParams=a, name=/dump, class=DumpRequestHandler},args = {defaults={a=A,b=B}}}
   [junit4]   2> 686432 INFO  (coreLoadExecutor-3807-thread-1-processing-n:127.0.0.1:36407_) [n:127.0.0.1:36407_ c:collection1   x:collection1] o.a.s.u.UpdateHandler Using UpdateLog implementation: org.apache.solr.update.UpdateLog
   [junit4]   2> 686432 INFO  (coreLoadExecutor-3807-thread-1-processing-n:127.0.0.1:36407_) [n:127.0.0.1:36407_ c:collection1   x:collection1] o.a.s.u.UpdateLog Initializing UpdateLog: dataDir= defaultSyncLevel=FLUSH numRecordsToKeep=100 maxNumLogsToKeep=10 numVersionBuckets=65536
   [junit4]   2> 686433 INFO  (coreLoadExecutor-3807-thread-1-processing-n:127.0.0.1:36407_) [n:127.0.0.1:36407_ c:collection1   x:collection1] o.a.s.u.CommitTracker Hard AutoCommit: disabled
   [junit4]   2> 686433 INFO  (coreLoadExecutor-3807-thread-1-processing-n:127.0.0.1:36407_) [n:127.0.0.1:36407_ c:collection1   x:collection1] o.a.s.u.CommitTracker Soft AutoCommit: disabled
   [junit4]   2> 686433 INFO  (coreLoadExecutor-3807-thread-1-processing-n:127.0.0.1:36407_) [n:127.0.0.1:36407_ c:collection1   x:collection1] o.a.s.u.RandomMergePolicy RandomMergePolicy wrapping class org.apache.lucene.index.LogDocMergePolicy: [LogDocMergePolicy: minMergeSize=1000, mergeFactor=20, maxMergeSize=9223372036854775807, maxMergeSizeForForcedMerge=9223372036854775807, calibrateSizeByDeletes=true, maxMergeDocs=2147483647, maxCFSSegmentSizeMB=8.796093022207999E12, noCFSRatio=1.0]
   [junit4]   2> 686434 INFO  (coreLoadExecutor-3807-thread-1-processing-n:127.0.0.1:36407_) [n:127.0.0.1:36407_ c:collection1   x:collection1] o.a.s.s.SolrIndexSearcher Opening [Searcher@12b07aa3[collection1] main]
   [junit4]   2> 686434 INFO  (coreLoadExecutor-3807-thread-1-processing-n:127.0.0.1:36407_) [n:127.0.0.1:36407_ c:collection1   x:collection1] o.a.s.r.ManagedResourceStorage Configured ZooKeeperStorageIO with znodeBase: /configs/conf1
   [junit4]   2> 686435 INFO  (coreLoadExecutor-3807-thread-1-processing-n:127.0.0.1:36407_) [n:127.0.0.1:36407_ c:collection1   x:collection1] o.a.s.r.ManagedResourceStorage Loaded null at path _rest_managed.json using ZooKeeperStorageIO:path=/configs/conf1
   [junit4]   2> 686435 INFO  (coreLoadExecutor-3807-thread-1-processing-n:127.0.0.1:36407_) [n:127.0.0.1:36407_ c:collection1   x:collection1] o.a.s.h.ReplicationHandler Commits will be reserved for  10000
   [junit4]   2> 686436 INFO  (searcherExecutor-3808-thread-1-processing-n:127.0.0.1:36407_ x:collection1 c:collection1) [n:127.0.0.1:36407_ c:collection1   x:collection1] o.a.s.c.SolrCore [collection1] Registered new searcher Searcher@12b07aa3[collection1] main{ExitableDirectoryReader(UninvertingDirectoryReader())}
   [junit4]   2> 686436 INFO  (coreLoadExecutor-3807-thread-1-processing-n:127.0.0.1:36407_) [n:127.0.0.1:36407_ c:collection1   x:collection1] o.a.s.u.UpdateLog Could not find max version in index or recent updates, using new clock 1555931726360870912
   [junit4]   2> 686441 INFO  (coreZkRegister-3802-thread-1-processing-n:127.0.0.1:36407_ x:collection1 c:collection1) [n:127.0.0.1:36407_ c:collection1 s:shard2 r:core_node1 x:collection1] o.a.s.c.ShardLeaderElectionContext Enough replicas found to continue.
   [junit4]   2> 686441 INFO  (coreZkRegister-3802-thread-1-processing-n:127.0.0.1:36407_ x:collection1 c:collection1) [n:127.0.0.1:36407_ c:collection1 s:shard2 r:core_node1 x:collection1] o.a.s.c.ShardLeaderElectionContext I may be the new leader - try and sync
   [junit4]   2> 686441 INFO  (coreZkRegister-3802-thread-1-processing-n:127.0.0.1:36407_ x:collection1 c:collection1) [n:127.0.0.1:36407_ c:collection1 s:shard2 r:core_node1 x:collection1] o.a.s.c.SyncStrategy Sync replicas to http://127.0.0.1:36407/collection1/
   [junit4]   2> 686441 INFO  (coreZkRegister-3802-thread-1-processing-n:127.0.0.1:36407_ x:collection1 c:collection1) [n:127.0.0.1:36407_ c:collection1 s:shard2 r:core_node1 x:collection1] o.a.s.c.SyncStrategy Sync Success - now sync replicas to me
   [junit4]   2> 686441 INFO  (coreZkRegister-3802-thread-1-processing-n:127.0.0.1:36407_ x:collection1 c:collection1) [n:127.0.0.1:36407_ c:collection1 s:shard2 r:core_node1 x:collection1] o.a.s.c.SyncStrategy http://127.0.0.1:36407/collection1/ has no replicas
   [junit4]   2> 686443 INFO  (coreZkRegister-3802-thread-1-processing-n:127.0.0.1:36407_ x:collection1 c:collection1) [n:127.0.0.1:36407_ c:collection1 s:shard2 r:core_node1 x:collection1] o.a.s.c.ShardLeaderElectionContext I am the new leader: http://127.0.0.1:36407/collection1/ shard2
   [junit4]   2> 686596 INFO  (coreZkRegister-3802-thread-1-processing-n:127.0.0.1:36407_ x:collection1 c:collection1) [n:127.0.0.1:36407_ c:collection1 s:shard2 r:core_node1 x:collection1] o.a.s.c.ZkController I am the leader, no recovery necessary
   [junit4]   2> 686749 INFO  (TEST-LeaderFailoverAfterPartitionTest.test-seed#[749AE4B6842C4694]) [    ] o.a.s.SolrTestCaseJ4 Writing core.properties file to /home/jenkins/workspace/Lucene-Solr-master-Linux/solr/build/solr-core/test/J2/temp/solr.cloud.LeaderFailoverAfterPartitionTest_749AE4B6842C4694-001/shard-2-001/cores/collection1
   [junit4]   2> 686749 INFO  (TEST-LeaderFailoverAfterPartitionTest.test-seed#[749AE4B6842C4694]) [    ] o.a.s.c.AbstractFullDistribZkTestBase create jetty 2 in directory /home/jenkins/workspace/Lucene-Solr-master-Linux/solr/build/solr-core/test/J2/temp/solr.cloud.LeaderFailoverAfterPartitionTest_749AE4B6842C4694-001/shard-2-001
   [junit4]   2> 686750 INFO  (TEST-LeaderFailoverAfterPartitionTest.test-seed#[749AE4B6842C4694]) [    ] o.e.j.s.Server jetty-9.3.14.v20161028
   [junit4]   2> 686751 INFO  (TEST-LeaderFailoverAfterPartitionTest.test-seed#[749AE4B6842C4694]) [    ] o.e.j.s.h.ContextHandler Started o.e.j.s.ServletContextHandler@1038829f{/,null,AVAILABLE}
   [junit4]   2> 686751 INFO  (TEST-LeaderFailoverAfterPartitionTest.test-seed#[749AE4B6842C4694]) [    ] o.e.j.s.AbstractConnector Started ServerConnector@60133873{HTTP/1.1,[http/1.1]}{127.0.0.1:36343}
   [junit4]   2> 686751 INFO  (TEST-LeaderFailoverAfterPartitionTest.test-seed#[749AE4B6842C4694]) [    ] o.e.j.s.Server Started @689281ms
   [junit4]   2> 686751 INFO  (TEST-LeaderFailoverAfterPartitionTest.test-seed#[749AE4B6842C4694]) [    ] o.a.s.c.s.e.JettySolrRunner Jetty properties: {solr.data.dir=/home/jenkins/workspace/Lucene-Solr-master-Linux/solr/build/solr-core/test/J2/temp/solr.cloud.LeaderFailoverAfterPartitionTest_749AE4B6842C4694-001/tempDir-001/jetty2, solrconfig=solrconfig.xml, hostContext=/, hostPort=39236, coreRootDirectory=/home/jenkins/workspace/Lucene-Solr-master-Linux/solr/build/solr-core/test/J2/temp/solr.cloud.LeaderFailoverAfterPartitionTest_749AE4B6842C4694-001/shard-2-001/cores}
   [junit4]   2> 686751 ERROR (TEST-LeaderFailoverAfterPartitionTest.test-seed#[749AE4B6842C4694]) [    ] o.a.s.s.StartupLoggingUtils Missing Java Option solr.log.dir. Logging may be missing or incomplete.
   [junit4]   2> 686751 INFO  (TEST-LeaderFailoverAfterPartitionTest.test-seed#[749AE4B6842C4694]) [    ] o.a.s.s.SolrDispatchFilter  ___      _       Welcome to Apache Solr™ version 7.0.0
   [junit4]   2> 686751 INFO  (TEST-LeaderFailoverAfterPartitionTest.test-seed#[749AE4B6842C4694]) [    ] o.a.s.s.SolrDispatchFilter / __| ___| |_ _   Starting in cloud mode on port null
   [junit4]   2> 686752 INFO  (TEST-LeaderFailoverAfterPartitionTest.test-seed#[749AE4B6842C4694]) [    ] o.a.s.s.SolrDispatchFilter \__ \/ _ \ | '_|  Install dir: null
   [junit4]   2> 686752 INFO  (TEST-LeaderFailoverAfterPartitionTest.test-seed#[749AE4B6842C4694]) [    ] o.a.s.s.SolrDispatchFilter |___/\___/_|_|    Start time: 2017-01-08T05:08:45.828Z
   [junit4]   2> 686754 INFO  (TEST-LeaderFailoverAfterPartitionTest.test-seed#[749AE4B6842C4694]) [    ] o.a.s.s.SolrDispatchFilter Loading solr.xml from SolrHome (not found in ZooKeeper)
   [junit4]   2> 686754 INFO  (TEST-LeaderFailoverAfterPartitionTest.test-seed#[749AE4B6842C4694]) [    ] o.a.s.c.SolrXmlConfig Loading container configuration from /home/jenkins/workspace/Lucene-Solr-master-Linux/solr/build/solr-core/test/J2/temp/solr.cloud.LeaderFailoverAfterPartitionTest_749AE4B6842C4694-001/shard-2-001/solr.xml
   [junit4]   2> 686760 INFO  (TEST-LeaderFailoverAfterPartitionTest.test-seed#[749AE4B6842C4694]) [    ] o.a.s.c.ZkContainer Zookeeper client=127.0.0.1:40734/solr
   [junit4]   2> 686767 INFO  (TEST-LeaderFailoverAfterPartitionTest.test-seed#[749AE4B6842C4694]) [n:127.0.0.1:39236_    ] o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (0) -> (2)
   [junit4]   2> 686770 INFO  (TEST-LeaderFailoverAfterPartitionTest.test-seed#[749AE4B6842C4694]) [n:127.0.0.1:39236_    ] o.a.s.c.ZkController Register node as live in ZooKeeper:/live_nodes/127.0.0.1:39236_
   [junit4]   2> 686770 INFO  (zkCallback-3397-thread-1) [    ] o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (2) -> (3)
   [junit4]   2> 686770 INFO  (zkCallback-3402-thread-1-processing-n:127.0.0.1:36407_) [n:127.0.0.1:36407_    ] o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (2) -> (3)
   [junit4]   2> 686770 INFO  (zkCallback-3393-thread-1-processing-n:127.0.0.1:39961_) [n:127.0.0.1:39961_    ] o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (2) -> (3)
   [junit4]   2> 686776 INFO  (zkCallback-3408-thread-1-processing-n:127.0.0.1:39236_) [n:127.0.0.1:39236_    ] o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (2) -> (3)
   [junit4]   2> 686801 INFO  (TEST-LeaderFailoverAfterPartitionTest.test-seed#[749AE4B6842C4694]) [n:127.0.0.1:39236_    ] o.a.s.c.CorePropertiesLocator Found 1 core definitions underneath /home/jenkins/workspace/Lucene-Solr-master-Linux/solr/build/solr-core/test/J2/temp/solr.cloud.LeaderFailoverAfterPartitionTest_749AE4B6842C4694-001/shard-2-001/cores
   [junit4]   2> 686801 INFO  (TEST-LeaderFailoverAfterPartitionTest.test-seed#[749AE4B6842C4694]) [n:127.0.0.1:39236_    ] o.a.s.c.CorePropertiesLocator Cores are: [collection1]
   [junit4]   2> 686802 INFO  (OverseerStateUpdate-97245732692688900-127.0.0.1:39961_-n_0000000000) [n:127.0.0.1:39961_    ] o.a.s.c.o.ReplicaMutator Assigning new node to shard shard=shard1
   [junit4]   2> 687810 INFO  (coreLoadExecutor-3818-thread-1-processing-n:127.0.0.1:39236_) [n:127.0.0.1:39236_ c:collection1   x:collection1] o.a.s.c.SolrConfig Using Lucene MatchVersion: 7.0.0
   [junit4]   2> 687820 INFO  (coreLoadExecutor-3818-thread-1-processing-n:127.0.0.1:39236_) [n:127.0.0.1:39236_ c:collection1   x:collection1] o.a.s.s.IndexSchema [collection1] Schema name=test
   [junit4]   2> 687889 WARN  (coreLoadExecutor-3818-thread-1-processing-n:127.0.0.1:39236_) [n:127.0.0.1:39236_ c:collection1   x:collection1] o.a.s.s.IndexSchema [collection1] default search field in schema is text. WARNING: Deprecated, please use 'df' on request instead.
   [junit4]   2> 687891 INFO  (coreLoadExecutor-3818-thread-1-processing-n:127.0.0.1:39236_) [n:127.0.0.1:39236_ c:collection1   x:collection1] o.a.s.s.IndexSchema Loaded schema test/1.0 with uniqueid field id
   [junit4]   2> 687904 INFO  (coreLoadExecutor-3818-thread-1-processing-n:127.0.0.1:39236_) [n:127.0.0.1:39236_ c:collection1   x:collection1] o.a.s.c.CoreContainer Creating SolrCore 'collection1' using configuration from collection collection1
   [junit4]   2> 687904 INFO  (coreLoadExecutor-3818-thread-1-processing-n:127.0.0.1:39236_) [n:127.0.0.1:39236_ c:collection1   x:collection1] o.a.s.c.SolrCore [[collection1] ] Opening new SolrCore at [/home/jenkins/workspace/Lucene-Solr-master-Linux/solr/build/solr-core/test/J2/temp/solr.cloud.LeaderFailoverAfterPartitionTest_749AE4B6842C4694-001/shard-2-001/cores/collection1], dataDir=[/home/jenkins/workspace/Lucene-Solr-master-Linux/solr/build/solr-core/test/J2/temp/solr.cloud.LeaderFailoverAfterPartitionTest_749AE4B6842C4694-001/shard-2-001/cores/collection1/data/]
   [junit4]   2> 687904 INFO  (coreLoadExecutor-3818-thread-1-processing-n:127.0.0.1:39236_) [n:127.0.0.1:39236_ c:collection1   x:collection1] o.a.s.c.JmxMonitoredMap JMX monitoring is enabled. Adding Solr mbeans to JMX Server: com.sun.jmx.mbeanserver.JmxMBeanServer@273d805e
   [junit4]   2> 687905 INFO  (coreLoadExecutor-3818-thread-1-processing-n:127.0.0.1:39236_) [n:127.0.0.1:39236_ c:collection1   x:collection1] o.a.s.u.RandomMergePolicy RandomMergePolicy wrapping class org.apache.lucene.index.LogByteSizeMergePolicy: [LogByteSizeMergePolicy: minMergeSize=1677721, mergeFactor=47, maxMergeSize=2147483648, maxMergeSizeForForcedMerge=9223372036854775807, calibrateSizeByDeletes=false, maxMergeDocs=2147483647, maxCFSSegmentSizeMB=8.796093022207999E12, noCFSRatio=1.0]
   [junit4]   2> 687910 WARN  (coreLoadExecutor-3818-thread-1-processing-n:127.0.0.1:39236_) [n:127.0.0.1:39236_ c:collection1   x:collection1] o.a.s.c.RequestHandlers INVALID paramSet a in requestHandler {type = requestHandler,name = /dump,class = DumpRequestHandler,attributes = {initParams=a, name=/dump, class=DumpRequestHandler},args = {defaults={a=A,b=B}}}
   [junit4]   2> 687918 INFO  (coreLoadExecutor-3818-thread-1-processing-n:127.0.0.1:39236_) [n:127.0.0.1:39236_ c:collection1   x:collection1] o.a.s.u.UpdateHandler Using UpdateLog implementation: org.apache.solr.update.UpdateLog
   [junit4]   2> 687918 INFO  (coreLoadExecutor-3818-thread-1-processing-n:127.0.0.1:39236_) [n:127.0.0.1:39236_ c:collection1   x:collection1] o.a.s.u.UpdateLog Initializing UpdateLog: dataDir= defaultSyncLevel=FLUSH numRecordsToKeep=100 maxNumLogsToKeep=10 numVersionBuckets=65536
   [junit4]   2> 687919 INFO  (coreLoadExecutor-3818-thread-1-processing-n:127.0.0.1:39236_) [n:127.0.0.1:39236_ c:collection1   x:collection1] o.a.s.u.CommitTracker Hard AutoCommit: disabled
   [junit4]   2> 687919 INFO  (coreLoadExecutor-3818-thread-1-processing-n:127.0.0.1:39236_) [n:127.0.0.1:39236_ c:collection1   x:collection1] o.a.s.u.CommitTracker Soft AutoCommit: disabled
   [junit4]   2> 687921 INFO  (coreLoadExecutor-3818-thread-1-processing-n:127.0.0.1:39236_) [n:127.0.0.1:39236_ c:collection1   x:collection1] o.a.s.u.RandomMergePolicy RandomMergePolicy wrapping class org.apache.lucene.index.LogDocMergePolicy: [LogDocMergePolicy: minMergeSize=1000, mergeFactor=20, maxMergeSize=9223372036854775807, maxMergeSizeForForcedMerge=9223372036854775807, calibrateSizeByDeletes=true, maxMergeDocs=2147483647, maxCFSSegmentSizeMB=8.796093022207999E12, noCFSRatio=1.0]
   [junit4]   2> 687921 INFO  (coreLoadExecutor-3818-thread-1-processing-n:127.0.0.1:39236_) [n:127.0.0.1:39236_ c:collection1   x:collection1] o.a.s.s.SolrIndexSearcher Opening [Searcher@769e3fc3[collection1] main]
   [junit4]   2> 687922 INFO  (coreLoadExecutor-3818-thread-1-processing-n:127.0.0.1:39236_) [n:127.0.0.1:39236_ c:collection1   x:collection1] o.a.s.r.ManagedResourceStorage Configured ZooKeeperStorageIO with znodeBase: /configs/conf1
   [junit4]   2> 687922 INFO  (coreLoadExecutor-3818-thread-1-processing-n:127.0.0.1:39236_) [n:127.0.0.1:39236_ c:collection1   x:collection1] o.a.s.r.ManagedResourceStorage Loaded null at path _rest_managed.json using ZooKeeperStorageIO:path=/configs/conf1
   [junit4]   2> 687922 INFO  (coreLoadExecutor-3818-thread-1-processing-n:127.0.0.1:39236_) [n:127.0.0.1:39236_ c:collection1   x:collection1] o.a.s.h.ReplicationHandler Commits will be reserved for  10000
   [junit4]   2> 687923 INFO  (coreLoadExecutor-3818-thread-1-processing-n:127.0.0.1:39236_) [n:127.0.0.1:39236_ c:collection1   x:collection1] o.a.s.u.UpdateLog Could not find max version in index or recent updates, using new clock 1555931727920103424
   [junit4]   2> 687924 INFO  (searcherExecutor-3819-thread-1-processing-n:127.0.0.1:39236_ x:collection1 c:collection1) [n:127.0.0.1:39236_ c:collection1   x:collection1] o.a.s.c.SolrCore [collection1] Registered new searcher Searcher@769e3fc3[collection1] main{ExitableDirectoryReader(UninvertingDirectoryReader())}
   [junit4]   2> 687927 INFO  (coreZkRegister-3813-thread-1-processing-n:127.0.0.1:39236_ x:collection1 c:collection1) [n:127.0.0.1:39236_ c:collection1 s:shard1 r:core_node2 x:collection1] o.a.s.c.ShardLeaderElectionContext Enough replicas found to continue.
   [junit4]   2> 687927 INFO  (coreZkRegister-3813-thread-1-processing-n:127.0.0.1:39236_ x:collection1 c:collection1) [n:127.0.0.1:39236_ c:collection1 s:shard1 r:core_node2 x:collection1] o.a.s.c.ShardLeaderElectionContext I may be the new leader - try and sync
   [junit4]   2> 687927 INFO  (coreZkRegister-3813-thread-1-processing-n:127.0.0.1:39236_ x:collection1 c:collection1) [n:127.0.0.1:39236_ c:collection1 s:shard1 r:core_node2 x:collection1] o.a.s.c.SyncStrategy Sync replicas to http://127.0.0.1:39236/collection1/
   [junit4]   2> 687927 INFO  (coreZkRegister-3813-thread-1-processing-n:127.0.0.1:39236_ x:collection1 c:collection1) [n:127.0.0.1:39236_ c:collection1 s:shard1 r:core_node2 x:collection1] o.a.s.c.SyncStrategy Sync Success - now sync replicas to me
   [junit4]   2> 687927 INFO  (coreZkRegister-3813-thread-1-processing-n:127.0.0.1:39236_ x:collection1 c:collection1) [n:127.0.0.1:39236_ c:collection1 s:shard1 r:core_node2 x:collection1] o.a.s.c.SyncStrategy http://127.0.0.1:39236/collection1/ has no replicas
   [junit4]   2> 687929 INFO  (coreZkRegister-3813-thread-1-processing-n:127.0.0.1:39236_ x:collection1 c:collection1) [n:127.0.0.1:39236_ c:collection1 s:shard1 r:core_node2 x:collection1] o.a.s.c.ShardLeaderElectionContext I am the new leader: http://127.0.0.1:39236/collection1/ shard1
   [junit4]   2> 688081 INFO  (coreZkRegister-3813-thread-1-processing-n:127.0.0.1:39236_ x:collection1 c:collection1) [n:127.0.0.1:39236_ c:collection1 s:shard1 r:core_node2 x:collection1] o.a.s.c.ZkController I am the leader, no recovery necessary
   [junit4]   2> 688245 INFO  (TEST-LeaderFailoverAfterPartitionTest.test-seed#[749AE4B6842C4694]) [    ] o.a.s.SolrTestCaseJ4 Writing core.properties file to /home/jenkins/workspace/Lucene-Solr-master-Linux/solr/build/solr-core/test/J2/temp/solr.cloud.LeaderFailoverAfterPartitionTest_749AE4B6842C4694-001/shard-3-001/cores/collection1
   [junit4]   2> 688246 INFO  (TEST-LeaderFailoverAfterPartitionTest.test-seed#[749AE4B6842C4694]) [    ] o.a.s.c.AbstractFullDistribZkTestBase create jetty 3 in directory /home/jenkins/workspace/Lucene-Solr-master-Linux/solr/build/solr-core/test/J2/temp/solr.cloud.LeaderFailoverAfterPartitionTest_749AE4B6842C4694-001/shard-3-001
   [junit4]   2> 688246 INFO  (TEST-LeaderFailoverAfterPartitionTest.test-seed#[749AE4B6842C4694]) [    ] o.e.j.s.Server jetty-9.3.14.v20161028
   [junit4]   2> 688251 INFO  (TEST-LeaderFailoverAfterPartitionTest.test-seed#[749AE4B6842C4694]) [    ] o.e.j.s.h.ContextHandler Started o.e.j.s.ServletContextHandler@441971a5{/,null,AVAILABLE}
   [junit4]   2> 688252 INFO  (TEST-LeaderFailoverAfterPartitionTest.test-seed#[749AE4B6842C4694]) [    ] o.e.j.s.AbstractConnector Started ServerConnector@2b301ba3{HTTP/1.1,[http/1.1]}{127.0.0.1:35468}
   [junit4]   2> 688252 INFO  (TEST-LeaderFailoverAfterPartitionTest.test-seed#[749AE4B6842C4694]) [    ] o.e.j.s.Server Started @690781ms
   [junit4]   2> 688252 INFO  (TEST-LeaderFailoverAfterPartitionTest.test-seed#[749AE4B6842C4694]) [    ] o.a.s.c.s.e.JettySolrRunner Jetty properties: {solr.data.dir=/home/jenkins/workspace/Lucene-Solr-master-Linux/solr/build/solr-core/test/J2/temp/solr.cloud.LeaderFailoverAfterPartitionTest_749AE4B6842C4694-001/tempDir-001/jetty3, solrconfig=solrconfig.xml, hostContext=/, hostPort=41282, coreRootDirectory=/home/jenkins/workspace/Lucene-Solr-master-Linux/solr/build/solr-core/test/J2/temp/solr.cloud.LeaderFailoverAfterPartitionTest_749AE4B6842C4694-001/shard-3-001/cores}
   [junit4]   2> 688252 ERROR (TEST-LeaderFailoverAfterPartitionTest.test-seed#[749AE4B6842C4694]) [    ] o.a.s.s.StartupLoggingUtils Missing Java Option solr.log.dir. Logging may be missing or incomplete.
   [junit4]   2> 688252 INFO  (TEST-LeaderFailoverAfterPartitionTest.test-seed#[749AE4B6842C4694]) [    ] o.a.s.s.SolrDispatchFilter  ___      _       Welcome to Apache Solr™ version 7.0.0
   [junit4]   2> 688252 INFO  (TEST-LeaderFailoverAfterPartitionTest.test-seed#[749AE4B6842C4694]) [    ] o.a.s.s.SolrDispatchFilter / __| ___| |_ _   Starting in cloud mode on port null
   [junit4]   2> 688252 INFO  (TEST-LeaderFailoverAfterPartitionTest.test-seed#[749AE4B6842C4694]) [    ] o.a.s.s.SolrDispatchFilter \__ \/ _ \ | '_|  Install dir: null
   [junit4]   2> 688252 INFO  (TEST-LeaderFailoverAfterPartitionTest.test-seed#[749AE4B6842C4694]) [    ] o.a.s.s.SolrDispatchFilter |___/\___/_|_|    Start time: 2017-01-08T05:08:47.328Z
   [junit4]   2> 688256 INFO  (TEST-LeaderFailoverAfterPartitionTest.test-seed#[749AE4B6842C4694]) [    ] o.a.s.s.SolrDispatchFilter Loading solr.xml from SolrHome (not found in ZooKeeper)
   [junit4]   2> 688257 INFO  (TEST-LeaderFailoverAfterPartitionTest.test-seed#[749AE4B6842C4694]) [    ] o.a.s.c.SolrXmlConfig Loading container configuration from /home/jenkins/workspace/Lucene-Solr-master-Linux/solr/build/solr-core/test/J2/temp/solr.cloud.LeaderFailoverAfterPartitionTest_749AE4B6842C4694-001/shard-3-001/solr.xml
   [junit4]   2> 688262 INFO  (TEST-LeaderFailoverAfterPartitionTest.test-seed#[749AE4B6842C4694]) [    ] o.a.s.c.ZkContainer Zookeeper client=127.0.0.1:40734/solr
   [junit4]   2> 688268 INFO  (TEST-LeaderFailoverAfterPartitionTest.test-seed#[749AE4B6842C4694]) [n:127.0.0.1:41282_    ] o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (0) -> (3)
   [junit4]   2> 688271 INFO  (TEST-LeaderFailoverAfterPartitionTest.test-seed#[749AE4B6842C4694]) [n:127.0.0.1:41282_    ] o.a.s.c.ZkController Register node as live in ZooKeeper:/live_nodes/127.0.0.1:41282_
   [junit4]   2> 688271 INFO  (zkCallback-3408-thread-1-processing-n:127.0.0.1:39236_) [n:127.0.0.1:39236_    ] o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (3) -> (4)
   [junit4]   2> 688271 INFO  (zkCallback-3397-thread-1) [    ] o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (3) -> (4)
   [junit4]   2> 688271 INFO  (zkCallback-3393-thread-1-processing-n:127.0.0.1:39961_) [n:127.0.0.1:39961_    ] o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (3) -> (4)
   [junit4]   2> 688272 INFO  (zkCallback-3414-thread-1-processing-n:127.0.0.1:41282_) [n:127.0.0.1:41282_    ] o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (3) -> (4)
   [junit4]   2> 688272 INFO  (zkCallback-3402-thread-1-processing-n:127.0.0.1:36407_) [n:127.0.0.1:36407_    ] o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (3) -> (4)
   [junit4]   2> 688323 INFO  (TEST-LeaderFailoverAfterPartitionTest.test-seed#[749AE4B6842C4694]) [n:127.0.0.1:41282_    ] o.a.s.c.CorePropertiesLocator Found 1 core definitions underneath /home/jenkins/workspace/Lucene-Solr-master-Linux/solr/build/solr-core/test/J2/temp/solr.cloud.LeaderFailoverAfterPartitionTest_749AE4B6842C4694-001/shard-3-001/cores
   [junit4]   2> 688323 INFO  (TEST-LeaderFailoverAfterPartitionTest.test-seed#[749AE4B6842C4694]) [n:127.0.0.1:41282_    ] o.a.s.c.CorePropertiesLocator Cores are: [collection1]
   [junit4]   2> 688325 INFO  (OverseerStateUpdate-97245732692688900-127.0.0.1:39961_-n_0000000000) [n:127.0.0.1:39961_    ] o.a.s.c.o.ReplicaMutator Assigning new node to shard shard=shard2
   [junit4]   2> 689333 INFO  (coreLoadExecutor-3829-thread-1-processing-n:127.0.0.1:41282_) [n:127.0.0.1:41282_ c:collection1   x:collection1] o.a.s.c.SolrConfig Using Lucene MatchVersion: 7.0.0
   [junit4]   2> 689357 INFO  (coreLoadExecutor-3829-thread-1-processing-n:127.0.0.1:41282_) [n:127.0.0.1:41282_ c:collection1   x:collection1] o.a.s.s.IndexSchema [collection1] Schema name=test
   [junit4]   2> 689431 WARN  (coreLoadExecutor-3829-thread-1-processing-n:127.0.0.1:41282_) [n:127.0.0.1:41282_ c:collection1   x:collection1] o.a.s.s.IndexSchema [collection1] default search field in schema is text. WARNING: Deprecated, please use 'df' on request instead.
   [junit4]   2> 689434 INFO  (coreLoadExecutor-3829-thread-1-processing-n:127.0.0.1:41282_) [n:127.0.0.1:41282_ c:collection1   x:collection1] o.a.s.s.IndexSchema Loaded schema test/1.0 with uniqueid field id
   [junit4]   2> 689442 INFO  (coreLoadExecutor-3829-thread-1-processing-n:127.0.0.1:41282_) [n:127.0.0.1:41282_ c:collection1   x:collection1] o.a.s.c.CoreContainer Creating SolrCore 'collection1' using configuration from collection collection1
   [junit4]   2> 689442 INFO  (coreLoadExecutor-3829-thread-1-processing-n:127.0.0.1:41282_) [n:127.0.0.1:41282_ c:collection1   x:collection1] o.a.s.c.SolrCore [[collection1] ] Opening new SolrCore at [/home/jenkins/workspace/Lucene-Solr-master-Linux/solr/build/solr-core/test/J2/temp/solr.cloud.LeaderFailoverAfterPartitionTest_749AE4B6842C4694-001/shard-3-001/cores/collection1], dataDir=[/home/jenkins/workspace/Lucene-Solr-master-Linux/solr/build/solr-core/test/J2/temp/solr.cloud.LeaderFailoverAfterPartitionTest_749AE4B6842C4694-001/shard-3-001/cores/collection1/data/]
   [junit4]   2> 689442 INFO  (coreLoadExecutor-3829-thread-1-processing-n:127.0.0.1:41282_) [n:127.0.0.1:41282_ c:collection1   x:collection1] o.a.s.c.JmxMonitoredMap JMX monitoring is enabled. Adding Solr mbeans to JMX Server: com.sun.jmx.mbeanserver.JmxMBeanServer@273d805e
   [junit4]   2> 689444 INFO  (coreLoadExecutor-3829-thread-1-processing-n:127.0.0.1:41282_) [n:127.0.0.1:41282_ c:collection1   x:collection1] o.a.s.u.RandomMergePolicy RandomMergePolicy wrapping class org.apache.lucene.index.LogByteSizeMergePolicy: [LogByteSizeMergePolicy: minMergeSize=1677721, mergeFactor=47, maxMergeSize=2147483648, maxMergeSizeForForcedMerge=9223372036854775807, calibrateSizeByDeletes=false, maxMergeDocs=2147483647, maxCFSSegmentSizeMB=8.796093022207999E12, noCFSRatio=1.0]
   [junit4]   2> 689448 WARN  (coreLoadExecutor-3829-thread-1-processing-n:127.0.0.1:41282_) [n:127.0.0.1:41282_ c:collection1   x:collection1] o.a.s.c.RequestHandlers INVALID paramSet a in requestHandler {type = requestHandler,name = /dump,class = DumpRequestHandler,attributes = {initParams=a, name=/dump, class=DumpRequestHandler},args = {defaults={a=A,b=B}}}
   [junit4]   2> 689458 INFO  (coreLoadExecutor-3829-thread-1-processing-n:127.0.0.1:41282_) [n:127.0.0.1:41282_ c:collection1   x:collection1] o.a.s.u.UpdateHandler Using UpdateLog implementation: org.apache.solr.update.UpdateLog
   [junit4]   2> 689458 INFO  (coreLoadExecutor-3829-thread-1-processing-n:127.0.0.1:41282_) [n:127.0.0.1:41282_ c:collection1   x:collection1] o.a.s.u.UpdateLog Initializing UpdateLog: dataDir= defaultSyncLevel=FLUSH numRecordsToKeep=100 maxNumLogsToKeep=10 numVersionBuckets=65536
   [junit4]   2> 689459 INFO  (coreLoadExecutor-3829-thread-1-processing-n:127.0.0.1:41282_) [n:127.0.0.1:41282_ c:collection1   x:collection1] o.a.s.u.CommitTracker Hard AutoCommit: disabled
   [junit4]   2> 689459 INFO  (coreLoadExecutor-3829-thread-1-processing-n:127.0.0.1:41282_) [n:127.0.0.1:41282_ c:collection1   x:collection1] o.a.s.u.CommitTracker Soft AutoCommit: disabled
   [junit4]   2> 689461 INFO  (coreLoadExecutor-3829-thread-1-processing-n:127.0.0.1:41282_) [n:127.0.0.1:41282_ c:collection1   x:collection1] o.a.s.u.RandomMergePolicy RandomMergePolicy wrapping class org.apache.lucene.index.LogDocMergePolicy: [LogDocMergePolicy: minMergeSize=1000, mergeFactor=20, maxMergeSize=9223372036854775807, maxMergeSizeForForcedMerge=9223372036854775807, calibrateSizeByDeletes=true, maxMergeDocs=2147483647, maxCFSSegmentSizeMB=8.796093022207999E12, noCFSRatio=1.0]
   [junit4]   2> 689461 INFO  (coreLoadExecutor-3829-thread-1-processing-n:127.0.0.1:41282_) [n:127.0.0.1:41282_ c:collection1   x:collection1] o.a.s.s.SolrIndexSearcher Opening [Searcher@3de9b1c1[collection1] main]
   [junit4]   2> 689462 INFO  (coreLoadExecutor-3829-thread-1-processing-n:127.0.0.1:41282_) [n:127.0.0.1:41282_ c:collection1   x:collection1] o.a.s.r.ManagedResourceStorage Configured ZooKeeperStorageIO with znodeBase: /configs/conf1
   [junit4]   2> 689462 INFO  (coreLoadExecutor-3829-thread-1-processing-n:127.0.0.1:41282_) [n:127.0.0.1:41282_ c:collection1   x:collection1] o.a.s.r.ManagedResourceStorage Loaded null at path _rest_managed.json using ZooKeeperStorageIO:path=/configs/conf1
   [junit4]   2> 689462 INFO  (coreLoadExecutor-3829-thread-1-processing-n:127.0.0.1:41282_) [n:127.0.0.1:41282_ c:collection1   x:collection1] o.a.s.h.ReplicationHandler Commits will be reserved for  10000
   [junit4]   2> 689463 INFO  (searcherExecutor-3830-thread-1-processing-n:127.0.0.1:41282_ x:collection1 c:collection1) [n:127.0.0.1:41282_ c:collection1   x:collection1] o.a.s.c.SolrCore [collection1] Registered new searcher Searcher@3de9b1c1[collection1] main{ExitableDirectoryReader(UninvertingDirectoryReader())}
   [junit4]   2> 689464 INFO  (coreLoadExecutor-3829-thread-1-processing-n:127.0.0.1:41282_) [n:127.0.0.1:41282_ c:collection1   x:collection1] o.a.s.u.UpdateLog Could not find max version in index or recent updates, using new clock 1555931729535959040
   [junit4]   2> 689469 INFO  (coreZkRegister-3824-thread-1-processing-n:127.0.0.1:41282_ x:collection1 c:collection1) [n:127.0.0.1:41282_ c:collection1 s:shard2 r:core_node3 x:collection1] o.a.s.c.ZkController Core needs to recover:collection1
   [junit4]   2> 689469 INFO  (updateExecutor-3411-thread-1-processing-n:127.0.0.1:41282_ x:collection1 s:shard2 c:collection1 r:core_node3) [n:127.0.0.1:41282_ c:collection1 s:shard2 r:core_node3 x:collection1] o.a.s.u.DefaultSolrCoreState Running recovery
   [junit4]   2> 689470 INFO  (recoveryExecutor-3412-thread-1-processing-n:127.0.0.1:41282_ x:collection1 s:shard2 c:collection1 r:core_node3) [n:127.0.0.1:41282_ c:collection1 s:shard2 r:core_node3 x:collection1] o.a.s.c.RecoveryStrategy Starting recovery process. recoveringAfterStartup=true
   [junit4]   2> 689470 INFO  (recoveryExecutor-3412-thread-1-processing-n:127.0.0.1:41282_ x:collection1 s:shard2 c:collection1 r:core_node3) [n:127.0.0.1:41282_ c:collection1 s:shard2 r:core_node3 x:collection1] o.a.s.c.RecoveryStrategy ###### startupVersions=[[]]
   [junit4]   2> 689470 INFO  (recoveryExecutor-3412-thread-1-processing-n:127.0.0.1:41282_ x:collection1 s:shard2 c:collection1 r:core_node3) [n:127.0.0.1:41282_ c:collection1 s:shard2 r:core_node3 x:collection1] o.a.s.c.RecoveryStrategy Begin buffering updates. core=[collection1]
   [junit4]   2> 689470 INFO  (recoveryExecutor-3412-thread-1-processing-n:127.0.0.1:41282_ x:collection1 s:shard2 c:collection1 r:core_node3) [n:127.0.0.1:41282_ c:collection1 s:shard2 r:core_node3 x:collection1] o.a.s.u.UpdateLog Starting to buffer updates. FSUpdateLog{state=ACTIVE, tlog=null}
   [junit4]   2> 689470 INFO  (recoveryExecutor-3412-thread-1-processing-n:127.0.0.1:41282_ x:collection1 s:shard2 c:collection1 r:core_node3) [n:127.0.0.1:41282_ c:collection1 s:shard2 r:core_node3 x:collection1] o.a.s.c.RecoveryStrategy Publishing state of core [collection1] as recovering, leader is [http://127.0.0.1:36407/collection1/] and I am [http://127.0.0.1:41282/collection1/]
   [junit4]   2> 689473 INFO  (recoveryExecutor-3412-thread-1-processing-n:127.0.0.1:41282_ x:collection1 s:shard2 c:collection1 r:core_node3) [n:127.0.0.1:41282_ c:collection1 s:shard2 r:core_node3 x:collection1] o.a.s.c.RecoveryStrategy Sending prep recovery command to [http://127.0.0.1:36407]; [WaitForState: action=PREPRECOVERY&core=collection1&nodeName=127.0.0.1:41282_&coreNodeName=core_node3&state=recovering&checkLive=true&onlyIfLeader=true&onlyIfLeaderActive=true]
   [junit4]   2> 689473 INFO  (SocketProxy-Acceptor-36407) [    ] o.a.s.c.SocketProxy accepted Socket[addr=/127.0.0.1,port=48854,localport=36407], receiveBufferSize:531000
   [junit4]   2> 689474 INFO  (SocketProxy-Acceptor-36407) [    ] o.a.s.c.SocketProxy proxy connection Socket[addr=/127.0.0.1,port=40102,localport=46786], receiveBufferSize=530904
   [junit4]   2> 689474 INFO  (qtp658637168-14560) [n:127.0.0.1:36407_    ] o.a.s.h.a.PrepRecoveryOp Going to wait for coreNodeName: core_node3, state: recovering, checkLive: true, onlyIfLeader: true, onlyIfLeaderActive: true
   [junit4]   2> 689475 INFO  (qtp658637168-14560) [n:127.0.0.1:36407_    ] o.a.s.h.a.PrepRecoveryOp Will wait a max of 183 seconds to see collection1 (shard2 of collection1) have state: recovering
   [junit4]   2> 689475 INFO  (qtp658637168-14560) [n:127.0.0.1:36407_    ] o.a.s.h.a.PrepRecoveryOp In WaitForState(recovering): collection=collection1, shard=shard2, thisCore=collection1, leaderDoesNotNeedRecovery=false, isLeader? true, live=true, checkLive=true, currentState=down, localState=active, nodeName=127.0.0.1:41282_, coreNodeName=core_node3, onlyIfActiveCheckResult=false, nodeProps: core_node3:{"core":"collection1","base_url":"http://127.0.0.1:41282","node_name":"127.0.0.1:41282_","state":"down"}
   [junit4]   2> 689668 INFO  (TEST-LeaderFailoverAfterPartitionTest.test-seed#[749AE4B6842C4694]) [    ] o.a.s.SolrTestCaseJ4 ###Starting test
   [junit4]   2> 689668 INFO  (TEST-LeaderFailoverAfterPartitionTest.test-seed#[749AE4B6842C4694]) [    ] o.a.s.c.AbstractFullDistribZkTestBase Wait for recoveries to finish - wait 30000 for each attempt
   [junit4]   2> 689668 INFO  (TEST-LeaderFailoverAfterPartitionTest.test-seed#[749AE4B6842C4694]) [    ] o.a.s.c.AbstractDistribZkTestBase Wait for recoveries to finish - collection: collection1 failOnTimeout:true timeout (sec):30000
   [junit4]   2> 690475 INFO  (qtp658637168-14560) [n:127.0.0.1:36407_    ] o.a.s.h.a.PrepRecoveryOp In WaitForState(recovering): collection=collection1, shard=shard2, thisCore=collection1, leaderDoesNotNeedRecovery=false, isLeader? true, live=true, checkLive=true, currentState=recovering, localState=active, nodeName=127.0.0.1:41282_, coreNodeName=core_node3, onlyIfActiveCheckResult=false, nodeProps: core_node3:{"core":"collection1","base_url":"http://127.0.0.1:41282","node_name":"127.0.0.1:41282_","state":"recovering"}
   [junit4]   2> 690475 INFO  (qtp658637168-14560) [n:127.0.0.1:36407_    ] o.a.s.h.a.PrepRecoveryOp Waited coreNodeName: core_node3, state: recovering, checkLive: true, onlyIfLeader: true for: 1 seconds.
   [junit4]   2> 690475 INFO  (qtp658637168-14560) [n:127.0.0.1:36407_    ] o.a.s.s.HttpSolrCall [admin] webapp=null path=/admin/cores params={nodeName=127.0.0.1:41282_&onlyIfLeaderActive=true&core=collection1&coreNodeName=core_node3&action=PREPRECOVERY&checkLive=true&state=recovering&onlyIfLeader=true&wt=javabin&version=2} status=0 QTime=1000
   [junit4]   2> 697476 INFO  (recoveryExecutor-3412-thread-1-processing-n:127.0.0.1:41282_ x:collection1 s:shard2 c:collection1 r:core_node3) [n:127.0.0.1:41282_ c:collection1 s:shard2 r:core_node3 x:collection1] o.a.s.c.RecoveryStrategy Attempting to PeerSync from [http://127.0.0.1:36407/collection1/] - recoveringAfterStartup=[true]
   [junit4]   2> 697477 INFO  (recoveryExecutor-3412-thread-1-processing-n:127.0.0.1:41282_ x:collection1 s:shard2 c:collection1 r:core_node3) [n:127.0.0.1:41282_ c:collection1 s:shard2 r:core_node3 x:collection1] o.a.s.u.PeerSync PeerSync: core=collection1 url=http://127.0.0.1:41282 START replicas=[http://127.0.0.1:36407/collection1/] nUpdates=100
   [junit4]   2> 697477 INFO  (SocketProxy-Acceptor-36407) [    ] o.a.s.c.SocketProxy accepted Socket[addr=/127.0.0.1,port=48872,localport=36407], receiveBufferSize:531000
   [junit4]   2> 697478 INFO  (SocketProxy-Acceptor-36407) [    ] o.a.s.c.SocketProxy proxy connection Socket[addr=/127.0.0.1,port=40102,localport=46804], receiveBufferSize=530904
   [junit4]   2> 697478 INFO  (qtp658637168-14555) [n:127.0.0.1:36407_ c:collection1 s:shard2 r:core_node1 x:collection1] o.a.s.u.IndexFingerprint IndexFingerprint millis:0.0 result:{maxVersionSpecified=9223372036854775807, maxVersionEncountered=0, maxInHash=0, versionsHash=0, numVersions=0, numDocs=0, maxDoc=0}
   [junit4]   2> 697479 INFO  (qtp658637168-14555) [n:127.0.0.1:36407_ c:collection1 s:shard2 r:core_node1 x:collection1] o.a.s.c.S.Request [collection1]  webapp= path=/get params={distrib=false&qt=/get&getFingerprint=9223372036854775807&wt=javabin&version=2} status=0 QTime=0
   [junit4]   2> 697479 INFO  (recoveryExecutor-3412-thread-1-processing-n:127.0.0.1:41282_ x:collection1 s:shard2 c:collection1 r:core_node3) [n:127.0.0.1:41282_ c:collection1 s:shard2 r:core_node3 x:collection1] o.a.s.u.IndexFingerprint IndexFingerprint millis:0.0 result:{maxVersionSpecified=9223372036854775807, maxVersionEncountered=0, maxInHash=0, versionsHash=0, numVersions=0, numDocs=0, maxDoc=0}
   [junit4]   2> 697479 INFO  (recoveryExecutor-3412-thread-1-processing-n:127.0.0.1:41282_ x:collection1 s:shard2 c:collection1 r:core_node3) [n:127.0.0.1:41282_ c:collection1 s:shard2 r:core_node3 x:collection1] o.a.s.u.PeerSync We are already in sync. No need to do a PeerSync 
   [junit4]   2> 697479 INFO  (recoveryExecutor-3412-thread-1-processing-n:127.0.0.1:41282_ x:collection1 s:shard2 c:collection1 r:core_node3) [n:127.0.0.1:41282_ c:collection1 s:shard2 r:core_node3 x:collection1] o.a.s.u.DirectUpdateHandler2 start commit{,optimize=false,openSearcher=true,waitSearcher=true,expungeDeletes=false,softCommit=false,prepareCommit=false}
   [junit4]   2> 697479 INFO  (recoveryExecutor-3412-thread-1-processing-n:127.0.0.1:41282_ x:collection1 s:shard2 c:collection1 r:core_node3) [n:127.0.0.1:41282_ c:collection1 s:shard2 r:core_node3 x:collection1] o.a.s.u.DirectUpdateHandler2 No uncommitted changes. Skipping IW.commit.
   [junit4]   2> 697480 INFO  (recoveryExecutor-3412-thread-1-processing-n:127.0.0.1:41282_ x:collection1 s:shard2 c:collection1 r:core_node3) [n:127.0.0.1:41282_ c:collection1 s:shard2 r:core_node3 x:collection1] o.a.s.u.DirectUpdateHandler2 end_commit_flush
   [junit4]   2> 697480 INFO  (recoveryExecutor-3412-thread-1-processing-n:127.0.0.1:41282_ x:collection1 s:shard2 c:collection1 r:core_node3) [n:127.0.0.1:41282_ c:collection1 s:shard2 r:core_node3 x:collection1] o.a.s.c.RecoveryStrategy PeerSync stage of recovery was successful.
   [junit4]   2> 697480 INFO  (recoveryExecutor-3412-thread-1-processing-n:127.0.0.1:41282_ x:collection1 s:shard2 c:collection1 r:core_node3) [n:127.0.0.1:41282_ c:collection1 s:shard2 r:core_node3 x:collection1] o.a.s.c.RecoveryStrategy Replaying updates buffered during PeerSync.
   [junit4]   2> 697480 INFO  (recoveryExecutor-3412-thread-1-processing-n:127.0.0.1:41282_ x:collection1 s:shard2 c:collection1 r:core_node3) [n:127.0.0.1:41282_ c:collection1 s:shard2 r:core_node3 x:collection1] o.a.s.c.RecoveryStrategy No replay needed.
   [junit4]   2> 697480 INFO  (recoveryExecutor-3412-thread-1-processing-n:127.0.0.1:41282_ x:collection1 s:shard2 c:collection1 r:core_node3) [n:127.0.0.1:41282_ c:collection1 s:shard2 r:core_node3 x:collection1] o.a.s.c.RecoveryStrategy Registering as Active after recovery.
   [junit4]   2> 697669 INFO  (TEST-LeaderFailoverAfterPartitionTest.test-seed#[749AE4B6842C4694]) [    ] o.a.s.c.AbstractDistribZkTestBase Recoveries finished - collection: collection1
   [junit4]   2> 697670 INFO  (SocketProxy-Acceptor-39961) [    ] o.a.s.c.SocketProxy accepted Socket[addr=/127.0.0.1,port=45004,localport=39961], receiveBufferSize:531000
   [junit4]   2> 697670 INFO  (SocketProxy-Acceptor-39961) [    ] o.a.s.c.SocketProxy proxy connection Socket[addr=/127.0.0.1,port=43297,localport=51328], receiveBufferSize=530904
   [junit4]   2> 697671 INFO  (qtp210716555-14523) [n:127.0.0.1:39961_ c:control_collection s:shard1 r:core_node1 x:collection1] o.a.s.u.DirectUpdateHandler2 start commit{,optimize=false,openSearcher=true,waitSearcher=true,expungeDeletes=false,softCommit=false,prepareCommit=false}
   [junit4]   2> 697671 INFO  (qtp210716555-14523) [n:127.0.0.1:39961_ c:control_collection s:shard1 r:core_node1 x:collection1] o.a.s.u.DirectUpdateHandler2 No uncommitted changes. Skipping IW.commit.
   [junit4]   2> 697672 INFO  (qtp210716555-14523) [n:127.0.0.1:39961_ c:control_collection s:shard1 r:core_node1 x:collection1] o.a.s.u.DirectUpdateHandler2 end_commit_flush
   [junit4]   2> 697672 INFO  (qtp210716555-14523) [n:127.0.0.1:39961_ c:control_collection s:shard1 r:core_node1 x:collection1] o.a.s.u.p.LogUpdateProcessorFactory [collection1]  webapp= path=/update params={waitSearcher=true&commit=true&softCommit=false&wt=javabin&version=2}{commit=} 0 1
   [junit4]   2> 697673 INFO  (SocketProxy-Acceptor-39236) [    ] o.a.s.c.SocketProxy accepted Socket[addr=/127.0.0.1,port=56490,localport=39236], receiveBufferSize:531000
   [junit4]   2> 697674 INFO  (SocketProxy-Acceptor-39236) [    ] o.a.s.c.SocketProxy proxy connection Socket[addr=/127.0.0.1,port=36343,localport=51406], receiveBufferSize=530904
   [junit4]   2> 697677 INFO  (SocketProxy-Acceptor-39236) [    ] o.a.s.c.SocketProxy accepted Socket[addr=/127.0.0.1,port=56494,localport=39236], receiveBufferSize:531000
   [junit4]   2> 697677 INFO  (SocketProxy-Acceptor-41282) [    ] o.a.s.c.SocketProxy accepted Socket[addr=/127.0.0.1,port=58512,localport=41282], receiveBufferSize:531000
   [junit4]   2> 697677 INFO  (SocketProxy-Acceptor-36407) [    ] o.a.s.c.SocketProxy accepted Socket[addr=/127.0.0.1,port=48888,localport=36407], receiveBufferSize:531000
   [junit4]   2> 697678 INFO  (qtp658637168-14562) [n:127.0.0.1:36407_ c:collection1 s:shard2 r:core_node1 x:collection1] o.a.s.u.DirectUpdateHandler2 start commit{,optimize=false,openSearcher=true,waitSearcher=true,expungeDeletes=false,softCommit=false,prepareCommit=false}
   [junit4]   2> 697678 INFO  (qtp658637168-14562) [n:127.0.0.1:36407_ c:collection1 s:shard2 r:core_node1 x:collection1] o.a.s.u.DirectUpdateHandler2 No uncommitted changes. Skipping IW.commit.
   [junit4]   2> 697679 INFO  (SocketProxy-Acceptor-41282) [    ] o.a.s.c.SocketProxy proxy connection Socket[addr=/127.0.0.1,port=35468,localport=54026], receiveBufferSize=530904
   [junit4]   2> 697679 INFO  (qtp658637168-14562) [n:127.0.0.1:36407_ c:collection1 s:shard2 r:core_node1 x:collection1] o.a.s.u.DirectUpdateHandler2 end_commit_flush
   [junit4]   2> 697679 INFO  (qtp658637168-14562) [n:127.0.0.1:36407_ c:collection1 s:shard2 r:core_node1 x:collection1] o.a.s.u.p.LogUpdateProcessorFactory [collection1]  webapp= path=/update params={update.distrib=FROMLEADER&waitSearcher=true&openSearcher=true&commit=true&softCommit=false&distrib.from=http://127.0.0.1:39236/collection1/&commit_end_point=true&wt=javabin&version=2&expungeDeletes=false}{commit=} 0 1
   [junit4]   2> 697679 INFO  (qtp1859332264-14618) [n:127.0.0.1:41282_ c:collection1 s:shard2 r:core_node3 x:collection1] o.a.s.u.DirectUpdateHandler2 start commit{,optimize=false,openSearcher=true,waitSearcher=true,expungeDeletes=false,softCommit=false,prepareCommit=false}
   [junit4]   2> 697679 INFO  (qtp1859332264-14618) [n:127.0.0.1:41282_ c:collection1 s:shard2 r:core_node3 x:collection1] o.a.s.u.DirectUpdateHandler2 No uncommitted changes. Skipping IW.commit.
   [junit4]   2> 697679 INFO  (qtp1859332264-14618) [n:127.0.0.1:41282_ c:collection1 s:shard2 r:core_node3 x:collection1] o.a.s.u.DirectUpdateHandler2 end_commit_flush
   [junit4]   2> 697680 INFO  (qtp1859332264-14618) [n:127.0.0.1:41282_ c:collection1 s:shard2 r:core_node3 x:collection1] o.a.s.u.p.LogUpdateProcessorFactory [collection1]  webapp= path=/update params={update.distrib=FROMLEADER&waitSearcher=true&openSearcher=true&commit=true&softCommit=false&distrib.from=http://127.0.0.1:39236/collection1/&commit_end_point=true&wt=javabin&version=2&expungeDeletes=false}{commit=} 0 0
   [junit4]   2> 697680 INFO  (SocketProxy-Acceptor-36407) [    ] o.a.s.c.SocketProxy proxy connection Socket[addr=/127.0.0.1,port=40102,localport=46824], receiveBufferSize=530904
   [junit4]   2> 697680 INFO  (SocketProxy-Acceptor-39236) [    ] o.a.s.c.SocketProxy proxy connection Socket[addr=/127.0.0.1,port=36343,localport=51414], receiveBufferSize=530904
   [junit4]   2> 697680 INFO  (qtp897677364-14591) [n:127.0.0.1:39236_ c:collection1 s:shard1 r:core_node2 x:collection1] o.a.s.u.DirectUpdateHandler2 start commit{,optimize=false,openSearcher=true,waitSearcher=true,expungeDeletes=false,softCommit=false,prepareCommit=false}
   [junit4]   2> 697680 INFO  (qtp897677364-14591) [n:127.0.0.1:39236_ c:collection1 s:shard1 r:core_node2 x:collection1] o.a.s.u.DirectUpdateHandler2 No uncommitted changes. Skipping IW.commit.
   [junit4]   2> 697681 INFO  (qtp897677364-14591) [n:127.0.0.1:39236_ c:collection1 s:shard1 r:core_node2 x:collection1] o.a.s.u.DirectUpdateHandler2 end_commit_flush
   [junit4]   2> 697681 INFO  (qtp897677364-14591) [n:127.0.0.1:39236_ c:collection1 s:shard1 r:core_node2 x:collection1] o.a.s.u.p.LogUpdateProcessorFactory [collection1]  webapp= path=/update params={update.distrib=FROMLEADER&waitSearcher=true&openSearcher=true&commit=true&softCommit=false&distrib.from=http://127.0.0.1:39236/collection1/&commit_end_point=true&wt=javabin&version=2&expungeDeletes=false}{commit=} 0 1
   [junit4]   2> 697682 INFO  (qtp897677364-14589) [n:127.0.0.1:39236_ c:collection1 s:shard1 r:core_node2 x:collection1] o.a.s.u.p.LogUpdateProcessorFactory [collection1]  webapp= path=/update params={waitSearcher=true&commit=true&softCommit=false&wt=javabin&version=2}{commit=} 0 8
   [junit4]   2> 697683 INFO  (SocketProxy-Acceptor-36407) [    ] o.a.s.c.SocketProxy accepted Socket[addr=/127.0.0.1,port=48896,localport=36407], receiveBufferSize:531000
   [junit4]   2> 697684 INFO  (SocketProxy-Acceptor-36407) [    ] o.a.s.c.SocketProxy proxy connection Socket[addr=/127.0.0.1,port=40102,localport=46828], receiveBufferSize=530904
   [junit4]   2> 697685 INFO  (qtp658637168-14555) [n:127.0.0.1:36407_ c:collection1 s:shard2 r:core_node1 x:collection1] o.a.s.c.S.Request [collection1]  webapp= path=/select params={q=*:*&distrib=false&tests=checkShardConsistency&rows=0&wt=javabin&version=2} hits=0 status=0 QTime=0
   [junit4]   2> 697686 INFO  (SocketProxy-Acceptor-41282) [    ] o.a.s.c.SocketProxy accepted Socket[addr=/127.0.0.1,port=58526,localport=41282], receiveBufferSize:531000
   [junit4]   2> 697687 INFO  (qtp1859332264-14614) [n:127.0.0.1:41282_ c:collection1 s:shard2 r:core_node3 x:collection1] o.a.s.c.S.Request [collection1]  webapp= path=/select params={q=*:*&distrib=false&tests=checkShardConsistency&rows=0&wt=javabin&version=2} hits=0 status=0 QTime=0
   [junit4]   2> 697687 INFO  (SocketProxy-Acceptor-41282) [    ] o.a.s.c.SocketProxy proxy connection Socket[addr=/127.0.0.1,port=35468,localport=54036], receiveBufferSize=530904
   [junit4]   2> 697690 INFO  (SocketProxy-Acceptor-39236) [    ] o.a.s.c.SocketProxy accepted Socket[addr=/127.0.0.1,port=56514,localport=39236], receiveBufferSize:531000
   [junit4]   2> 697695 INFO  (SocketProxy-Acceptor-39236) [    ] o.a.s.c.SocketProxy proxy connection Socket[addr=/127.0.0.1,port=36343,localport=51430], receiveBufferSize=530904
   [junit4]   2> 697695 INFO  (qtp897677364-14585) [n:127.0.0.1:39236_ c:collection1 s:shard1 r:core_node2 x:collection1] o.a.s.c.S.Request [collection1]  webapp= path=/select params={q=*:*&distrib=false&tests=checkShardConsistency&rows=0&wt=javabin&version=2} hits=0 status=0 QTime=0
   [junit4]   2> 699696 INFO  (TEST-LeaderFailoverAfterPartitionTest.test-seed#[749AE4B6842C4694]) [    ] o.a.s.c.AbstractFullDistribZkTestBase Creating collection with stateFormat=1: c8n_1x3_lf
   [junit4]   2> 699698 INFO  (SocketProxy-Acceptor-39236) [    ] o.a.s.c.SocketProxy accepted Socket[addr=/127.0.0.1,port=56518,localport=39236], receiveBufferSize:531000
   [junit4]   2> 699698 INFO  (SocketProxy-Acceptor-39236) [    ] o.a.s.c.SocketProxy proxy connection Socket[addr=/127.0.0.1,port=36343,localport=51434], receiveBufferSize=530904
   [junit4]   2> 699698 INFO  (qtp897677364-14589) [n:127.0.0.1:39236_    ] o.a.s.h.a.CollectionsHandler Invoked Collection Action :create with params replicationFactor=3&maxShardsPerNode=1&name=c8n_1x3_lf&action=CREATE&numShards=1&stateFormat=1&wt=javabin&version=2 and sendToOCPQueue=true
   [junit4]   2> 699700 INFO  (OverseerThreadFactory-3794-thread-1-processing-n:127.0.0.1:39961_) [n:127.0.0.1:39961_    ] o.a.s.c.CreateCollectionCmd Create collection c8n_1x3_lf
   [junit4]   2> 699700 INFO  (OverseerThreadFactory-3794-thread-1-processing-n:127.0.0.1:39961_) [n:127.0.0.1:39961_    ] o.a.s.c.CreateCollectionCmd Only one config set found in zk - using it:conf1
   [junit4]   2> 699803 INFO  (SocketProxy-Acceptor-39961) [    ] o.a.s.c.SocketProxy accepted Socket[addr=/127.0.0.1,port=45044,localport=39961], receiveBufferSize:531000
   [junit4]   2> 699803 INFO  (SocketProxy-Acceptor-41282) [    ] o.a.s.c.SocketProxy accepted Socket[addr=/127.0.0.1,port=58538,localport=41282], receiveBufferSize:531000
   [junit4]   2> 699803 INFO  (SocketProxy-Acceptor-39236) [    ] o.a.s.c.SocketProxy accepted Socket[addr=/127.0.0.1,port=56522,localport=39236], receiveBufferSize:531000
   [junit4]   2> 699803 INFO  (SocketProxy-Acceptor-39961) [    ] o.a.s.c.SocketProxy proxy connection Socket[addr=/127.0.0.1,port=43297,localport=51368], receiveBufferSize=530904
   [junit4]   2> 699804 INFO  (SocketProxy-Acceptor-41282) [    ] o.a.s.c.SocketProxy proxy connection Socket[addr=/127.0.0.1,port=35468,localport=54054], receiveBufferSize=530904
   [junit4]   2> 699804 INFO  (SocketProxy-Acceptor-39236) [    ] o.a.s.c.SocketProxy proxy connection Socket[addr=/127.0.0.1,port=36343,localport=51446], receiveBufferSize=530904
   [junit4]   2> 699804 INFO  (qtp1859332264-14618) [n:127.0.0.1:41282_    ] o.a.s.h.a.CoreAdminOperation core create command qt=/admin/cores&collection.configName=conf1&newCollection=true&name=c8n_1x3_lf_shard1_replica2&action=CREATE&numShards=1&collection=c8n_1x3_lf&shard=shard1&wt=javabin&version=2
   [junit4]   2> 699804 INFO  (qtp897677364-14585) [n:127.0.0.1:39236_    ] o.a.s.h.a.CoreAdminOperation core create command qt=/admin/cores&collection.configName=conf1&newCollection=true&name=c8n_1x3_lf_shard1_replica3&action=CREATE&numShards=1&collection=c8n_1x3_lf&shard=shard1&wt=javabin&version=2
   [junit4]   2> 699804 INFO  (qtp210716555-14522) [n:127.0.0.1:39961_    ] o.a.s.h.a.CoreAdminOperation core create command qt=/admin/cores&collection.configName=conf1&newCollection=true&name=c8n_1x3_lf_shard1_replica1&action=CREATE&numShards=1&collection=c8n_1x3_lf&shard=shard1&wt=javabin&version=2
   [junit4]   2> 700814 INFO  (qtp897677364-14585) [n:127.0.0.1:39236_ c:c8n_1x3_lf s:shard1  x:c8n_1x3_lf_shard1_replica3] o.a.s.c.SolrConfig Using Lucene MatchVersion: 7.0.0
   [junit4]   2> 700816 INFO  (qtp1859332264-14618) [n:127.0.0.1:41282_ c:c8n_1x3_lf s:shard1  x:c8n_1x3_lf_shard1_replica2] o.a.s.c.SolrConfig Using Lucene MatchVersion: 7.0.0
   [junit4]   2> 700817 INFO  (qtp210716555-14522) [n:127.0.0.1:39961_ c:c8n_1x3_lf s:shard1  x:c8n_1x3_lf_shard1_replica1] o.a.s.c.SolrConfig Using Lucene MatchVersion: 7.0.0
   [junit4]   2> 700826 INFO  (qtp897677364-14585) [n:127.0.0.1:39236_ c:c8n_1x3_lf s:shard1  x:c8n_1x3_lf_shard1_replica3] o.a.s.s.IndexSchema [c8n_1x3_lf_shard1_replica3] Schema name=test
   [junit4]   2> 700867 INFO  (qtp1859332264-14618) [n:127.0.0.1:41282_ c:c8n_1x3_lf s:shard1  x:c8n_1x3_lf_shard1_replica2] o.a.s.s.IndexSchema [c8n_1x3_lf_shard1_replica2] Schema name=test
   [junit4]   2> 700867 INFO  (qtp210716555-14522) [n:127.0.0.1:39961_ c:c8n_1x3_lf s:shard1  x:c8n_1x3_lf_shard1_replica1] o.a.s.s.IndexSchema [c8n_1x3_lf_shard1_replica1] Schema name=test
   [junit4]   2> 701117 WARN  (qtp1859332264-14618) [n:127.0.0.1:41282_ c:c8n_1x3_lf s:shard1  x:c8n_1x3_lf_shard1_replica2] o.a.s.s.IndexSchema [c8n_1x3_lf_shard1_replica2] default search field in schema is text. WARNING: Deprecated, please use 'df' on request instead.
   [junit4]   2> 701121 INFO  (qtp1859332264-14618) [n:127.0.0.1:41282_ c:c8n_1x3_lf s:shard1  x:c8n_1x3_lf_shard1_replica2] o.a.s.s.IndexSchema Loaded schema test/1.0 with uniqueid field id
   [junit4]   2> 701134 INFO  (qtp1859332264-14618) [n:127.0.0.1:41282_ c:c8n_1x3_lf s:shard1  x:c8n_1x3_lf_shard1_replica2] o.a.s.c.CoreContainer Creating SolrCore 'c8n_1x3_lf_shard1_replica2' using configuration from collection c8n_1x3_lf
   [junit4]   2> 701134 INFO  (qtp1859332264-14618) [n:127.0.0.1:41282_ c:c8n_1x3_lf s:shard1  x:c8n_1x3_lf_shard1_replica2] o.a.s.c.SolrCore [[c8n_1x3_lf_shard1_replica2] ] Opening new SolrCore at [/home/jenkins/workspace/Lucene-Solr-master-Linux/solr/build/solr-core/test/J2/temp/solr.cloud.LeaderFailoverAfterPartitionTest_749AE4B6842C4694-001/shard-3-001/cores/c8n_1x3_lf_shard1_replica2], dataDir=[/home/jenkins/workspace/Lucene-Solr-master-Linux/solr/build/solr-core/test/J2/temp/solr.cloud.LeaderFailoverAfterPartitionTest_749AE4B6842C4694-001/shard-3-001/cores/c8n_1x3_lf_shard1_replica2/data/]
   [junit4]   2> 701135 INFO  (qtp1859332264-14618) [n:127.0.0.1:41282_ c:c8n_1x3_lf s:shard1  x:c8n_1x3_lf_shard1_replica2] o.a.s.c.JmxMonitoredMap JMX monitoring is enabled. Adding Solr mbeans to JMX Server: com.sun.jmx.mbeanserver.JmxMBeanServer@273d805e
   [junit4]   2> 701137 INFO  (qtp1859332264-14618) [n:127.0.0.1:41282_ c:c8n_1x3_lf s:shard1  x:c8n_1x3_lf_shard1_replica2] o.a.s.u.RandomMergePolicy RandomMergePolicy wrapping class org.apache.lucene.index.LogByteSizeMergePolicy: [LogByteSizeMergePolicy: minMergeSize=1677721, mergeFactor=47, maxMergeSize=2147483648, maxMergeSizeForForcedMerge=9223372036854775807, calibrateSizeByDeletes=false, maxMergeDocs=2147483647, maxCFSSegmentSizeMB=8.796093022207999E12, noCFSRatio=1.0]
   [junit4]   2> 701145 WARN  (qtp897677364-14585) [n:127.0.0.1:39236_ c:c8n_1x3_lf s:shard1  x:c8n_1x3_lf_shard1_replica3] o.a.s.s.IndexSchema [c8n_1x3_lf_shard1_replica3] default search field in schema is text. WARNING: Deprecated, please use 'df' on request instead.
   [junit4]   2> 701147 WARN  (qtp1859332264-14618) [n:127.0.0.1:41282_ c:c8n_1x3_lf s:shard1  x:c8n_1x3_lf_shard1_replica2] o.a.s.c.RequestHandlers INVALID paramSet a in requestHandler {type = requestHandler,name = /dump,class = DumpRequestHandler,attributes = {initParams=a, name=/dump, class=DumpRequestHandler},args = {defaults={a=A,b=B}}}
   [junit4]   2> 701147 INFO  (qtp897677364-14585) [n:127.0.0.1:39236_ c:c8n_1x3_lf s:shard1  x:c8n_1x3_lf_shard1_replica3] o.a.s.s.IndexSchema Loaded schema test/1.0 with uniqueid field id
   [junit4]   2> 701149 WARN  (qtp210716555-14522) [n:127.0.0.1:39961_ c:c8n_1x3_lf s:shard1  x:c8n_1x3_lf_shard1_replica1] o.a.s.s.IndexSchema [c8n_1x3_lf_shard1_replica1] default search field in schema is text. WARNING: Deprecated, please use 'df' on request instead.
   [junit4]   2> 701152 INFO  (qtp210716555-14522) [n:127.0.0.1:39961_ c:c8n_1x3_lf s:shard1  x:c8n_1x3_lf_shard1_replica1] o.a.s.s.IndexSchema Loaded schema test/1.0 with uniqueid field id
   [junit4]   2> 701156 INFO  (qtp1859332264-14618) [n:127.0.0.1:41282_ c:c8n_1x3_lf s:shard1  x:c8n_1x3_lf_shard1_replica2] o.a.s.u.UpdateHandler Using UpdateLog implementation: org.apache.solr.update.UpdateLog
   [junit4]   2> 701156 INFO  (qtp1859332264-14618) [n:127.0.0.1:41282_ c:c8n_1x3_lf s:shard1  x:c8n_1x3_lf_shard1_replica2] o.a.s.u.UpdateLog Initializing UpdateLog: dataDir= defaultSyncLevel=FLUSH numRecordsToKeep=100 maxNumLogsToKeep=10 numVersionBuckets=65536
   [junit4]   2> 701157 INFO  (qtp1859332264-14618) [n:127.0.0.1:41282_ c:c8n_1x3_lf s:shard1  x:c8n_1x3_lf_shard1_replica2] o.a.s.u.CommitTracker Hard AutoCommit: disabled
   [junit4]   2> 701157 INFO  (qtp1859332264-14618) [n:127.0.0.1:41282_ c:c8n_1x3_lf s:shard1  x:c8n_1x3_lf_shard1_replica2] o.a.s.u.CommitTracker Soft AutoCommit: disabled
   [junit4]   2> 701157 INFO  (qtp1859332264-14618) [n:127.0.0.1:41282_ c:c8n_1x3_lf s:shard1  x:c8n_1x3_lf_shard1_replica2] o.a.s.u.RandomMergePolicy RandomMergePolicy wrapping class org.apache.lucene.index.LogDocMergePolicy: [LogDocMergePolicy: minMergeSize=1000, mergeFactor=20, maxMergeSize=9223372036854775807, maxMergeSizeForForcedMerge=9223372036854775807, calibrateSizeByDeletes=true, maxMergeDocs=2147483647, maxCFSSegmentSizeMB=8.796093022207999E12, noCFSRatio=1.0]
   [junit4]   2> 701158 INFO  (qtp1859332264-14618) [n:127.0.0.1:41282_ c:c8n_1x3_lf s:shard1  x:c8n_1x3_lf_shard1_replica2] o.a.s.s.SolrIndexSearcher Opening [Searcher@57370cff[c8n_1x3_lf_shard1_replica2] main]
   [junit4]   2> 701158 INFO  (qtp1859332264-14618) [n:127.0.0.1:41282_ c:c8n_1x3_lf s:shard1  x:c8n_1x3_lf_shard1_replica2] o.a.s.r.ManagedResourceStorage Configured ZooKeeperStorageIO with znodeBase: /configs/conf1
   [junit4]   2> 701159 INFO  (qtp1859332264-14618) [n:127.0.0.1:41282_ c:c8n_1x3_lf s:shard1  x:c8n_1x3_lf_shard1_replica2] o.a.s.r.ManagedResourceStorage Loaded null at path _rest_managed.json using ZooKeeperStorageIO:path=/configs/conf1
   [junit4]   2> 701159 INFO  (qtp1859332264-14618) [n:127.0.0.1:41282_ c:c8n_1x3_lf s:shard1  x:c8n_1x3_lf_shard1_replica2] o.a.s.h.ReplicationHandler Commits will be reserved for  10000
   [junit4]   2> 701159 INFO  (searcherExecutor-3835-thread-1-processing-n:127.0.0.1:41282_ x:c8n_1x3_lf_shard1_replica2 s:shard1 c:c8n_1x3_lf) [n:127.0.0.1:41282_ c:c8n_1x3_lf s:shard1  x:c8n_1x3_lf_shard1_replica2] o.a.s.c.SolrCore [c8n_1x3_lf_shard1_replica2] Registered new searcher Searcher@57370cff[c8n_1x3_lf_shard1_replica2] main{ExitableDirectoryReader(UninvertingDirectoryReader())}
   [junit4]   2> 701160 INFO  (qtp1859332264-14618) [n:127.0.0.1:41282_ c:c8n_1x3_lf s:shard1  x:c8n_1x3_lf_shard1_replica2] o.a.s.u.UpdateLog Could not find max version in index or recent updates, using new clock 1555931741800103936
   [junit4]   2> 701160 INFO  (qtp897677364-14585) [n:127.0.0.1:39236_ c:c8n_1x3_lf s:shard1  x:c8n_1x3_lf_shard1_replica3] o.a.s.c.Cor

[...truncated too long message...]

-3406-thread-1-processing-n:127.0.0.1:39236_ x:c8n_1x3_lf_shard1_replica3 s:shard1 c:c8n_1x3_lf r:core_node1) [n:127.0.0.1:39236_ c:c8n_1x3_lf s:shard1 r:core_node1 x:c8n_1x3_lf_shard1_replica3] o.a.s.c.RecoveryStrategy Stopping recovery for core=[c8n_1x3_lf_shard1_replica3] coreNodeName=[core_node1]
   [junit4]   2> 826998 INFO  (recoveryExecutor-3406-thread-1-processing-n:127.0.0.1:39236_ x:c8n_1x3_lf_shard1_replica3 s:shard1 c:c8n_1x3_lf r:core_node1) [n:127.0.0.1:39236_ c:c8n_1x3_lf s:shard1 r:core_node1 x:c8n_1x3_lf_shard1_replica3] o.a.s.m.SolrMetricManager Closing metric reporters for: solr.core.c8n_1x3_lf.shard1.replica3
   [junit4]   2> 826999 INFO  (TEST-LeaderFailoverAfterPartitionTest.test-seed#[749AE4B6842C4694]) [    ] o.a.s.c.Overseer Overseer (id=97245732692688907-127.0.0.1:39236_-n_0000000002) closing
   [junit4]   2> 826999 INFO  (OverseerStateUpdate-97245732692688907-127.0.0.1:39236_-n_0000000002) [n:127.0.0.1:39236_    ] o.a.s.c.Overseer Overseer Loop exiting : 127.0.0.1:39236_
   [junit4]   2> 827000 INFO  (TEST-LeaderFailoverAfterPartitionTest.test-seed#[749AE4B6842C4694]) [    ] o.a.s.m.SolrMetricManager Closing metric reporters for: solr.node
   [junit4]   2> 827001 INFO  (TEST-LeaderFailoverAfterPartitionTest.test-seed#[749AE4B6842C4694]) [    ] o.e.j.s.AbstractConnector Stopped ServerConnector@60133873{HTTP/1.1,[http/1.1]}{127.0.0.1:0}
   [junit4]   2> 827001 INFO  (TEST-LeaderFailoverAfterPartitionTest.test-seed#[749AE4B6842C4694]) [    ] o.e.j.s.h.ContextHandler Stopped o.e.j.s.ServletContextHandler@1038829f{/,null,UNAVAILABLE}
   [junit4]   2> 827001 INFO  (TEST-LeaderFailoverAfterPartitionTest.test-seed#[749AE4B6842C4694]) [    ] o.a.s.c.ChaosMonkey monkey: stop shard! 41282
   [junit4]   2> 827002 INFO  (TEST-LeaderFailoverAfterPartitionTest.test-seed#[749AE4B6842C4694]) [    ] o.a.s.c.ZkTestServer connecting to 127.0.0.1:40734 40734
   [junit4]   2> 832107 INFO  (Thread-1639) [    ] o.a.s.c.ZkTestServer connecting to 127.0.0.1:40734 40734
   [junit4]   2> 832108 WARN  (Thread-1639) [    ] o.a.s.c.ZkTestServer Watch limit violations: 
   [junit4]   2> Maximum concurrent create/delete watches above limit:
   [junit4]   2> 
   [junit4]   2> 	5	/solr/aliases.json
   [junit4]   2> 	5	/solr/clusterprops.json
   [junit4]   2> 	4	/solr/security.json
   [junit4]   2> 	4	/solr/configs/conf1
   [junit4]   2> 	3	/solr/collections/c8n_1x3_lf/state.json
   [junit4]   2> 	3	/solr/collections/collection1/state.json
   [junit4]   2> 
   [junit4]   2> Maximum concurrent data watches above limit:
   [junit4]   2> 
   [junit4]   2> 	5	/solr/clusterstate.json
   [junit4]   2> 
   [junit4]   2> Maximum concurrent children watches above limit:
   [junit4]   2> 
   [junit4]   2> 	143	/solr/overseer/collection-queue-work
   [junit4]   2> 	34	/solr/overseer/queue
   [junit4]   2> 	9	/solr/overseer/queue-work
   [junit4]   2> 	5	/solr/live_nodes
   [junit4]   2> 	5	/solr/collections
   [junit4]   2> 
   [junit4]   2> 832108 WARN  (TEST-LeaderFailoverAfterPartitionTest.test-seed#[749AE4B6842C4694]) [    ] o.a.s.c.SocketProxy Closing 0 connections to: http://127.0.0.1:39236/, target: http://127.0.0.1:36343/
   [junit4]   2> 832108 WARN  (TEST-LeaderFailoverAfterPartitionTest.test-seed#[749AE4B6842C4694]) [    ] o.a.s.c.SocketProxy Closing 4 connections to: http://127.0.0.1:36407/, target: http://127.0.0.1:40102/
   [junit4]   2> 832108 WARN  (TEST-LeaderFailoverAfterPartitionTest.test-seed#[749AE4B6842C4694]) [    ] o.a.s.c.SocketProxy Closing 0 connections to: http://127.0.0.1:41282/, target: http://127.0.0.1:35468/
   [junit4]   2> 832108 WARN  (TEST-LeaderFailoverAfterPartitionTest.test-seed#[749AE4B6842C4694]) [    ] o.a.s.c.SocketProxy Closing 16 connections to: http://127.0.0.1:39961/, target: http://127.0.0.1:43297/
   [junit4]   2> NOTE: reproduce with: ant test  -Dtestcase=LeaderFailoverAfterPartitionTest -Dtests.method=test -Dtests.seed=749AE4B6842C4694 -Dtests.multiplier=3 -Dtests.slow=true -Dtests.locale=es-CL -Dtests.timezone=Etc/GMT+12 -Dtests.asserts=true -Dtests.file.encoding=UTF-8
   [junit4] FAILURE  149s J2 | LeaderFailoverAfterPartitionTest.test <<<
   [junit4]    > Throwable #1: java.lang.AssertionError: Expected 2 of 3 replicas to be active but only found 1; [core_node3:{"core":"c8n_1x3_lf_shard1_replica1","base_url":"http://127.0.0.1:39961","node_name":"127.0.0.1:39961_","state":"active","leader":"true"}]; clusterState: DocCollection(c8n_1x3_lf//clusterstate.json/30)={
   [junit4]    >   "replicationFactor":"3",
   [junit4]    >   "shards":{"shard1":{
   [junit4]    >       "range":"80000000-7fffffff",
   [junit4]    >       "state":"active",
   [junit4]    >       "replicas":{
   [junit4]    >         "core_node1":{
   [junit4]    >           "state":"down",
   [junit4]    >           "base_url":"http://127.0.0.1:39236",
   [junit4]    >           "core":"c8n_1x3_lf_shard1_replica3",
   [junit4]    >           "node_name":"127.0.0.1:39236_"},
   [junit4]    >         "core_node2":{
   [junit4]    >           "core":"c8n_1x3_lf_shard1_replica2",
   [junit4]    >           "base_url":"http://127.0.0.1:41282",
   [junit4]    >           "node_name":"127.0.0.1:41282_",
   [junit4]    >           "state":"down"},
   [junit4]    >         "core_node3":{
   [junit4]    >           "core":"c8n_1x3_lf_shard1_replica1",
   [junit4]    >           "base_url":"http://127.0.0.1:39961",
   [junit4]    >           "node_name":"127.0.0.1:39961_",
   [junit4]    >           "state":"active",
   [junit4]    >           "leader":"true"}}}},
   [junit4]    >   "router":{"name":"compositeId"},
   [junit4]    >   "maxShardsPerNode":"1",
   [junit4]    >   "autoAddReplicas":"false"}
   [junit4]    > 	at __randomizedtesting.SeedInfo.seed([749AE4B6842C4694:FCCEDB6C2AD02B6C]:0)
   [junit4]    > 	at org.apache.solr.cloud.LeaderFailoverAfterPartitionTest.testRf3WithLeaderFailover(LeaderFailoverAfterPartitionTest.java:170)
   [junit4]    > 	at org.apache.solr.cloud.LeaderFailoverAfterPartitionTest.test(LeaderFailoverAfterPartitionTest.java:57)
   [junit4]    > 	at org.apache.solr.BaseDistributedSearchTestCase$ShardsRepeatRule$ShardsFixedStatement.callStatement(BaseDistributedSearchTestCase.java:985)
   [junit4]    > 	at org.apache.solr.BaseDistributedSearchTestCase$ShardsRepeatRule$ShardsStatement.evaluate(BaseDistributedSearchTestCase.java:960)
   [junit4]    > 	at java.lang.Thread.run(Thread.java:745)
   [junit4]   2> 832110 INFO  (SUITE-LeaderFailoverAfterPartitionTest-seed#[749AE4B6842C4694]-worker) [    ] o.a.s.SolrTestCaseJ4 ###deleteCore
   [junit4]   2> NOTE: leaving temporary files on disk at: /home/jenkins/workspace/Lucene-Solr-master-Linux/solr/build/solr-core/test/J2/temp/solr.cloud.LeaderFailoverAfterPartitionTest_749AE4B6842C4694-001
   [junit4]   2> NOTE: test params are: codec=Asserting(Lucene70): {range_facet_l_dv=PostingsFormat(name=Memory), _version_=BlockTreeOrds(blocksize=128), multiDefault=PostingsFormat(name=Asserting), a_t=BlockTreeOrds(blocksize=128), intDefault=BlockTreeOrds(blocksize=128), id=PostingsFormat(name=Memory), range_facet_i_dv=BlockTreeOrds(blocksize=128), text=PostingsFormat(name=Memory), range_facet_l=BlockTreeOrds(blocksize=128), timestamp=BlockTreeOrds(blocksize=128)}, docValues:{range_facet_l_dv=DocValuesFormat(name=Lucene70), range_facet_i_dv=DocValuesFormat(name=Asserting), timestamp=DocValuesFormat(name=Asserting)}, maxPointsInLeafNode=260, maxMBSortInHeap=6.544446548909733, sim=RandomSimilarity(queryNorm=true): {}, locale=es-CL, timezone=Etc/GMT+12
   [junit4]   2> NOTE: Linux 4.4.0-53-generic amd64/Oracle Corporation 1.8.0_112 (64-bit)/cpus=12,threads=1,free=148822536,total=515899392
   [junit4]   2> NOTE: All tests run in this JVM: [TestMergePolicyConfig, TestShortCircuitedRequests, TestGroupingSearch, ManagedSchemaRoundRobinCloudTest, BadCopyFieldTest, TestDFISimilarityFactory, TestTrackingShardHandlerFactory, ClassificationUpdateProcessorIntegrationTest, TestComponentsName, ShowFileRequestHandlerTest, CloudMLTQParserTest, OverseerTaskQueueTest, MergeStrategyTest, TestHdfsCloudBackupRestore, TestRestManager, TestIBSimilarityFactory, AssignTest, TestXmlQParser, SparseHLLTest, DirectUpdateHandlerTest, TestNumericTerms64, PreAnalyzedFieldTest, TestFieldCacheWithThreads, TestCloudDeleteByQuery, BasicAuthIntegrationTest, StatsComponentTest, AddSchemaFieldsUpdateProcessorFactoryTest, TestHdfsBackupRestoreCore, TestCloudPseudoReturnFields, LeaderInitiatedRecoveryOnShardRestartTest, CoreAdminHandlerTest, TestUnifiedSolrHighlighter, ExternalFileFieldSortTest, PeerSyncWithIndexFingerprintCachingTest, PrimUtilsTest, TestSmileRequest, TestReload, FieldMutatingUpdateProcessorTest, FullHLLTest, TestSolrCloudWithKerberosAlt, HdfsBasicDistributedZk2Test, HdfsSyncSliceTest, TestFieldSortValues, TestShardHandlerFactory, SolrCoreMetricManagerTest, SpatialHeatmapFacetsTest, SimpleFacetsTest, StatelessScriptUpdateProcessorFactoryTest, TestGraphMLResponseWriter, SaslZkACLProviderTest, DeleteReplicaTest, ForceLeaderTest, EchoParamsTest, TestDynamicLoading, TestObjectReleaseTracker, DebugComponentTest, BigEndianAscendingWordSerializerTest, TestBinaryField, TestOnReconnectListenerSupport, PrimitiveFieldTypeTest, TestPhraseSuggestions, TestPHPSerializedResponseWriter, TlogReplayBufferedWhileIndexingTest, TestFieldCacheSort, TestConfigOverlay, TestReloadDeadlock, SolrTestCaseJ4Test, HardAutoCommitTest, RequestHandlersTest, TestSchemalessBufferedUpdates, SolrCloudExampleTest, TestRecovery, CurrencyFieldXmlFileTest, TestSolrXml, MetricsHandlerTest, TestReversedWildcardFilterFactory, DateFieldTest, ResourceLoaderTest, CoreSorterTest, SharedFSAutoReplicaFailoverUtilsTest, OverseerModifyCollectionTest, TestMiniSolrCloudCluster, TestIndexingPerformance, TestUtils, UnloadDistributedZkTest, BadIndexSchemaTest, TestStressLiveNodes, TestFieldCache, LeaderFailoverAfterPartitionTest]
   [junit4] Completed [321/676 (1!)] on J2 in 148.84s, 1 test, 1 failure <<< FAILURES!

[...truncated 63553 lines...]