You are viewing a plain text version of this content. The canonical link for it is here.
Posted to dev@lucene.apache.org by Apache Jenkins Server <je...@builds.apache.org> on 2016/10/14 00:04:55 UTC

[JENKINS] Lucene-Solr-Tests-master - Build # 1420 - Unstable

Build: https://builds.apache.org/job/Lucene-Solr-Tests-master/1420/

1 tests failed.
FAILED:  org.apache.solr.cloud.LeaderFailoverAfterPartitionTest.test

Error Message:
Expected 2 of 3 replicas to be active but only found 1; [core_node2:{"core":"c8n_1x3_lf_shard1_replica3","base_url":"http://127.0.0.1:37930","node_name":"127.0.0.1:37930_","state":"active","leader":"true"}]; clusterState: DocCollection(c8n_1x3_lf//collections/c8n_1x3_lf/state.json/19)={   "replicationFactor":"3",   "shards":{"shard1":{       "range":"80000000-7fffffff",       "state":"active",       "replicas":{         "core_node1":{           "state":"down",           "base_url":"http://127.0.0.1:46131",           "core":"c8n_1x3_lf_shard1_replica1",           "node_name":"127.0.0.1:46131_"},         "core_node2":{           "core":"c8n_1x3_lf_shard1_replica3",           "base_url":"http://127.0.0.1:37930",           "node_name":"127.0.0.1:37930_",           "state":"active",           "leader":"true"},         "core_node3":{           "core":"c8n_1x3_lf_shard1_replica2",           "base_url":"http://127.0.0.1:41673",           "node_name":"127.0.0.1:41673_",           "state":"down"}}}},   "router":{"name":"compositeId"},   "maxShardsPerNode":"1",   "autoAddReplicas":"false"}

Stack Trace:
java.lang.AssertionError: Expected 2 of 3 replicas to be active but only found 1; [core_node2:{"core":"c8n_1x3_lf_shard1_replica3","base_url":"http://127.0.0.1:37930","node_name":"127.0.0.1:37930_","state":"active","leader":"true"}]; clusterState: DocCollection(c8n_1x3_lf//collections/c8n_1x3_lf/state.json/19)={
  "replicationFactor":"3",
  "shards":{"shard1":{
      "range":"80000000-7fffffff",
      "state":"active",
      "replicas":{
        "core_node1":{
          "state":"down",
          "base_url":"http://127.0.0.1:46131",
          "core":"c8n_1x3_lf_shard1_replica1",
          "node_name":"127.0.0.1:46131_"},
        "core_node2":{
          "core":"c8n_1x3_lf_shard1_replica3",
          "base_url":"http://127.0.0.1:37930",
          "node_name":"127.0.0.1:37930_",
          "state":"active",
          "leader":"true"},
        "core_node3":{
          "core":"c8n_1x3_lf_shard1_replica2",
          "base_url":"http://127.0.0.1:41673",
          "node_name":"127.0.0.1:41673_",
          "state":"down"}}}},
  "router":{"name":"compositeId"},
  "maxShardsPerNode":"1",
  "autoAddReplicas":"false"}
	at __randomizedtesting.SeedInfo.seed([D76B0C6F3772C6F9:5F3F33B5998EAB01]:0)
	at org.junit.Assert.fail(Assert.java:93)
	at org.junit.Assert.assertTrue(Assert.java:43)
	at org.apache.solr.cloud.LeaderFailoverAfterPartitionTest.testRf3WithLeaderFailover(LeaderFailoverAfterPartitionTest.java:170)
	at org.apache.solr.cloud.LeaderFailoverAfterPartitionTest.test(LeaderFailoverAfterPartitionTest.java:57)
	at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
	at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)
	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
	at java.lang.reflect.Method.invoke(Method.java:498)
	at com.carrotsearch.randomizedtesting.RandomizedRunner.invoke(RandomizedRunner.java:1764)
	at com.carrotsearch.randomizedtesting.RandomizedRunner$8.evaluate(RandomizedRunner.java:871)
	at com.carrotsearch.randomizedtesting.RandomizedRunner$9.evaluate(RandomizedRunner.java:907)
	at com.carrotsearch.randomizedtesting.RandomizedRunner$10.evaluate(RandomizedRunner.java:921)
	at org.apache.solr.BaseDistributedSearchTestCase$ShardsRepeatRule$ShardsFixedStatement.callStatement(BaseDistributedSearchTestCase.java:985)
	at org.apache.solr.BaseDistributedSearchTestCase$ShardsRepeatRule$ShardsStatement.evaluate(BaseDistributedSearchTestCase.java:960)
	at com.carrotsearch.randomizedtesting.rules.SystemPropertiesRestoreRule$1.evaluate(SystemPropertiesRestoreRule.java:57)
	at org.apache.lucene.util.TestRuleSetupTeardownChained$1.evaluate(TestRuleSetupTeardownChained.java:49)
	at org.apache.lucene.util.AbstractBeforeAfterRule$1.evaluate(AbstractBeforeAfterRule.java:45)
	at org.apache.lucene.util.TestRuleThreadAndTestName$1.evaluate(TestRuleThreadAndTestName.java:48)
	at org.apache.lucene.util.TestRuleIgnoreAfterMaxFailures$1.evaluate(TestRuleIgnoreAfterMaxFailures.java:64)
	at org.apache.lucene.util.TestRuleMarkFailure$1.evaluate(TestRuleMarkFailure.java:47)
	at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
	at com.carrotsearch.randomizedtesting.ThreadLeakControl$StatementRunner.run(ThreadLeakControl.java:367)
	at com.carrotsearch.randomizedtesting.ThreadLeakControl.forkTimeoutingTask(ThreadLeakControl.java:809)
	at com.carrotsearch.randomizedtesting.ThreadLeakControl$3.evaluate(ThreadLeakControl.java:460)
	at com.carrotsearch.randomizedtesting.RandomizedRunner.runSingleTest(RandomizedRunner.java:880)
	at com.carrotsearch.randomizedtesting.RandomizedRunner$5.evaluate(RandomizedRunner.java:781)
	at com.carrotsearch.randomizedtesting.RandomizedRunner$6.evaluate(RandomizedRunner.java:816)
	at com.carrotsearch.randomizedtesting.RandomizedRunner$7.evaluate(RandomizedRunner.java:827)
	at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
	at com.carrotsearch.randomizedtesting.rules.SystemPropertiesRestoreRule$1.evaluate(SystemPropertiesRestoreRule.java:57)
	at org.apache.lucene.util.AbstractBeforeAfterRule$1.evaluate(AbstractBeforeAfterRule.java:45)
	at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
	at org.apache.lucene.util.TestRuleStoreClassName$1.evaluate(TestRuleStoreClassName.java:41)
	at com.carrotsearch.randomizedtesting.rules.NoShadowingOrOverridesOnMethodsRule$1.evaluate(NoShadowingOrOverridesOnMethodsRule.java:40)
	at com.carrotsearch.randomizedtesting.rules.NoShadowingOrOverridesOnMethodsRule$1.evaluate(NoShadowingOrOverridesOnMethodsRule.java:40)
	at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
	at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
	at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
	at org.apache.lucene.util.TestRuleAssertionsRequired$1.evaluate(TestRuleAssertionsRequired.java:53)
	at org.apache.lucene.util.TestRuleMarkFailure$1.evaluate(TestRuleMarkFailure.java:47)
	at org.apache.lucene.util.TestRuleIgnoreAfterMaxFailures$1.evaluate(TestRuleIgnoreAfterMaxFailures.java:64)
	at org.apache.lucene.util.TestRuleIgnoreTestSuites$1.evaluate(TestRuleIgnoreTestSuites.java:54)
	at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
	at com.carrotsearch.randomizedtesting.ThreadLeakControl$StatementRunner.run(ThreadLeakControl.java:367)
	at java.lang.Thread.run(Thread.java:745)




Build Log:
[...truncated 12107 lines...]
   [junit4] Suite: org.apache.solr.cloud.LeaderFailoverAfterPartitionTest
   [junit4]   2> Creating dataDir: /x1/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-master/solr/build/solr-core/test/J0/temp/solr.cloud.LeaderFailoverAfterPartitionTest_D76B0C6F3772C6F9-001/init-core-data-001
   [junit4]   2> 1375161 INFO  (SUITE-LeaderFailoverAfterPartitionTest-seed#[D76B0C6F3772C6F9]-worker) [    ] o.a.s.SolrTestCaseJ4 Randomized ssl (false) and clientAuth (false) via: @org.apache.solr.SolrTestCaseJ4$SuppressSSL(bugUrl=https://issues.apache.org/jira/browse/SOLR-5776)
   [junit4]   2> 1375162 INFO  (SUITE-LeaderFailoverAfterPartitionTest-seed#[D76B0C6F3772C6F9]-worker) [    ] o.a.s.BaseDistributedSearchTestCase Setting hostContext system property: /
   [junit4]   2> 1375163 INFO  (TEST-LeaderFailoverAfterPartitionTest.test-seed#[D76B0C6F3772C6F9]) [    ] o.a.s.c.ZkTestServer STARTING ZK TEST SERVER
   [junit4]   2> 1375164 INFO  (Thread-6212) [    ] o.a.s.c.ZkTestServer client port:0.0.0.0/0.0.0.0:0
   [junit4]   2> 1375164 INFO  (Thread-6212) [    ] o.a.s.c.ZkTestServer Starting server
   [junit4]   2> 1375264 INFO  (TEST-LeaderFailoverAfterPartitionTest.test-seed#[D76B0C6F3772C6F9]) [    ] o.a.s.c.ZkTestServer start zk server on port:51607
   [junit4]   2> 1375268 WARN  (NIOServerCxn.Factory:0.0.0.0/0.0.0.0:0) [    ] o.a.z.s.NIOServerCnxn caught end of stream exception
   [junit4]   2> EndOfStreamException: Unable to read additional data from client sessionid 0x157c06c3c9e0000, likely client has closed socket
   [junit4]   2> 	at org.apache.zookeeper.server.NIOServerCnxn.doIO(NIOServerCnxn.java:228)
   [junit4]   2> 	at org.apache.zookeeper.server.NIOServerCnxnFactory.run(NIOServerCnxnFactory.java:208)
   [junit4]   2> 	at java.lang.Thread.run(Thread.java:745)
   [junit4]   2> 1375274 INFO  (TEST-LeaderFailoverAfterPartitionTest.test-seed#[D76B0C6F3772C6F9]) [    ] o.a.s.c.AbstractZkTestCase put /x1/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-master/solr/core/src/test-files/solr/collection1/conf/solrconfig-tlog.xml to /configs/conf1/solrconfig.xml
   [junit4]   2> 1375275 INFO  (TEST-LeaderFailoverAfterPartitionTest.test-seed#[D76B0C6F3772C6F9]) [    ] o.a.s.c.AbstractZkTestCase put /x1/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-master/solr/core/src/test-files/solr/collection1/conf/schema.xml to /configs/conf1/schema.xml
   [junit4]   2> 1375276 INFO  (TEST-LeaderFailoverAfterPartitionTest.test-seed#[D76B0C6F3772C6F9]) [    ] o.a.s.c.AbstractZkTestCase put /x1/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-master/solr/core/src/test-files/solr/collection1/conf/solrconfig.snippet.randomindexconfig.xml to /configs/conf1/solrconfig.snippet.randomindexconfig.xml
   [junit4]   2> 1375283 INFO  (TEST-LeaderFailoverAfterPartitionTest.test-seed#[D76B0C6F3772C6F9]) [    ] o.a.s.c.AbstractZkTestCase put /x1/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-master/solr/core/src/test-files/solr/collection1/conf/stopwords.txt to /configs/conf1/stopwords.txt
   [junit4]   2> 1375284 INFO  (TEST-LeaderFailoverAfterPartitionTest.test-seed#[D76B0C6F3772C6F9]) [    ] o.a.s.c.AbstractZkTestCase put /x1/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-master/solr/core/src/test-files/solr/collection1/conf/protwords.txt to /configs/conf1/protwords.txt
   [junit4]   2> 1375285 INFO  (TEST-LeaderFailoverAfterPartitionTest.test-seed#[D76B0C6F3772C6F9]) [    ] o.a.s.c.AbstractZkTestCase put /x1/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-master/solr/core/src/test-files/solr/collection1/conf/currency.xml to /configs/conf1/currency.xml
   [junit4]   2> 1375286 INFO  (TEST-LeaderFailoverAfterPartitionTest.test-seed#[D76B0C6F3772C6F9]) [    ] o.a.s.c.AbstractZkTestCase put /x1/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-master/solr/core/src/test-files/solr/collection1/conf/enumsConfig.xml to /configs/conf1/enumsConfig.xml
   [junit4]   2> 1375287 INFO  (TEST-LeaderFailoverAfterPartitionTest.test-seed#[D76B0C6F3772C6F9]) [    ] o.a.s.c.AbstractZkTestCase put /x1/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-master/solr/core/src/test-files/solr/collection1/conf/open-exchange-rates.json to /configs/conf1/open-exchange-rates.json
   [junit4]   2> 1375288 INFO  (TEST-LeaderFailoverAfterPartitionTest.test-seed#[D76B0C6F3772C6F9]) [    ] o.a.s.c.AbstractZkTestCase put /x1/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-master/solr/core/src/test-files/solr/collection1/conf/mapping-ISOLatin1Accent.txt to /configs/conf1/mapping-ISOLatin1Accent.txt
   [junit4]   2> 1375289 INFO  (TEST-LeaderFailoverAfterPartitionTest.test-seed#[D76B0C6F3772C6F9]) [    ] o.a.s.c.AbstractZkTestCase put /x1/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-master/solr/core/src/test-files/solr/collection1/conf/old_synonyms.txt to /configs/conf1/old_synonyms.txt
   [junit4]   2> 1375290 INFO  (TEST-LeaderFailoverAfterPartitionTest.test-seed#[D76B0C6F3772C6F9]) [    ] o.a.s.c.AbstractZkTestCase put /x1/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-master/solr/core/src/test-files/solr/collection1/conf/synonyms.txt to /configs/conf1/synonyms.txt
   [junit4]   2> 1375291 WARN  (NIOServerCxn.Factory:0.0.0.0/0.0.0.0:0) [    ] o.a.z.s.NIOServerCnxn caught end of stream exception
   [junit4]   2> EndOfStreamException: Unable to read additional data from client sessionid 0x157c06c3c9e0001, likely client has closed socket
   [junit4]   2> 	at org.apache.zookeeper.server.NIOServerCnxn.doIO(NIOServerCnxn.java:228)
   [junit4]   2> 	at org.apache.zookeeper.server.NIOServerCnxnFactory.run(NIOServerCnxnFactory.java:208)
   [junit4]   2> 	at java.lang.Thread.run(Thread.java:745)
   [junit4]   2> 1375356 INFO  (TEST-LeaderFailoverAfterPartitionTest.test-seed#[D76B0C6F3772C6F9]) [    ] o.a.s.SolrTestCaseJ4 Writing core.properties file to /x1/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-master/solr/build/solr-core/test/J0/temp/solr.cloud.LeaderFailoverAfterPartitionTest_D76B0C6F3772C6F9-001/control-001/cores/collection1
   [junit4]   2> 1375358 INFO  (TEST-LeaderFailoverAfterPartitionTest.test-seed#[D76B0C6F3772C6F9]) [    ] o.e.j.s.Server jetty-9.3.8.v20160314
   [junit4]   2> 1375361 INFO  (TEST-LeaderFailoverAfterPartitionTest.test-seed#[D76B0C6F3772C6F9]) [    ] o.e.j.s.h.ContextHandler Started o.e.j.s.ServletContextHandler@76f50d1c{/,null,AVAILABLE}
   [junit4]   2> 1375361 INFO  (TEST-LeaderFailoverAfterPartitionTest.test-seed#[D76B0C6F3772C6F9]) [    ] o.e.j.s.ServerConnector Started ServerConnector@7066c819{HTTP/1.1,[http/1.1]}{127.0.0.1:45361}
   [junit4]   2> 1375361 INFO  (TEST-LeaderFailoverAfterPartitionTest.test-seed#[D76B0C6F3772C6F9]) [    ] o.e.j.s.Server Started @1377044ms
   [junit4]   2> 1375361 INFO  (TEST-LeaderFailoverAfterPartitionTest.test-seed#[D76B0C6F3772C6F9]) [    ] o.a.s.c.s.e.JettySolrRunner Jetty properties: {solr.data.dir=/x1/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-master/solr/build/solr-core/test/J0/temp/solr.cloud.LeaderFailoverAfterPartitionTest_D76B0C6F3772C6F9-001/tempDir-001/control/data, hostContext=/, hostPort=58767, coreRootDirectory=/x1/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-master/solr/build/solr-core/test/J0/temp/solr.cloud.LeaderFailoverAfterPartitionTest_D76B0C6F3772C6F9-001/control-001/cores}
   [junit4]   2> 1375361 INFO  (TEST-LeaderFailoverAfterPartitionTest.test-seed#[D76B0C6F3772C6F9]) [    ] o.a.s.s.SolrDispatchFilter  ___      _       Welcome to Apache Solr™ version 7.0.0
   [junit4]   2> 1375362 INFO  (TEST-LeaderFailoverAfterPartitionTest.test-seed#[D76B0C6F3772C6F9]) [    ] o.a.s.s.SolrDispatchFilter / __| ___| |_ _   Starting in cloud mode on port null
   [junit4]   2> 1375362 INFO  (TEST-LeaderFailoverAfterPartitionTest.test-seed#[D76B0C6F3772C6F9]) [    ] o.a.s.s.SolrDispatchFilter \__ \/ _ \ | '_|  Install dir: null
   [junit4]   2> 1375362 INFO  (TEST-LeaderFailoverAfterPartitionTest.test-seed#[D76B0C6F3772C6F9]) [    ] o.a.s.s.SolrDispatchFilter |___/\___/_|_|    Start time: 2016-10-13T23:41:41.604Z
   [junit4]   2> 1375364 INFO  (TEST-LeaderFailoverAfterPartitionTest.test-seed#[D76B0C6F3772C6F9]) [    ] o.a.s.s.SolrDispatchFilter Loading solr.xml from SolrHome (not found in ZooKeeper)
   [junit4]   2> 1375364 INFO  (TEST-LeaderFailoverAfterPartitionTest.test-seed#[D76B0C6F3772C6F9]) [    ] o.a.s.c.SolrXmlConfig Loading container configuration from /x1/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-master/solr/build/solr-core/test/J0/temp/solr.cloud.LeaderFailoverAfterPartitionTest_D76B0C6F3772C6F9-001/control-001/solr.xml
   [junit4]   2> 1375369 INFO  (TEST-LeaderFailoverAfterPartitionTest.test-seed#[D76B0C6F3772C6F9]) [    ] o.a.s.c.ZkContainer Zookeeper client=127.0.0.1:51607/solr
   [junit4]   2> 1375383 INFO  (TEST-LeaderFailoverAfterPartitionTest.test-seed#[D76B0C6F3772C6F9]) [n:127.0.0.1:58767_    ] o.a.s.c.OverseerElectionContext I am going to be the leader 127.0.0.1:58767_
   [junit4]   2> 1375384 INFO  (TEST-LeaderFailoverAfterPartitionTest.test-seed#[D76B0C6F3772C6F9]) [n:127.0.0.1:58767_    ] o.a.s.c.Overseer Overseer (id=96757488117743620-127.0.0.1:58767_-n_0000000000) starting
   [junit4]   2> 1375389 INFO  (TEST-LeaderFailoverAfterPartitionTest.test-seed#[D76B0C6F3772C6F9]) [n:127.0.0.1:58767_    ] o.a.s.c.ZkController Register node as live in ZooKeeper:/live_nodes/127.0.0.1:58767_
   [junit4]   2> 1375391 INFO  (zkCallback-13960-thread-1-processing-n:127.0.0.1:58767_) [n:127.0.0.1:58767_    ] o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (0) -> (1)
   [junit4]   2> 1375398 INFO  (TEST-LeaderFailoverAfterPartitionTest.test-seed#[D76B0C6F3772C6F9]) [n:127.0.0.1:58767_    ] o.a.s.c.CorePropertiesLocator Found 1 core definitions underneath /x1/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-master/solr/build/solr-core/test/J0/temp/solr.cloud.LeaderFailoverAfterPartitionTest_D76B0C6F3772C6F9-001/control-001/cores
   [junit4]   2> 1375398 INFO  (TEST-LeaderFailoverAfterPartitionTest.test-seed#[D76B0C6F3772C6F9]) [n:127.0.0.1:58767_    ] o.a.s.c.CorePropertiesLocator Cores are: [collection1]
   [junit4]   2> 1375400 INFO  (OverseerStateUpdate-96757488117743620-127.0.0.1:58767_-n_0000000000) [n:127.0.0.1:58767_    ] o.a.s.c.o.ReplicaMutator Assigning new node to shard shard=shard1
   [junit4]   2> 1376412 WARN  (coreLoadExecutor-7984-thread-1-processing-n:127.0.0.1:58767_) [n:127.0.0.1:58767_ c:control_collection   x:collection1] o.a.s.c.Config Beginning with Solr 5.5, <mergePolicy> is deprecated, use <mergePolicyFactory> instead.
   [junit4]   2> 1376414 INFO  (coreLoadExecutor-7984-thread-1-processing-n:127.0.0.1:58767_) [n:127.0.0.1:58767_ c:control_collection   x:collection1] o.a.s.c.SolrConfig Using Lucene MatchVersion: 7.0.0
   [junit4]   2> 1376425 INFO  (coreLoadExecutor-7984-thread-1-processing-n:127.0.0.1:58767_) [n:127.0.0.1:58767_ c:control_collection   x:collection1] o.a.s.s.IndexSchema [collection1] Schema name=test
   [junit4]   2> 1376508 WARN  (coreLoadExecutor-7984-thread-1-processing-n:127.0.0.1:58767_) [n:127.0.0.1:58767_ c:control_collection   x:collection1] o.a.s.s.IndexSchema [collection1] default search field in schema is text. WARNING: Deprecated, please use 'df' on request instead.
   [junit4]   2> 1376510 INFO  (coreLoadExecutor-7984-thread-1-processing-n:127.0.0.1:58767_) [n:127.0.0.1:58767_ c:control_collection   x:collection1] o.a.s.s.IndexSchema Loaded schema test/1.0 with uniqueid field id
   [junit4]   2> 1376524 INFO  (coreLoadExecutor-7984-thread-1-processing-n:127.0.0.1:58767_) [n:127.0.0.1:58767_ c:control_collection   x:collection1] o.a.s.c.CoreContainer Creating SolrCore 'collection1' using configuration from collection control_collection
   [junit4]   2> 1376525 INFO  (coreLoadExecutor-7984-thread-1-processing-n:127.0.0.1:58767_) [n:127.0.0.1:58767_ c:control_collection s:shard1 r:core_node1 x:collection1] o.a.s.c.SolrCore [[collection1] ] Opening new SolrCore at [/x1/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-master/solr/build/solr-core/test/J0/temp/solr.cloud.LeaderFailoverAfterPartitionTest_D76B0C6F3772C6F9-001/control-001/cores/collection1], dataDir=[/x1/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-master/solr/build/solr-core/test/J0/temp/solr.cloud.LeaderFailoverAfterPartitionTest_D76B0C6F3772C6F9-001/control-001/cores/collection1/data/]
   [junit4]   2> 1376525 INFO  (coreLoadExecutor-7984-thread-1-processing-n:127.0.0.1:58767_) [n:127.0.0.1:58767_ c:control_collection s:shard1 r:core_node1 x:collection1] o.a.s.c.JmxMonitoredMap JMX monitoring is enabled. Adding Solr mbeans to JMX Server: com.sun.jmx.mbeanserver.JmxMBeanServer@4f453c43
   [junit4]   2> 1376525 INFO  (coreLoadExecutor-7984-thread-1-processing-n:127.0.0.1:58767_) [n:127.0.0.1:58767_ c:control_collection s:shard1 r:core_node1 x:collection1] o.a.s.u.RandomMergePolicy RandomMergePolicy wrapping class org.apache.lucene.index.AlcoholicMergePolicy: [AlcoholicMergePolicy: minMergeSize=0, mergeFactor=10, maxMergeSize=388857997, maxMergeSizeForForcedMerge=9223372036854775807, calibrateSizeByDeletes=true, maxMergeDocs=2147483647, maxCFSSegmentSizeMB=8.796093022207999E12, noCFSRatio=0.1]
   [junit4]   2> 1376528 WARN  (coreLoadExecutor-7984-thread-1-processing-n:127.0.0.1:58767_) [n:127.0.0.1:58767_ c:control_collection s:shard1 r:core_node1 x:collection1] o.a.s.c.RequestHandlers INVALID paramSet a in requestHandler {type = requestHandler,name = /dump,class = DumpRequestHandler,args = {defaults={a=A,b=B}}}
   [junit4]   2> 1376533 INFO  (coreLoadExecutor-7984-thread-1-processing-n:127.0.0.1:58767_) [n:127.0.0.1:58767_ c:control_collection s:shard1 r:core_node1 x:collection1] o.a.s.u.UpdateHandler Using UpdateLog implementation: org.apache.solr.update.UpdateLog
   [junit4]   2> 1376533 INFO  (coreLoadExecutor-7984-thread-1-processing-n:127.0.0.1:58767_) [n:127.0.0.1:58767_ c:control_collection s:shard1 r:core_node1 x:collection1] o.a.s.u.UpdateLog Initializing UpdateLog: dataDir= defaultSyncLevel=FLUSH numRecordsToKeep=100 maxNumLogsToKeep=10 numVersionBuckets=65536
   [junit4]   2> 1376534 INFO  (coreLoadExecutor-7984-thread-1-processing-n:127.0.0.1:58767_) [n:127.0.0.1:58767_ c:control_collection s:shard1 r:core_node1 x:collection1] o.a.s.u.CommitTracker Hard AutoCommit: disabled
   [junit4]   2> 1376534 INFO  (coreLoadExecutor-7984-thread-1-processing-n:127.0.0.1:58767_) [n:127.0.0.1:58767_ c:control_collection s:shard1 r:core_node1 x:collection1] o.a.s.u.CommitTracker Soft AutoCommit: disabled
   [junit4]   2> 1376534 INFO  (coreLoadExecutor-7984-thread-1-processing-n:127.0.0.1:58767_) [n:127.0.0.1:58767_ c:control_collection s:shard1 r:core_node1 x:collection1] o.a.s.u.RandomMergePolicy RandomMergePolicy wrapping class org.apache.lucene.index.TieredMergePolicy: [TieredMergePolicy: maxMergeAtOnce=13, maxMergeAtOnceExplicit=47, maxMergedSegmentMB=6.0244140625, floorSegmentMB=1.3544921875, forceMergeDeletesPctAllowed=24.812078960127543, segmentsPerTier=36.0, maxCFSSegmentSizeMB=8.796093022207999E12, noCFSRatio=0.7266835384905741
   [junit4]   2> 1376534 INFO  (coreLoadExecutor-7984-thread-1-processing-n:127.0.0.1:58767_) [n:127.0.0.1:58767_ c:control_collection s:shard1 r:core_node1 x:collection1] o.a.s.s.SolrIndexSearcher Opening [Searcher@5b61c99[collection1] main]
   [junit4]   2> 1376535 INFO  (coreLoadExecutor-7984-thread-1-processing-n:127.0.0.1:58767_) [n:127.0.0.1:58767_ c:control_collection s:shard1 r:core_node1 x:collection1] o.a.s.r.ManagedResourceStorage Configured ZooKeeperStorageIO with znodeBase: /configs/conf1
   [junit4]   2> 1376536 INFO  (coreLoadExecutor-7984-thread-1-processing-n:127.0.0.1:58767_) [n:127.0.0.1:58767_ c:control_collection s:shard1 r:core_node1 x:collection1] o.a.s.r.ManagedResourceStorage Loaded null at path _rest_managed.json using ZooKeeperStorageIO:path=/configs/conf1
   [junit4]   2> 1376536 INFO  (coreLoadExecutor-7984-thread-1-processing-n:127.0.0.1:58767_) [n:127.0.0.1:58767_ c:control_collection s:shard1 r:core_node1 x:collection1] o.a.s.h.ReplicationHandler Commits will be reserved for  10000
   [junit4]   2> 1376536 INFO  (coreLoadExecutor-7984-thread-1-processing-n:127.0.0.1:58767_) [n:127.0.0.1:58767_ c:control_collection s:shard1 r:core_node1 x:collection1] o.a.s.u.UpdateLog Could not find max version in index or recent updates, using new clock 1548119811322544128
   [junit4]   2> 1376538 INFO  (searcherExecutor-7985-thread-1-processing-n:127.0.0.1:58767_ x:collection1 s:shard1 c:control_collection r:core_node1) [n:127.0.0.1:58767_ c:control_collection s:shard1 r:core_node1 x:collection1] o.a.s.c.SolrCore [collection1] Registered new searcher Searcher@5b61c99[collection1] main{ExitableDirectoryReader(UninvertingDirectoryReader())}
   [junit4]   2> 1376543 INFO  (coreZkRegister-7977-thread-1-processing-n:127.0.0.1:58767_ x:collection1 s:shard1 c:control_collection r:core_node1) [n:127.0.0.1:58767_ c:control_collection s:shard1 r:core_node1 x:collection1] o.a.s.c.ShardLeaderElectionContext Enough replicas found to continue.
   [junit4]   2> 1376543 INFO  (coreZkRegister-7977-thread-1-processing-n:127.0.0.1:58767_ x:collection1 s:shard1 c:control_collection r:core_node1) [n:127.0.0.1:58767_ c:control_collection s:shard1 r:core_node1 x:collection1] o.a.s.c.ShardLeaderElectionContext I may be the new leader - try and sync
   [junit4]   2> 1376543 INFO  (coreZkRegister-7977-thread-1-processing-n:127.0.0.1:58767_ x:collection1 s:shard1 c:control_collection r:core_node1) [n:127.0.0.1:58767_ c:control_collection s:shard1 r:core_node1 x:collection1] o.a.s.c.SyncStrategy Sync replicas to http://127.0.0.1:58767/collection1/
   [junit4]   2> 1376543 INFO  (coreZkRegister-7977-thread-1-processing-n:127.0.0.1:58767_ x:collection1 s:shard1 c:control_collection r:core_node1) [n:127.0.0.1:58767_ c:control_collection s:shard1 r:core_node1 x:collection1] o.a.s.c.SyncStrategy Sync Success - now sync replicas to me
   [junit4]   2> 1376543 INFO  (coreZkRegister-7977-thread-1-processing-n:127.0.0.1:58767_ x:collection1 s:shard1 c:control_collection r:core_node1) [n:127.0.0.1:58767_ c:control_collection s:shard1 r:core_node1 x:collection1] o.a.s.c.SyncStrategy http://127.0.0.1:58767/collection1/ has no replicas
   [junit4]   2> 1376546 INFO  (coreZkRegister-7977-thread-1-processing-n:127.0.0.1:58767_ x:collection1 s:shard1 c:control_collection r:core_node1) [n:127.0.0.1:58767_ c:control_collection s:shard1 r:core_node1 x:collection1] o.a.s.c.ShardLeaderElectionContext I am the new leader: http://127.0.0.1:58767/collection1/ shard1
   [junit4]   2> 1376697 INFO  (coreZkRegister-7977-thread-1-processing-n:127.0.0.1:58767_ x:collection1 s:shard1 c:control_collection r:core_node1) [n:127.0.0.1:58767_ c:control_collection s:shard1 r:core_node1 x:collection1] o.a.s.c.ZkController I am the leader, no recovery necessary
   [junit4]   2> 1376906 INFO  (TEST-LeaderFailoverAfterPartitionTest.test-seed#[D76B0C6F3772C6F9]) [    ] o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (0) -> (1)
   [junit4]   2> 1376907 INFO  (TEST-LeaderFailoverAfterPartitionTest.test-seed#[D76B0C6F3772C6F9]) [    ] o.a.s.c.ChaosMonkey monkey: init - expire sessions:false cause connection loss:false
   [junit4]   2> 1376907 INFO  (TEST-LeaderFailoverAfterPartitionTest.test-seed#[D76B0C6F3772C6F9]) [    ] o.a.s.c.AbstractFullDistribZkTestBase Creating collection1 with stateFormat=2
   [junit4]   2> 1376985 INFO  (TEST-LeaderFailoverAfterPartitionTest.test-seed#[D76B0C6F3772C6F9]) [    ] o.a.s.SolrTestCaseJ4 Writing core.properties file to /x1/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-master/solr/build/solr-core/test/J0/temp/solr.cloud.LeaderFailoverAfterPartitionTest_D76B0C6F3772C6F9-001/shard-1-001/cores/collection1
   [junit4]   2> 1376985 INFO  (TEST-LeaderFailoverAfterPartitionTest.test-seed#[D76B0C6F3772C6F9]) [    ] o.a.s.c.AbstractFullDistribZkTestBase create jetty 1 in directory /x1/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-master/solr/build/solr-core/test/J0/temp/solr.cloud.LeaderFailoverAfterPartitionTest_D76B0C6F3772C6F9-001/shard-1-001
   [junit4]   2> 1376986 INFO  (TEST-LeaderFailoverAfterPartitionTest.test-seed#[D76B0C6F3772C6F9]) [    ] o.e.j.s.Server jetty-9.3.8.v20160314
   [junit4]   2> 1376987 INFO  (TEST-LeaderFailoverAfterPartitionTest.test-seed#[D76B0C6F3772C6F9]) [    ] o.e.j.s.h.ContextHandler Started o.e.j.s.ServletContextHandler@71caae1c{/,null,AVAILABLE}
   [junit4]   2> 1376987 INFO  (TEST-LeaderFailoverAfterPartitionTest.test-seed#[D76B0C6F3772C6F9]) [    ] o.e.j.s.ServerConnector Started ServerConnector@15f69acd{HTTP/1.1,[http/1.1]}{127.0.0.1:55495}
   [junit4]   2> 1376987 INFO  (TEST-LeaderFailoverAfterPartitionTest.test-seed#[D76B0C6F3772C6F9]) [    ] o.e.j.s.Server Started @1378671ms
   [junit4]   2> 1376987 INFO  (TEST-LeaderFailoverAfterPartitionTest.test-seed#[D76B0C6F3772C6F9]) [    ] o.a.s.c.s.e.JettySolrRunner Jetty properties: {solr.data.dir=/x1/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-master/solr/build/solr-core/test/J0/temp/solr.cloud.LeaderFailoverAfterPartitionTest_D76B0C6F3772C6F9-001/tempDir-001/jetty1, solrconfig=solrconfig.xml, hostContext=/, hostPort=46131, coreRootDirectory=/x1/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-master/solr/build/solr-core/test/J0/temp/solr.cloud.LeaderFailoverAfterPartitionTest_D76B0C6F3772C6F9-001/shard-1-001/cores}
   [junit4]   2> 1376988 INFO  (TEST-LeaderFailoverAfterPartitionTest.test-seed#[D76B0C6F3772C6F9]) [    ] o.a.s.s.SolrDispatchFilter  ___      _       Welcome to Apache Solr™ version 7.0.0
   [junit4]   2> 1376988 INFO  (TEST-LeaderFailoverAfterPartitionTest.test-seed#[D76B0C6F3772C6F9]) [    ] o.a.s.s.SolrDispatchFilter / __| ___| |_ _   Starting in cloud mode on port null
   [junit4]   2> 1376988 INFO  (TEST-LeaderFailoverAfterPartitionTest.test-seed#[D76B0C6F3772C6F9]) [    ] o.a.s.s.SolrDispatchFilter \__ \/ _ \ | '_|  Install dir: null
   [junit4]   2> 1376988 INFO  (TEST-LeaderFailoverAfterPartitionTest.test-seed#[D76B0C6F3772C6F9]) [    ] o.a.s.s.SolrDispatchFilter |___/\___/_|_|    Start time: 2016-10-13T23:41:43.230Z
   [junit4]   2> 1376990 INFO  (TEST-LeaderFailoverAfterPartitionTest.test-seed#[D76B0C6F3772C6F9]) [    ] o.a.s.s.SolrDispatchFilter Loading solr.xml from SolrHome (not found in ZooKeeper)
   [junit4]   2> 1376990 INFO  (TEST-LeaderFailoverAfterPartitionTest.test-seed#[D76B0C6F3772C6F9]) [    ] o.a.s.c.SolrXmlConfig Loading container configuration from /x1/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-master/solr/build/solr-core/test/J0/temp/solr.cloud.LeaderFailoverAfterPartitionTest_D76B0C6F3772C6F9-001/shard-1-001/solr.xml
   [junit4]   2> 1376998 INFO  (TEST-LeaderFailoverAfterPartitionTest.test-seed#[D76B0C6F3772C6F9]) [    ] o.a.s.c.ZkContainer Zookeeper client=127.0.0.1:51607/solr
   [junit4]   2> 1377006 INFO  (TEST-LeaderFailoverAfterPartitionTest.test-seed#[D76B0C6F3772C6F9]) [n:127.0.0.1:46131_    ] o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (0) -> (1)
   [junit4]   2> 1377009 INFO  (TEST-LeaderFailoverAfterPartitionTest.test-seed#[D76B0C6F3772C6F9]) [n:127.0.0.1:46131_    ] o.a.s.c.ZkController Register node as live in ZooKeeper:/live_nodes/127.0.0.1:46131_
   [junit4]   2> 1377010 INFO  (zkCallback-13964-thread-1) [    ] o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (1) -> (2)
   [junit4]   2> 1377010 INFO  (zkCallback-13960-thread-3-processing-n:127.0.0.1:58767_) [n:127.0.0.1:58767_    ] o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (1) -> (2)
   [junit4]   2> 1377010 INFO  (zkCallback-13970-thread-1-processing-n:127.0.0.1:46131_) [n:127.0.0.1:46131_    ] o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (1) -> (2)
   [junit4]   2> 1377025 INFO  (TEST-LeaderFailoverAfterPartitionTest.test-seed#[D76B0C6F3772C6F9]) [n:127.0.0.1:46131_    ] o.a.s.c.CorePropertiesLocator Found 1 core definitions underneath /x1/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-master/solr/build/solr-core/test/J0/temp/solr.cloud.LeaderFailoverAfterPartitionTest_D76B0C6F3772C6F9-001/shard-1-001/cores
   [junit4]   2> 1377025 INFO  (TEST-LeaderFailoverAfterPartitionTest.test-seed#[D76B0C6F3772C6F9]) [n:127.0.0.1:46131_    ] o.a.s.c.CorePropertiesLocator Cores are: [collection1]
   [junit4]   2> 1377027 INFO  (OverseerStateUpdate-96757488117743620-127.0.0.1:58767_-n_0000000000) [n:127.0.0.1:58767_    ] o.a.s.c.o.ReplicaMutator Assigning new node to shard shard=shard2
   [junit4]   2> 1377130 WARN  (OverseerStateUpdate-96757488117743620-127.0.0.1:58767_-n_0000000000) [n:127.0.0.1:58767_    ] o.a.s.c.Overseer Bad version writing to ZK using compare-and-set, will force refresh cluster state: KeeperErrorCode = BadVersion for /collections/collection1/state.json
   [junit4]   2> 1377133 INFO  (zkCallback-13970-thread-1-processing-n:127.0.0.1:46131_) [n:127.0.0.1:46131_    ] o.a.s.c.c.ZkStateReader A cluster state change: [WatchedEvent state:SyncConnected type:NodeDataChanged path:/collections/collection1/state.json] for collection [collection1] has occurred - updating... (live nodes size: [2])
   [junit4]   2> 1377134 INFO  (OverseerStateUpdate-96757488117743620-127.0.0.1:58767_-n_0000000000) [n:127.0.0.1:58767_    ] o.a.s.c.o.ReplicaMutator Assigning new node to shard shard=shard2
   [junit4]   2> 1377135 INFO  (zkCallback-13970-thread-1-processing-n:127.0.0.1:46131_) [n:127.0.0.1:46131_    ] o.a.s.c.c.ZkStateReader A cluster state change: [WatchedEvent state:SyncConnected type:NodeDataChanged path:/collections/collection1/state.json] for collection [collection1] has occurred - updating... (live nodes size: [2])
   [junit4]   2> 1378046 WARN  (coreLoadExecutor-7995-thread-1-processing-n:127.0.0.1:46131_) [n:127.0.0.1:46131_ c:collection1   x:collection1] o.a.s.c.Config Beginning with Solr 5.5, <mergePolicy> is deprecated, use <mergePolicyFactory> instead.
   [junit4]   2> 1378048 INFO  (coreLoadExecutor-7995-thread-1-processing-n:127.0.0.1:46131_) [n:127.0.0.1:46131_ c:collection1   x:collection1] o.a.s.c.SolrConfig Using Lucene MatchVersion: 7.0.0
   [junit4]   2> 1378061 INFO  (coreLoadExecutor-7995-thread-1-processing-n:127.0.0.1:46131_) [n:127.0.0.1:46131_ c:collection1   x:collection1] o.a.s.s.IndexSchema [collection1] Schema name=test
   [junit4]   2> 1378173 WARN  (coreLoadExecutor-7995-thread-1-processing-n:127.0.0.1:46131_) [n:127.0.0.1:46131_ c:collection1   x:collection1] o.a.s.s.IndexSchema [collection1] default search field in schema is text. WARNING: Deprecated, please use 'df' on request instead.
   [junit4]   2> 1378175 INFO  (coreLoadExecutor-7995-thread-1-processing-n:127.0.0.1:46131_) [n:127.0.0.1:46131_ c:collection1   x:collection1] o.a.s.s.IndexSchema Loaded schema test/1.0 with uniqueid field id
   [junit4]   2> 1378184 INFO  (coreLoadExecutor-7995-thread-1-processing-n:127.0.0.1:46131_) [n:127.0.0.1:46131_ c:collection1   x:collection1] o.a.s.c.CoreContainer Creating SolrCore 'collection1' using configuration from collection collection1
   [junit4]   2> 1378185 INFO  (coreLoadExecutor-7995-thread-1-processing-n:127.0.0.1:46131_) [n:127.0.0.1:46131_ c:collection1 s:shard2 r:core_node1 x:collection1] o.a.s.c.SolrCore [[collection1] ] Opening new SolrCore at [/x1/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-master/solr/build/solr-core/test/J0/temp/solr.cloud.LeaderFailoverAfterPartitionTest_D76B0C6F3772C6F9-001/shard-1-001/cores/collection1], dataDir=[/x1/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-master/solr/build/solr-core/test/J0/temp/solr.cloud.LeaderFailoverAfterPartitionTest_D76B0C6F3772C6F9-001/shard-1-001/cores/collection1/data/]
   [junit4]   2> 1378185 INFO  (coreLoadExecutor-7995-thread-1-processing-n:127.0.0.1:46131_) [n:127.0.0.1:46131_ c:collection1 s:shard2 r:core_node1 x:collection1] o.a.s.c.JmxMonitoredMap JMX monitoring is enabled. Adding Solr mbeans to JMX Server: com.sun.jmx.mbeanserver.JmxMBeanServer@4f453c43
   [junit4]   2> 1378186 INFO  (coreLoadExecutor-7995-thread-1-processing-n:127.0.0.1:46131_) [n:127.0.0.1:46131_ c:collection1 s:shard2 r:core_node1 x:collection1] o.a.s.u.RandomMergePolicy RandomMergePolicy wrapping class org.apache.lucene.index.AlcoholicMergePolicy: [AlcoholicMergePolicy: minMergeSize=0, mergeFactor=10, maxMergeSize=388857997, maxMergeSizeForForcedMerge=9223372036854775807, calibrateSizeByDeletes=true, maxMergeDocs=2147483647, maxCFSSegmentSizeMB=8.796093022207999E12, noCFSRatio=0.1]
   [junit4]   2> 1378189 WARN  (coreLoadExecutor-7995-thread-1-processing-n:127.0.0.1:46131_) [n:127.0.0.1:46131_ c:collection1 s:shard2 r:core_node1 x:collection1] o.a.s.c.RequestHandlers INVALID paramSet a in requestHandler {type = requestHandler,name = /dump,class = DumpRequestHandler,args = {defaults={a=A,b=B}}}
   [junit4]   2> 1378195 INFO  (coreLoadExecutor-7995-thread-1-processing-n:127.0.0.1:46131_) [n:127.0.0.1:46131_ c:collection1 s:shard2 r:core_node1 x:collection1] o.a.s.u.UpdateHandler Using UpdateLog implementation: org.apache.solr.update.UpdateLog
   [junit4]   2> 1378195 INFO  (coreLoadExecutor-7995-thread-1-processing-n:127.0.0.1:46131_) [n:127.0.0.1:46131_ c:collection1 s:shard2 r:core_node1 x:collection1] o.a.s.u.UpdateLog Initializing UpdateLog: dataDir= defaultSyncLevel=FLUSH numRecordsToKeep=100 maxNumLogsToKeep=10 numVersionBuckets=65536
   [junit4]   2> 1378195 INFO  (coreLoadExecutor-7995-thread-1-processing-n:127.0.0.1:46131_) [n:127.0.0.1:46131_ c:collection1 s:shard2 r:core_node1 x:collection1] o.a.s.u.CommitTracker Hard AutoCommit: disabled
   [junit4]   2> 1378195 INFO  (coreLoadExecutor-7995-thread-1-processing-n:127.0.0.1:46131_) [n:127.0.0.1:46131_ c:collection1 s:shard2 r:core_node1 x:collection1] o.a.s.u.CommitTracker Soft AutoCommit: disabled
   [junit4]   2> 1378196 INFO  (coreLoadExecutor-7995-thread-1-processing-n:127.0.0.1:46131_) [n:127.0.0.1:46131_ c:collection1 s:shard2 r:core_node1 x:collection1] o.a.s.u.RandomMergePolicy RandomMergePolicy wrapping class org.apache.lucene.index.TieredMergePolicy: [TieredMergePolicy: maxMergeAtOnce=13, maxMergeAtOnceExplicit=47, maxMergedSegmentMB=6.0244140625, floorSegmentMB=1.3544921875, forceMergeDeletesPctAllowed=24.812078960127543, segmentsPerTier=36.0, maxCFSSegmentSizeMB=8.796093022207999E12, noCFSRatio=0.7266835384905741
   [junit4]   2> 1378196 INFO  (coreLoadExecutor-7995-thread-1-processing-n:127.0.0.1:46131_) [n:127.0.0.1:46131_ c:collection1 s:shard2 r:core_node1 x:collection1] o.a.s.s.SolrIndexSearcher Opening [Searcher@efa850a[collection1] main]
   [junit4]   2> 1378197 INFO  (coreLoadExecutor-7995-thread-1-processing-n:127.0.0.1:46131_) [n:127.0.0.1:46131_ c:collection1 s:shard2 r:core_node1 x:collection1] o.a.s.r.ManagedResourceStorage Configured ZooKeeperStorageIO with znodeBase: /configs/conf1
   [junit4]   2> 1378198 INFO  (coreLoadExecutor-7995-thread-1-processing-n:127.0.0.1:46131_) [n:127.0.0.1:46131_ c:collection1 s:shard2 r:core_node1 x:collection1] o.a.s.r.ManagedResourceStorage Loaded null at path _rest_managed.json using ZooKeeperStorageIO:path=/configs/conf1
   [junit4]   2> 1378198 INFO  (coreLoadExecutor-7995-thread-1-processing-n:127.0.0.1:46131_) [n:127.0.0.1:46131_ c:collection1 s:shard2 r:core_node1 x:collection1] o.a.s.h.ReplicationHandler Commits will be reserved for  10000
   [junit4]   2> 1378198 INFO  (coreLoadExecutor-7995-thread-1-processing-n:127.0.0.1:46131_) [n:127.0.0.1:46131_ c:collection1 s:shard2 r:core_node1 x:collection1] o.a.s.u.UpdateLog Could not find max version in index or recent updates, using new clock 1548119813065277440
   [junit4]   2> 1378200 INFO  (searcherExecutor-7996-thread-1-processing-n:127.0.0.1:46131_ x:collection1 s:shard2 c:collection1 r:core_node1) [n:127.0.0.1:46131_ c:collection1 s:shard2 r:core_node1 x:collection1] o.a.s.c.SolrCore [collection1] Registered new searcher Searcher@efa850a[collection1] main{ExitableDirectoryReader(UninvertingDirectoryReader())}
   [junit4]   2> 1378206 INFO  (coreZkRegister-7990-thread-1-processing-n:127.0.0.1:46131_ x:collection1 s:shard2 c:collection1 r:core_node1) [n:127.0.0.1:46131_ c:collection1 s:shard2 r:core_node1 x:collection1] o.a.s.c.ShardLeaderElectionContext Enough replicas found to continue.
   [junit4]   2> 1378207 INFO  (coreZkRegister-7990-thread-1-processing-n:127.0.0.1:46131_ x:collection1 s:shard2 c:collection1 r:core_node1) [n:127.0.0.1:46131_ c:collection1 s:shard2 r:core_node1 x:collection1] o.a.s.c.ShardLeaderElectionContext I may be the new leader - try and sync
   [junit4]   2> 1378207 INFO  (coreZkRegister-7990-thread-1-processing-n:127.0.0.1:46131_ x:collection1 s:shard2 c:collection1 r:core_node1) [n:127.0.0.1:46131_ c:collection1 s:shard2 r:core_node1 x:collection1] o.a.s.c.SyncStrategy Sync replicas to http://127.0.0.1:46131/collection1/
   [junit4]   2> 1378207 INFO  (coreZkRegister-7990-thread-1-processing-n:127.0.0.1:46131_ x:collection1 s:shard2 c:collection1 r:core_node1) [n:127.0.0.1:46131_ c:collection1 s:shard2 r:core_node1 x:collection1] o.a.s.c.SyncStrategy Sync Success - now sync replicas to me
   [junit4]   2> 1378207 INFO  (coreZkRegister-7990-thread-1-processing-n:127.0.0.1:46131_ x:collection1 s:shard2 c:collection1 r:core_node1) [n:127.0.0.1:46131_ c:collection1 s:shard2 r:core_node1 x:collection1] o.a.s.c.SyncStrategy http://127.0.0.1:46131/collection1/ has no replicas
   [junit4]   2> 1378209 INFO  (coreZkRegister-7990-thread-1-processing-n:127.0.0.1:46131_ x:collection1 s:shard2 c:collection1 r:core_node1) [n:127.0.0.1:46131_ c:collection1 s:shard2 r:core_node1 x:collection1] o.a.s.c.ShardLeaderElectionContext I am the new leader: http://127.0.0.1:46131/collection1/ shard2
   [junit4]   2> 1378311 INFO  (zkCallback-13970-thread-1-processing-n:127.0.0.1:46131_) [n:127.0.0.1:46131_    ] o.a.s.c.c.ZkStateReader A cluster state change: [WatchedEvent state:SyncConnected type:NodeDataChanged path:/collections/collection1/state.json] for collection [collection1] has occurred - updating... (live nodes size: [2])
   [junit4]   2> 1378360 INFO  (coreZkRegister-7990-thread-1-processing-n:127.0.0.1:46131_ x:collection1 s:shard2 c:collection1 r:core_node1) [n:127.0.0.1:46131_ c:collection1 s:shard2 r:core_node1 x:collection1] o.a.s.c.ZkController I am the leader, no recovery necessary
   [junit4]   2> 1378463 INFO  (zkCallback-13970-thread-1-processing-n:127.0.0.1:46131_) [n:127.0.0.1:46131_    ] o.a.s.c.c.ZkStateReader A cluster state change: [WatchedEvent state:SyncConnected type:NodeDataChanged path:/collections/collection1/state.json] for collection [collection1] has occurred - updating... (live nodes size: [2])
   [junit4]   2> 1378590 INFO  (TEST-LeaderFailoverAfterPartitionTest.test-seed#[D76B0C6F3772C6F9]) [    ] o.a.s.SolrTestCaseJ4 Writing core.properties file to /x1/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-master/solr/build/solr-core/test/J0/temp/solr.cloud.LeaderFailoverAfterPartitionTest_D76B0C6F3772C6F9-001/shard-2-001/cores/collection1
   [junit4]   2> 1378590 INFO  (TEST-LeaderFailoverAfterPartitionTest.test-seed#[D76B0C6F3772C6F9]) [    ] o.a.s.c.AbstractFullDistribZkTestBase create jetty 2 in directory /x1/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-master/solr/build/solr-core/test/J0/temp/solr.cloud.LeaderFailoverAfterPartitionTest_D76B0C6F3772C6F9-001/shard-2-001
   [junit4]   2> 1378591 INFO  (TEST-LeaderFailoverAfterPartitionTest.test-seed#[D76B0C6F3772C6F9]) [    ] o.e.j.s.Server jetty-9.3.8.v20160314
   [junit4]   2> 1378592 INFO  (TEST-LeaderFailoverAfterPartitionTest.test-seed#[D76B0C6F3772C6F9]) [    ] o.e.j.s.h.ContextHandler Started o.e.j.s.ServletContextHandler@5dfecf80{/,null,AVAILABLE}
   [junit4]   2> 1378592 INFO  (TEST-LeaderFailoverAfterPartitionTest.test-seed#[D76B0C6F3772C6F9]) [    ] o.e.j.s.ServerConnector Started ServerConnector@27b744b2{HTTP/1.1,[http/1.1]}{127.0.0.1:39214}
   [junit4]   2> 1378592 INFO  (TEST-LeaderFailoverAfterPartitionTest.test-seed#[D76B0C6F3772C6F9]) [    ] o.e.j.s.Server Started @1380275ms
   [junit4]   2> 1378592 INFO  (TEST-LeaderFailoverAfterPartitionTest.test-seed#[D76B0C6F3772C6F9]) [    ] o.a.s.c.s.e.JettySolrRunner Jetty properties: {solr.data.dir=/x1/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-master/solr/build/solr-core/test/J0/temp/solr.cloud.LeaderFailoverAfterPartitionTest_D76B0C6F3772C6F9-001/tempDir-001/jetty2, solrconfig=solrconfig.xml, hostContext=/, hostPort=37930, coreRootDirectory=/x1/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-master/solr/build/solr-core/test/J0/temp/solr.cloud.LeaderFailoverAfterPartitionTest_D76B0C6F3772C6F9-001/shard-2-001/cores}
   [junit4]   2> 1378592 INFO  (TEST-LeaderFailoverAfterPartitionTest.test-seed#[D76B0C6F3772C6F9]) [    ] o.a.s.s.SolrDispatchFilter  ___      _       Welcome to Apache Solr? version 7.0.0
   [junit4]   2> 1378593 INFO  (TEST-LeaderFailoverAfterPartitionTest.test-seed#[D76B0C6F3772C6F9]) [    ] o.a.s.s.SolrDispatchFilter / __| ___| |_ _   Starting in cloud mode on port null
   [junit4]   2> 1378593 INFO  (TEST-LeaderFailoverAfterPartitionTest.test-seed#[D76B0C6F3772C6F9]) [    ] o.a.s.s.SolrDispatchFilter \__ \/ _ \ | '_|  Install dir: null
   [junit4]   2> 1378593 INFO  (TEST-LeaderFailoverAfterPartitionTest.test-seed#[D76B0C6F3772C6F9]) [    ] o.a.s.s.SolrDispatchFilter |___/\___/_|_|    Start time: 2016-10-13T23:41:44.835Z
   [junit4]   2> 1378597 INFO  (TEST-LeaderFailoverAfterPartitionTest.test-seed#[D76B0C6F3772C6F9]) [    ] o.a.s.s.SolrDispatchFilter Loading solr.xml from SolrHome (not found in ZooKeeper)
   [junit4]   2> 1378597 INFO  (TEST-LeaderFailoverAfterPartitionTest.test-seed#[D76B0C6F3772C6F9]) [    ] o.a.s.c.SolrXmlConfig Loading container configuration from /x1/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-master/solr/build/solr-core/test/J0/temp/solr.cloud.LeaderFailoverAfterPartitionTest_D76B0C6F3772C6F9-001/shard-2-001/solr.xml
   [junit4]   2> 1378606 INFO  (TEST-LeaderFailoverAfterPartitionTest.test-seed#[D76B0C6F3772C6F9]) [    ] o.a.s.c.ZkContainer Zookeeper client=127.0.0.1:51607/solr
   [junit4]   2> 1378614 INFO  (TEST-LeaderFailoverAfterPartitionTest.test-seed#[D76B0C6F3772C6F9]) [n:127.0.0.1:37930_    ] o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (0) -> (2)
   [junit4]   2> 1378617 INFO  (TEST-LeaderFailoverAfterPartitionTest.test-seed#[D76B0C6F3772C6F9]) [n:127.0.0.1:37930_    ] o.a.s.c.ZkController Register node as live in ZooKeeper:/live_nodes/127.0.0.1:37930_
   [junit4]   2> 1378618 INFO  (zkCallback-13960-thread-2-processing-n:127.0.0.1:58767_) [n:127.0.0.1:58767_    ] o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (2) -> (3)
   [junit4]   2> 1378618 INFO  (zkCallback-13970-thread-1-processing-n:127.0.0.1:46131_) [n:127.0.0.1:46131_    ] o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (2) -> (3)
   [junit4]   2> 1378622 INFO  (zkCallback-13964-thread-1) [    ] o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (2) -> (3)
   [junit4]   2> 1378622 INFO  (zkCallback-13976-thread-1-processing-n:127.0.0.1:37930_) [n:127.0.0.1:37930_    ] o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (2) -> (3)
   [junit4]   2> 1378626 INFO  (TEST-LeaderFailoverAfterPartitionTest.test-seed#[D76B0C6F3772C6F9]) [n:127.0.0.1:37930_    ] o.a.s.c.CorePropertiesLocator Found 1 core definitions underneath /x1/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-master/solr/build/solr-core/test/J0/temp/solr.cloud.LeaderFailoverAfterPartitionTest_D76B0C6F3772C6F9-001/shard-2-001/cores
   [junit4]   2> 1378626 INFO  (TEST-LeaderFailoverAfterPartitionTest.test-seed#[D76B0C6F3772C6F9]) [n:127.0.0.1:37930_    ] o.a.s.c.CorePropertiesLocator Cores are: [collection1]
   [junit4]   2> 1378630 INFO  (OverseerStateUpdate-96757488117743620-127.0.0.1:58767_-n_0000000000) [n:127.0.0.1:58767_    ] o.a.s.c.o.ReplicaMutator Assigning new node to shard shard=shard1
   [junit4]   2> 1378732 INFO  (zkCallback-13976-thread-2-processing-n:127.0.0.1:37930_) [n:127.0.0.1:37930_    ] o.a.s.c.c.ZkStateReader A cluster state change: [WatchedEvent state:SyncConnected type:NodeDataChanged path:/collections/collection1/state.json] for collection [collection1] has occurred - updating... (live nodes size: [3])
   [junit4]   2> 1378732 INFO  (zkCallback-13970-thread-1-processing-n:127.0.0.1:46131_) [n:127.0.0.1:46131_    ] o.a.s.c.c.ZkStateReader A cluster state change: [WatchedEvent state:SyncConnected type:NodeDataChanged path:/collections/collection1/state.json] for collection [collection1] has occurred - updating... (live nodes size: [3])
   [junit4]   2> 1379644 WARN  (coreLoadExecutor-8006-thread-1-processing-n:127.0.0.1:37930_) [n:127.0.0.1:37930_ c:collection1   x:collection1] o.a.s.c.Config Beginning with Solr 5.5, <mergePolicy> is deprecated, use <mergePolicyFactory> instead.
   [junit4]   2> 1379645 INFO  (coreLoadExecutor-8006-thread-1-processing-n:127.0.0.1:37930_) [n:127.0.0.1:37930_ c:collection1   x:collection1] o.a.s.c.SolrConfig Using Lucene MatchVersion: 7.0.0
   [junit4]   2> 1379671 INFO  (coreLoadExecutor-8006-thread-1-processing-n:127.0.0.1:37930_) [n:127.0.0.1:37930_ c:collection1   x:collection1] o.a.s.s.IndexSchema [collection1] Schema name=test
   [junit4]   2> 1379746 WARN  (coreLoadExecutor-8006-thread-1-processing-n:127.0.0.1:37930_) [n:127.0.0.1:37930_ c:collection1   x:collection1] o.a.s.s.IndexSchema [collection1] default search field in schema is text. WARNING: Deprecated, please use 'df' on request instead.
   [junit4]   2> 1379748 INFO  (coreLoadExecutor-8006-thread-1-processing-n:127.0.0.1:37930_) [n:127.0.0.1:37930_ c:collection1   x:collection1] o.a.s.s.IndexSchema Loaded schema test/1.0 with uniqueid field id
   [junit4]   2> 1379759 INFO  (coreLoadExecutor-8006-thread-1-processing-n:127.0.0.1:37930_) [n:127.0.0.1:37930_ c:collection1   x:collection1] o.a.s.c.CoreContainer Creating SolrCore 'collection1' using configuration from collection collection1
   [junit4]   2> 1379759 INFO  (coreLoadExecutor-8006-thread-1-processing-n:127.0.0.1:37930_) [n:127.0.0.1:37930_ c:collection1 s:shard1 r:core_node2 x:collection1] o.a.s.c.SolrCore [[collection1] ] Opening new SolrCore at [/x1/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-master/solr/build/solr-core/test/J0/temp/solr.cloud.LeaderFailoverAfterPartitionTest_D76B0C6F3772C6F9-001/shard-2-001/cores/collection1], dataDir=[/x1/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-master/solr/build/solr-core/test/J0/temp/solr.cloud.LeaderFailoverAfterPartitionTest_D76B0C6F3772C6F9-001/shard-2-001/cores/collection1/data/]
   [junit4]   2> 1379760 INFO  (coreLoadExecutor-8006-thread-1-processing-n:127.0.0.1:37930_) [n:127.0.0.1:37930_ c:collection1 s:shard1 r:core_node2 x:collection1] o.a.s.c.JmxMonitoredMap JMX monitoring is enabled. Adding Solr mbeans to JMX Server: com.sun.jmx.mbeanserver.JmxMBeanServer@4f453c43
   [junit4]   2> 1379761 INFO  (coreLoadExecutor-8006-thread-1-processing-n:127.0.0.1:37930_) [n:127.0.0.1:37930_ c:collection1 s:shard1 r:core_node2 x:collection1] o.a.s.u.RandomMergePolicy RandomMergePolicy wrapping class org.apache.lucene.index.AlcoholicMergePolicy: [AlcoholicMergePolicy: minMergeSize=0, mergeFactor=10, maxMergeSize=388857997, maxMergeSizeForForcedMerge=9223372036854775807, calibrateSizeByDeletes=true, maxMergeDocs=2147483647, maxCFSSegmentSizeMB=8.796093022207999E12, noCFSRatio=0.1]
   [junit4]   2> 1379765 WARN  (coreLoadExecutor-8006-thread-1-processing-n:127.0.0.1:37930_) [n:127.0.0.1:37930_ c:collection1 s:shard1 r:core_node2 x:collection1] o.a.s.c.RequestHandlers INVALID paramSet a in requestHandler {type = requestHandler,name = /dump,class = DumpRequestHandler,args = {defaults={a=A,b=B}}}
   [junit4]   2> 1379771 INFO  (coreLoadExecutor-8006-thread-1-processing-n:127.0.0.1:37930_) [n:127.0.0.1:37930_ c:collection1 s:shard1 r:core_node2 x:collection1] o.a.s.u.UpdateHandler Using UpdateLog implementation: org.apache.solr.update.UpdateLog
   [junit4]   2> 1379771 INFO  (coreLoadExecutor-8006-thread-1-processing-n:127.0.0.1:37930_) [n:127.0.0.1:37930_ c:collection1 s:shard1 r:core_node2 x:collection1] o.a.s.u.UpdateLog Initializing UpdateLog: dataDir= defaultSyncLevel=FLUSH numRecordsToKeep=100 maxNumLogsToKeep=10 numVersionBuckets=65536
   [junit4]   2> 1379796 INFO  (coreLoadExecutor-8006-thread-1-processing-n:127.0.0.1:37930_) [n:127.0.0.1:37930_ c:collection1 s:shard1 r:core_node2 x:collection1] o.a.s.u.CommitTracker Hard AutoCommit: disabled
   [junit4]   2> 1379796 INFO  (coreLoadExecutor-8006-thread-1-processing-n:127.0.0.1:37930_) [n:127.0.0.1:37930_ c:collection1 s:shard1 r:core_node2 x:collection1] o.a.s.u.CommitTracker Soft AutoCommit: disabled
   [junit4]   2> 1379797 INFO  (coreLoadExecutor-8006-thread-1-processing-n:127.0.0.1:37930_) [n:127.0.0.1:37930_ c:collection1 s:shard1 r:core_node2 x:collection1] o.a.s.u.RandomMergePolicy RandomMergePolicy wrapping class org.apache.lucene.index.TieredMergePolicy: [TieredMergePolicy: maxMergeAtOnce=13, maxMergeAtOnceExplicit=47, maxMergedSegmentMB=6.0244140625, floorSegmentMB=1.3544921875, forceMergeDeletesPctAllowed=24.812078960127543, segmentsPerTier=36.0, maxCFSSegmentSizeMB=8.796093022207999E12, noCFSRatio=0.7266835384905741
   [junit4]   2> 1379797 INFO  (coreLoadExecutor-8006-thread-1-processing-n:127.0.0.1:37930_) [n:127.0.0.1:37930_ c:collection1 s:shard1 r:core_node2 x:collection1] o.a.s.s.SolrIndexSearcher Opening [Searcher@6009131[collection1] main]
   [junit4]   2> 1379798 INFO  (coreLoadExecutor-8006-thread-1-processing-n:127.0.0.1:37930_) [n:127.0.0.1:37930_ c:collection1 s:shard1 r:core_node2 x:collection1] o.a.s.r.ManagedResourceStorage Configured ZooKeeperStorageIO with znodeBase: /configs/conf1
   [junit4]   2> 1379798 INFO  (coreLoadExecutor-8006-thread-1-processing-n:127.0.0.1:37930_) [n:127.0.0.1:37930_ c:collection1 s:shard1 r:core_node2 x:collection1] o.a.s.r.ManagedResourceStorage Loaded null at path _rest_managed.json using ZooKeeperStorageIO:path=/configs/conf1
   [junit4]   2> 1379798 INFO  (coreLoadExecutor-8006-thread-1-processing-n:127.0.0.1:37930_) [n:127.0.0.1:37930_ c:collection1 s:shard1 r:core_node2 x:collection1] o.a.s.h.ReplicationHandler Commits will be reserved for  10000
   [junit4]   2> 1379799 INFO  (searcherExecutor-8007-thread-1-processing-n:127.0.0.1:37930_ x:collection1 s:shard1 c:collection1 r:core_node2) [n:127.0.0.1:37930_ c:collection1 s:shard1 r:core_node2 x:collection1] o.a.s.c.SolrCore [collection1] Registered new searcher Searcher@6009131[collection1] main{ExitableDirectoryReader(UninvertingDirectoryReader())}
   [junit4]   2> 1379799 INFO  (coreLoadExecutor-8006-thread-1-processing-n:127.0.0.1:37930_) [n:127.0.0.1:37930_ c:collection1 s:shard1 r:core_node2 x:collection1] o.a.s.u.UpdateLog Could not find max version in index or recent updates, using new clock 1548119814744047616
   [junit4]   2> 1379804 INFO  (coreZkRegister-8001-thread-1-processing-n:127.0.0.1:37930_ x:collection1 s:shard1 c:collection1 r:core_node2) [n:127.0.0.1:37930_ c:collection1 s:shard1 r:core_node2 x:collection1] o.a.s.c.ShardLeaderElectionContext Enough replicas found to continue.
   [junit4]   2> 1379804 INFO  (coreZkRegister-8001-thread-1-processing-n:127.0.0.1:37930_ x:collection1 s:shard1 c:collection1 r:core_node2) [n:127.0.0.1:37930_ c:collection1 s:shard1 r:core_node2 x:collection1] o.a.s.c.ShardLeaderElectionContext I may be the new leader - try and sync
   [junit4]   2> 1379804 INFO  (coreZkRegister-8001-thread-1-processing-n:127.0.0.1:37930_ x:collection1 s:shard1 c:collection1 r:core_node2) [n:127.0.0.1:37930_ c:collection1 s:shard1 r:core_node2 x:collection1] o.a.s.c.SyncStrategy Sync replicas to http://127.0.0.1:37930/collection1/
   [junit4]   2> 1379804 INFO  (coreZkRegister-8001-thread-1-processing-n:127.0.0.1:37930_ x:collection1 s:shard1 c:collection1 r:core_node2) [n:127.0.0.1:37930_ c:collection1 s:shard1 r:core_node2 x:collection1] o.a.s.c.SyncStrategy Sync Success - now sync replicas to me
   [junit4]   2> 1379804 INFO  (coreZkRegister-8001-thread-1-processing-n:127.0.0.1:37930_ x:collection1 s:shard1 c:collection1 r:core_node2) [n:127.0.0.1:37930_ c:collection1 s:shard1 r:core_node2 x:collection1] o.a.s.c.SyncStrategy http://127.0.0.1:37930/collection1/ has no replicas
   [junit4]   2> 1379807 INFO  (coreZkRegister-8001-thread-1-processing-n:127.0.0.1:37930_ x:collection1 s:shard1 c:collection1 r:core_node2) [n:127.0.0.1:37930_ c:collection1 s:shard1 r:core_node2 x:collection1] o.a.s.c.ShardLeaderElectionContext I am the new leader: http://127.0.0.1:37930/collection1/ shard1
   [junit4]   2> 1379909 INFO  (zkCallback-13970-thread-1-processing-n:127.0.0.1:46131_) [n:127.0.0.1:46131_    ] o.a.s.c.c.ZkStateReader A cluster state change: [WatchedEvent state:SyncConnected type:NodeDataChanged path:/collections/collection1/state.json] for collection [collection1] has occurred - updating... (live nodes size: [3])
   [junit4]   2> 1379909 INFO  (zkCallback-13976-thread-2-processing-n:127.0.0.1:37930_) [n:127.0.0.1:37930_    ] o.a.s.c.c.ZkStateReader A cluster state change: [WatchedEvent state:SyncConnected type:NodeDataChanged path:/collections/collection1/state.json] for collection [collection1] has occurred - updating... (live nodes size: [3])
   [junit4]   2> 1379958 INFO  (coreZkRegister-8001-thread-1-processing-n:127.0.0.1:37930_ x:collection1 s:shard1 c:collection1 r:core_node2) [n:127.0.0.1:37930_ c:collection1 s:shard1 r:core_node2 x:collection1] o.a.s.c.ZkController I am the leader, no recovery necessary
   [junit4]   2> 1380061 INFO  (zkCallback-13970-thread-1-processing-n:127.0.0.1:46131_) [n:127.0.0.1:46131_    ] o.a.s.c.c.ZkStateReader A cluster state change: [WatchedEvent state:SyncConnected type:NodeDataChanged path:/collections/collection1/state.json] for collection [collection1] has occurred - updating... (live nodes size: [3])
   [junit4]   2> 1380062 INFO  (zkCallback-13976-thread-2-processing-n:127.0.0.1:37930_) [n:127.0.0.1:37930_    ] o.a.s.c.c.ZkStateReader A cluster state change: [WatchedEvent state:SyncConnected type:NodeDataChanged path:/collections/collection1/state.json] for collection [collection1] has occurred - updating... (live nodes size: [3])
   [junit4]   2> 1380190 INFO  (TEST-LeaderFailoverAfterPartitionTest.test-seed#[D76B0C6F3772C6F9]) [    ] o.a.s.SolrTestCaseJ4 Writing core.properties file to /x1/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-master/solr/build/solr-core/test/J0/temp/solr.cloud.LeaderFailoverAfterPartitionTest_D76B0C6F3772C6F9-001/shard-3-001/cores/collection1
   [junit4]   2> 1380190 INFO  (TEST-LeaderFailoverAfterPartitionTest.test-seed#[D76B0C6F3772C6F9]) [    ] o.a.s.c.AbstractFullDistribZkTestBase create jetty 3 in directory /x1/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-master/solr/build/solr-core/test/J0/temp/solr.cloud.LeaderFailoverAfterPartitionTest_D76B0C6F3772C6F9-001/shard-3-001
   [junit4]   2> 1380191 INFO  (TEST-LeaderFailoverAfterPartitionTest.test-seed#[D76B0C6F3772C6F9]) [    ] o.e.j.s.Server jetty-9.3.8.v20160314
   [junit4]   2> 1380192 INFO  (TEST-LeaderFailoverAfterPartitionTest.test-seed#[D76B0C6F3772C6F9]) [    ] o.e.j.s.h.ContextHandler Started o.e.j.s.ServletContextHandler@3bc14d56{/,null,AVAILABLE}
   [junit4]   2> 1380193 INFO  (TEST-LeaderFailoverAfterPartitionTest.test-seed#[D76B0C6F3772C6F9]) [    ] o.e.j.s.ServerConnector Started ServerConnector@4ba975a3{HTTP/1.1,[http/1.1]}{127.0.0.1:50591}
   [junit4]   2> 1380193 INFO  (TEST-LeaderFailoverAfterPartitionTest.test-seed#[D76B0C6F3772C6F9]) [    ] o.e.j.s.Server Started @1381876ms
   [junit4]   2> 1380193 INFO  (TEST-LeaderFailoverAfterPartitionTest.test-seed#[D76B0C6F3772C6F9]) [    ] o.a.s.c.s.e.JettySolrRunner Jetty properties: {solr.data.dir=/x1/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-master/solr/build/solr-core/test/J0/temp/solr.cloud.LeaderFailoverAfterPartitionTest_D76B0C6F3772C6F9-001/tempDir-001/jetty3, solrconfig=solrconfig.xml, hostContext=/, hostPort=41673, coreRootDirectory=/x1/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-master/solr/build/solr-core/test/J0/temp/solr.cloud.LeaderFailoverAfterPartitionTest_D76B0C6F3772C6F9-001/shard-3-001/cores}
   [junit4]   2> 1380194 INFO  (TEST-LeaderFailoverAfterPartitionTest.test-seed#[D76B0C6F3772C6F9]) [    ] o.a.s.s.SolrDispatchFilter  ___      _       Welcome to Apache Solr? version 7.0.0
   [junit4]   2> 1380194 INFO  (TEST-LeaderFailoverAfterPartitionTest.test-seed#[D76B0C6F3772C6F9]) [    ] o.a.s.s.SolrDispatchFilter / __| ___| |_ _   Starting in cloud mode on port null
   [junit4]   2> 1380194 INFO  (TEST-LeaderFailoverAfterPartitionTest.test-seed#[D76B0C6F3772C6F9]) [    ] o.a.s.s.SolrDispatchFilter \__ \/ _ \ | '_|  Install dir: null
   [junit4]   2> 1380194 INFO  (TEST-LeaderFailoverAfterPartitionTest.test-seed#[D76B0C6F3772C6F9]) [    ] o.a.s.s.SolrDispatchFilter |___/\___/_|_|    Start time: 2016-10-13T23:41:46.436Z
   [junit4]   2> 1380197 INFO  (TEST-LeaderFailoverAfterPartitionTest.test-seed#[D76B0C6F3772C6F9]) [    ] o.a.s.s.SolrDispatchFilter Loading solr.xml from SolrHome (not found in ZooKeeper)
   [junit4]   2> 1380197 INFO  (TEST-LeaderFailoverAfterPartitionTest.test-seed#[D76B0C6F3772C6F9]) [    ] o.a.s.c.SolrXmlConfig Loading container configuration from /x1/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-master/solr/build/solr-core/test/J0/temp/solr.cloud.LeaderFailoverAfterPartitionTest_D76B0C6F3772C6F9-001/shard-3-001/solr.xml
   [junit4]   2> 1380205 INFO  (TEST-LeaderFailoverAfterPartitionTest.test-seed#[D76B0C6F3772C6F9]) [    ] o.a.s.c.ZkContainer Zookeeper client=127.0.0.1:51607/solr
   [junit4]   2> 1380213 INFO  (TEST-LeaderFailoverAfterPartitionTest.test-seed#[D76B0C6F3772C6F9]) [n:127.0.0.1:41673_    ] o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (0) -> (3)
   [junit4]   2> 1380217 INFO  (TEST-LeaderFailoverAfterPartitionTest.test-seed#[D76B0C6F3772C6F9]) [n:127.0.0.1:41673_    ] o.a.s.c.ZkController Register node as live in ZooKeeper:/live_nodes/127.0.0.1:41673_
   [junit4]   2> 1380218 INFO  (zkCallback-13976-thread-2-processing-n:127.0.0.1:37930_) [n:127.0.0.1:37930_    ] o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (3) -> (4)
   [junit4]   2> 1380218 INFO  (zkCallback-13970-thread-1-processing-n:127.0.0.1:46131_) [n:127.0.0.1:46131_    ] o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (3) -> (4)
   [junit4]   2> 1380218 INFO  (zkCallback-13964-thread-2) [    ] o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (3) -> (4)
   [junit4]   2> 1380218 INFO  (zkCallback-13960-thread-1-processing-n:127.0.0.1:58767_) [n:127.0.0.1:58767_    ] o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (3) -> (4)
   [junit4]   2> 1380219 INFO  (zkCallback-13982-thread-1-processing-n:127.0.0.1:41673_) [n:127.0.0.1:41673_    ] o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (3) -> (4)
   [junit4]   2> 1380228 INFO  (TEST-LeaderFailoverAfterPartitionTest.test-seed#[D76B0C6F3772C6F9]) [n:127.0.0.1:41673_    ] o.a.s.c.CorePropertiesLocator Found 1 core definitions underneath /x1/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-master/solr/build/solr-core/test/J0/temp/solr.cloud.LeaderFailoverAfterPartitionTest_D76B0C6F3772C6F9-001/shard-3-001/cores
   [junit4]   2> 1380228 INFO  (TEST-LeaderFailoverAfterPartitionTest.test-seed#[D76B0C6F3772C6F9]) [n:127.0.0.1:41673_    ] o.a.s.c.CorePropertiesLocator Cores are: [collection1]
   [junit4]   2> 1380230 INFO  (OverseerStateUpdate-96757488117743620-127.0.0.1:58767_-n_0000000000) [n:127.0.0.1:58767_    ] o.a.s.c.o.ReplicaMutator Assigning new node to shard shard=shard2
   [junit4]   2> 1380332 INFO  (zkCallback-13970-thread-1-processing-n:127.0.0.1:46131_) [n:127.0.0.1:46131_    ] o.a.s.c.c.ZkStateReader A cluster state change: [WatchedEvent state:SyncConnected type:NodeDataChanged path:/collections/collection1/state.json] for collection [collection1] has occurred - updating... (live nodes size: [4])
   [junit4]   2> 1380332 INFO  (zkCallback-13976-thread-2-processing-n:127.0.0.1:37930_) [n:127.0.0.1:37930_    ] o.a.s.c.c.ZkStateReader A cluster state change: [WatchedEvent state:SyncConnected type:NodeDataChanged path:/collections/collection1/state.json] for collection [collection1] has occurred - updating... (live nodes size: [4])
   [junit4]   2> 1380332 INFO  (zkCallback-13982-thread-2-processing-n:127.0.0.1:41673_) [n:127.0.0.1:41673_    ] o.a.s.c.c.ZkStateReader A cluster state change: [WatchedEvent state:SyncConnected type:NodeDataChanged path:/collections/collection1/state.json] for collection [collection1] has occurred - updating... (live nodes size: [4])
   [junit4]   2> 1381239 WARN  (coreLoadExecutor-8017-thread-1-processing-n:127.0.0.1:41673_) [n:127.0.0.1:41673_ c:collection1   x:collection1] o.a.s.c.Config Beginning with Solr 5.5, <mergePolicy> is deprecated, use <mergePolicyFactory> instead.
   [junit4]   2> 1381240 INFO  (coreLoadExecutor-8017-thread-1-processing-n:127.0.0.1:41673_) [n:127.0.0.1:41673_ c:collection1   x:collection1] o.a.s.c.SolrConfig Using Lucene MatchVersion: 7.0.0
   [junit4]   2> 1381252 INFO  (coreLoadExecutor-8017-thread-1-processing-n:127.0.0.1:41673_) [n:127.0.0.1:41673_ c:collection1   x:collection1] o.a.s.s.IndexSchema [collection1] Schema name=test
   [junit4]   2> 1382252 ERROR (Finalizer) [    ] o.a.s.u.ConcurrentLFUCache ConcurrentLFUCache was not destroyed prior to finalize(), indicates a bug -- POSSIBLE RESOURCE LEAK!!!
   [junit4]   2> 1382285 WARN  (coreLoadExecutor-8017-thread-1-processing-n:127.0.0.1:41673_) [n:127.0.0.1:41673_ c:collection1   x:collection1] o.a.s.s.IndexSchema [collection1] default search field in schema is text. WARNING: Deprecated, please use 'df' on request instead.
   [junit4]   2> 1382288 INFO  (coreLoadExecutor-8017-thread-1-processing-n:127.0.0.1:41673_) [n:127.0.0.1:41673_ c:collection1   x:collection1] o.a.s.s.IndexSchema Loaded schema test/1.0 with uniqueid field id
   [junit4]   2> 1382295 INFO  (coreLoadExecutor-8017-thread-1-processing-n:127.0.0.1:41673_) [n:127.0.0.1:41673_ c:collection1   x:collection1] o.a.s.c.CoreContainer Creating SolrCore 'collection1' using configuration from collection collection1
   [junit4]   2> 1382295 INFO  (coreLoadExecutor-8017-thread-1-processing-n:127.0.0.1:41673_) [n:127.0.0.1:41673_ c:collection1 s:shard2 r:core_node3 x:collection1] o.a.s.c.SolrCore [[collection1] ] Opening new SolrCore at [/x1/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-master/solr/build/solr-core/test/J0/temp/solr.cloud.LeaderFailoverAfterPartitionTest_D76B0C6F3772C6F9-001/shard-3-001/cores/collection1], dataDir=[/x1/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-master/solr/build/solr-core/test/J0/temp/solr.cloud.LeaderFailoverAfterPartitionTest_D76B0C6F3772C6F9-001/shard-3-001/cores/collection1/data/]
   [junit4]   2> 1382296 INFO  (coreLoadExecutor-8017-thread-1-processing-n:127.0.0.1:41673_) [n:127.0.0.1:41673_ c:collection1 s:shard2 r:core_node3 x:collection1] o.a.s.c.JmxMonitoredMap JMX monitoring is enabled. Adding Solr mbeans to JMX Server: com.sun.jmx.mbeanserver.JmxMBeanServer@4f453c43
   [junit4]   2> 1382296 INFO  (coreLoadExecutor-8017-thread-1-processing-n:127.0.0.1:41673_) [n:127.0.0.1:41673_ c:collection1 s:shard2 r:core_node3 x:collection1] o.a.s.u.RandomMergePolicy RandomMergePolicy wrapping class org.apache.lucene.index.AlcoholicMergePolicy: [AlcoholicMergePolicy: minMergeSize=0, mergeFactor=10, maxMergeSize=388857997, maxMergeSizeForForcedMerge=9223372036854775807, calibrateSizeByDeletes=true, maxMergeDocs=2147483647, maxCFSSegmentSizeMB=8.796093022207999E12, noCFSRatio=0.1]
   [junit4]   2> 1382300 WARN  (coreLoadExecutor-8017-thread-1-processing-n:127.0.0.1:41673_) [n:127.0.0.1:41673_ c:collection1 s:shard2 r:core_node3 x:collection1] o.a.s.c.RequestHandlers INVALID paramSet a in requestHandler {type = requestHandler,name = /dump,class = DumpRequestHandler,args = {defaults={a=A,b=B}}}
   [junit4]   2> 1382306 INFO  (coreLoadExecutor-8017-thread-1-processing-n:127.0.0.1:41673_) [n:127.0.0.1:41673_ c:collection1 s:shard2 r:core_node3 x:collection1] o.a.s.u.UpdateHandler Using UpdateLog implementation: org.apache.solr.update.UpdateLog
   [junit4]   2> 1382306 INFO  (coreLoadExecutor-8017-thread-1-processing-n:127.0.0.1:41673_) [n:127.0.0.1:41673_ c:collection1 s:shard2 r:core_node3 x:collection1] o.a.s.u.UpdateLog Initializing UpdateLog: dataDir= defaultSyncLevel=FLUSH numRecordsToKeep=100 maxNumLogsToKeep=10 numVersionBuckets=65536
   [junit4]   2> 1382306 INFO  (coreLoadExecutor-8017-thread-1-processing-n:127.0.0.1:41673_) [n:127.0.0.1:41673_ c:collection1 s:shard2 r:core_node3 x:collection1] o.a.s.u.CommitTracker Hard AutoCommit: disabled
   [junit4]   2> 1382306 INFO  (coreLoadExecutor-8017-thread-1-processing-n:127.0.0.1:41673_) [n:127.0.0.1:41673_ c:collection1 s:shard2 r:core_node3 x:collection1] o.a.s.u.CommitTracker Soft AutoCommit: disabled
   [junit4]   2> 1382307 INFO  (coreLoadExecutor-8017-thread-1-processing-n:127.0.0.1:41673_) [n:127.0.0.1:41673_ c:collection1 s:shard2 r:core_node3 x:collection1] o.a.s.u.RandomMergePolicy RandomMergePolicy wrapping class org.apache.lucene.index.TieredMergePolicy: [TieredMergePolicy: maxMergeAtOnce=13, maxMergeAtOnceExplicit=47, maxMergedSegmentMB=6.0244140625, floorSegmentMB=1.3544921875, forceMergeDeletesPctAllowed=24.812078960127543, segmentsPerTier=36.0, maxCFSSegmentSizeMB=8.796093022207999E12, noCFSRatio=0.7266835384905741
   [junit4]   2> 1382308 INFO  (coreLoadExecutor-8017-thread-1-processing-n:127.0.0.1:41673_) [n:127.0.0.1:41673_ c:collection1 s:shard2 r:core_node3 x:collection1] o.a.s.s.SolrIndexSearcher Opening [Searcher@3af9a6d[collection1] main]
   [junit4]   2> 1382309 INFO  (coreLoadExecutor-8017-thread-1-processing-n:127.0.0.1:41673_) [n:127.0.0.1:41673_ c:collection1 s:shard2 r:core_node3 x:collection1] o.a.s.r.ManagedResourceStorage Configured ZooKeeperStorageIO with znodeBase: /configs/conf1
   [junit4]   2> 1382309 INFO  (coreLoadExecutor-8017-thread-1-processing-n:127.0.0.1:41673_) [n:127.0.0.1:41673_ c:collection1 s:shard2 r:core_node3 x:collection1] o.a.s.r.ManagedResourceStorage Loaded null at path _rest_managed.json using ZooKeeperStorageIO:path=/configs/conf1
   [junit4]   2> 1382309 INFO  (coreLoadExecutor-8017-thread-1-processing-n:127.0.0.1:41673_) [n:127.0.0.1:41673_ c:collection1 s:shard2 r:core_node3 x:collection1] o.a.s.h.ReplicationHandler Commits will be reserved for  10000
   [junit4]   2> 1382310 INFO  (searcherExecutor-8018-thread-1-processing-n:127.0.0.1:41673_ x:collection1 s:shard2 c:collection1 r:core_node3) [n:127.0.0.1:41673_ c:collection1 s:shard2 r:core_node3 x:collection1] o.a.s.c.SolrCore [collection1] Registered new searcher Searcher@3af9a6d[collection1] main{ExitableDirectoryReader(UninvertingDirectoryReader())}
   [junit4]   2> 1382311 INFO  (coreLoadExecutor-8017-thread-1-processing-n:127.0.0.1:41673_) [n:127.0.0.1:41673_ c:collection1 s:shard2 r:core_node3 x:collection1] o.a.s.u.UpdateLog Could not find max version in index or recent updates, using new clock 1548119817378070528
   [junit4]   2> 1382315 INFO  (coreZkRegister-8012-thread-1-processing-n:127.0.0.1:41673_ x:collection1 s:shard2 c:collection1 r:core_node3) [n:127.0.0.1:41673_ c:collection1 s:shard2 r:core_node3 x:collection1] o.a.s.c.ZkController Core needs to recover:collection1
   [junit4]   2> 1382315 INFO  (updateExecutor-13979-thread-1-processing-n:127.0.0.1:41673_ x:collection1 s:shard2 c:collection1 r:core_node3) [n:127.0.0.1:41673_ c:collection1 s:shard2 r:core_node3 x:collection1] o.a.s.u.DefaultSolrCoreState Running recovery
   [junit4]   2> 1382316 INFO  (recoveryExecutor-13980-thread-1-processing-n:127.0.0.1:41673_ x:collection1 s:shard2 c:collection1 r:core_node3) [n:127.0.0.1:41673_ c:collection1 s:shard2 r:core_node3 x:collection1] o.a.s.c.RecoveryStrategy Starting recovery process. recoveringAfterStartup=true
   [junit4]   2> 1382316 INFO  (recoveryExecutor-13980-thread-1-processing-n:127.0.0.1:41673_ x:collection1 s:shard2 c:collection1 r:core_node3) [n:127.0.0.1:41673_ c:collection1 s:shard2 r:core_node3 x:collection1] o.a.s.c.RecoveryStrategy ###### startupVersions=[[]]
   [junit4]   2> 1382316 INFO  (recoveryExecutor-13980-thread-1-processing-n:127.0.0.1:41673_ x:collection1 s:shard2 c:collection1 r:core_node3) [n:127.0.0.1:41673_ c:collection1 s:shard2 r:core_node3 x:collection1] o.a.s.c.RecoveryStrategy Begin buffering updates. core=[collection1]
   [junit4]   2> 1382316 INFO  (recoveryExecutor-13980-thread-1-processing-n:127.0.0.1:41673_ x:collection1 s:shard2 c:collection1 r:core_node3) [n:127.0.0.1:41673_ c:collection1 s:shard2 r:core_node3 x:collection1] o.a.s.u.UpdateLog Starting to buffer updates. FSUpdateLog{state=ACTIVE, tlog=null}
   [junit4]   2> 1382317 INFO  (recoveryExecutor-13980-thread-1-processing-n:127.0.0.1:41673_ x:collection1 s:shard2 c:collection1 r:core_node3) [n:127.0.0.1:41673_ c:collection1 s:shard2 r:core_node3 x:collection1] o.a.s.c.RecoveryStrategy Publishing state of core [collection1] as recovering, leader is [http://127.0.0.1:46131/collection1/] and I am [http://127.0.0.1:41673/collection1/]
   [junit4]   2> 1382319 INFO  (recoveryExecutor-13980-thread-1-processing-n:127.0.0.1:41673_ x:collection1 s:shard2 c:collection1 r:core_node3) [n:127.0.0.1:41673_ c:collection1 s:shard2 r:core_node3 x:collection1] o.a.s.c.RecoveryStrategy Sending prep recovery command to [http://127.0.0.1:46131]; [WaitForState: action=PREPRECOVERY&core=collection1&nodeName=127.0.0.1:41673_&coreNodeName=core_node3&state=recovering&checkLive=true&onlyIfLeader=true&onlyIfLeaderActive=true]
   [junit4]   2> 1382319 INFO  (zkCallback-13970-thread-1-processing-n:127.0.0.1:46131_) [n:127.0.0.1:46131_    ] o.a.s.c.c.ZkStateReader A cluster state change: [WatchedEvent state:SyncConnected type:NodeDataChanged path:/collections/collection1/state.json] for collection [collection1] has occurred - updating... (live nodes size: [4])
   [junit4]   2> 1382319 INFO  (zkCallback-13982-thread-2-processing-n:127.0.0.1:41673_) [n:127.0.0.1:41673_    ] o.a.s.c.c.ZkStateReader A cluster state change: [WatchedEvent state:SyncConnected type:NodeDataChanged path:/collections/collection1/state.json] for collection [collection1] has occurred - updating... (live nodes size: [4])
   [junit4]   2> 1382319 INFO  (zkCallback-13976-thread-2-processing-n:127.0.0.1:37930_) [n:127.0.0.1:37930_    ] o.a.s.c.c.ZkStateReader A cluster state change: [WatchedEvent state:SyncConnected type:NodeDataChanged path:/collections/collection1/state.json] for collection [collection1] has occurred - updating... (live nodes size: [4])
   [junit4]   2> 1382320 INFO  (SocketProxy-Acceptor-46131) [    ] o.a.s.c.SocketProxy accepted Socket[addr=/127.0.0.1,port=48515,localport=46131], receiveBufferSize:531000
   [junit4]   2> 1382324 INFO  (SocketProxy-Acceptor-46131) [    ] o.a.s.c.SocketProxy proxy connection Socket[addr=/127.0.0.1,port=55495,localport=49465], receiveBufferSize=530904
   [junit4]   2> 1382326 INFO  (qtp928787543-58558) [n:127.0.0.1:46131_    ] o.a.s.h.a.PrepRecoveryOp Going to wait for coreNodeName: core_node3, state: recovering, checkLive: true, onlyIfLeader: true, onlyIfLeaderActive: true
   [junit4]   2> 1382326 INFO  (qtp928787543-58558) [n:127.0.0.1:46131_    ] o.a.s.h.a.PrepRecoveryOp Will wait a max of 183 seconds to see collection1 (shard2 of collection1) have state: recovering
   [junit4]   2> 1382326 INFO  (qtp928787543-58558) [n:127.0.0.1:46131_    ] o.a.s.h.a.PrepRecoveryOp In WaitForState(recovering): collection=collection1, shard=shard2, thisCore=collection1, leaderDoesNotNeedRecovery=false, isLeader? true, live=true, checkLive=true, currentState=recovering, localState=active, nodeName=127.0.0.1:41673_, coreNodeName=core_node3, onlyIfActiveCheckResult=false, nodeProps: core_node3:{"core":"collection1","base_url":"http://127.0.0.1:41673","node_name":"127.0.0.1:41673_","state":"recovering"}
   [junit4]   2> 1382326 INFO  (qtp928787543-58558) [n:127.0.0.1:46131_    ] o.a.s.h.a.PrepRecoveryOp Waited coreNodeName: core_node3, state: recovering, checkLive: true, onlyIfLeader: true for: 0 seconds.
   [junit4]   2> 1382326 INFO  (qtp928787543-58558) [n:127.0.0.1:46131_    ] o.a.s.s.HttpSolrCall [admin] webapp=null path=/admin/cores params={nodeName=127.0.0.1:41673_&onlyIfLeaderActive=true&core=collection1&coreNodeName=core_node3&action=PREPRECOVERY&checkLive=true&state=recovering&onlyIfLeader=true&wt=javabin&version=2} status=0 QTime=0
   [junit4]   2> 1382754 INFO  (TEST-LeaderFailoverAfterPartitionTest.test-seed#[D76B0C6F3772C6F9]) [    ] o.a.s.SolrTestCaseJ4 ###Starting test
   [junit4]   2> 1382754 INFO  (TEST-LeaderFailoverAfterPartitionTest.test-seed#[D76B0C6F3772C6F9]) [    ] o.a.s.c.AbstractFullDistribZkTestBase Wait for recoveries to finish - wait 30000 for each attempt
   [junit4]   2> 1382754 INFO  (TEST-LeaderFailoverAfterPartitionTest.test-seed#[D76B0C6F3772C6F9]) [    ] o.a.s.c.AbstractDistribZkTestBase Wait for recoveries to finish - collection: collection1 failOnTimeout:true timeout (sec):30000
   [junit4]   2> 1389327 INFO  (recoveryExecutor-13980-thread-1-processing-n:127.0.0.1:41673_ x:collection1 s:shard2 c:collection1 r:core_node3) [n:127.0.0.1:41673_ c:collection1 s:shard2 r:core_node3 x:collection1] o.a.s.c.RecoveryStrategy Attempting to PeerSync from [http://127.0.0.1:46131/collection1/] - recoveringAfterStartup=[true]
   [junit4]   2> 1389327 INFO  (recoveryExecutor-13980-thread-1-processing-n:127.0.0.1:41673_ x:collection1 s:shard2 c:collection1 r:core_node3) [n:127.0.0.1:41673_ c:collection1 s:shard2 r:core_node3 x:collection1] o.a.s.u.PeerSync PeerSync: core=collection1 url=http://127.0.0.1:41673 START replicas=[http://127.0.0.1:46131/collection1/] nUpdates=100
   [junit4]   2> 1389328 INFO  (SocketProxy-Acceptor-46131) [    ] o.a.s.c.SocketProxy accepted Socket[addr=/127.0.0.1,port=48522,localport=46131], receiveBufferSize:531000
   [junit4]   2> 1389328 INFO  (SocketProxy-Acceptor-46131) [    ] o.a.s.c.SocketProxy proxy connection Socket[addr=/127.0.0.1,port=55495,localport=49472], receiveBufferSize=530904
   [junit4]   2> 1389330 INFO  (qtp928787543-58562) [n:127.0.0.1:46131_ c:collection1 s:shard2 r:core_node1 x:collection1] o.a.s.u.IndexFingerprint IndexFingerprint millis:0.0 result:{maxVersionSpecified=9223372036854775807, maxVersionEncountered=0, maxInHash=0, versionsHash=0, numVersions=0, numDocs=0, maxDoc=0}
   [junit4]   2> 1389330 INFO  (qtp928787543-58562) [n:127.0.0.1:46131_ c:collection1 s:shard2 r:core_node1 x:collection1] o.a.s.c.S.Request [collection1]  webapp= path=/get params={distrib=false&qt=/get&getFingerprint=9223372036854775807&wt=javabin&version=2} status=0 QTime=0
   [junit4]   2> 1389330 INFO  (recoveryExecutor-13980-thread-1-processing-n:127.0.0.1:41673_ x:collection1 s:shard2 c:collection1 r:core_node3) [n:127.0.0.1:41673_ c:collection1 s:shard2 r:core_node3 x:collection1] o.a.s.u.IndexFingerprint IndexFingerprint millis:0.0 result:{maxVersionSpecified=9223372036854775807, maxVersionEncountered=0, maxInHash=0, versionsHash=0, numVersions=0, numDocs=0, maxDoc=0}
   [junit4]   2> 1389330 INFO  (recoveryExecutor-13980-thread-1-processing-n:127.0.0.1:41673_ x:collection1 s:shard2 c:collection1 r:core_node3) [n:127.0.0.1:41673_ c:collection1 s:shard2 r:core_node3 x:collection1] o.a.s.u.PeerSync We are already in sync. No need to do a PeerSync 
   [junit4]   2> 1389330 INFO  (recoveryExecutor-13980-thread-1-processing-n:127.0.0.1:41673_ x:collection1 s:shard2 c:collection1 r:core_node3) [n:127.0.0.1:41673_ c:collection1 s:shard2 r:core_node3 x:collection1] o.a.s.u.DirectUpdateHandler2 start commit{,optimize=false,openSearcher=true,waitSearcher=true,expungeDeletes=false,softCommit=false,prepareCommit=false}
   [junit4]   2> 1389330 INFO  (recoveryExecutor-13980-thread-1-processing-n:127.0.0.1:41673_ x:collection1 s:shard2 c:collection1 r:core_node3) [n:127.0.0.1:41673_ c:collection1 s:shard2 r:core_node3 x:collection1] o.a.s.u.DirectUpdateHandler2 No uncommitted changes. Skipping IW.commit.
   [junit4]   2> 1389330 INFO  (recoveryExecutor-13980-thread-1-processing-n:127.0.0.1:41673_ x:collection1 s:shard2 c:collection1 r:core_node3) [n:127.0.0.1:41673_ c:collection1 s:shard2 r:core_node3 x:collection1] o.a.s.u.DirectUpdateHandler2 end_commit_flush
   [junit4]   2> 1389330 INFO  (recoveryExecutor-13980-thread-1-processing-n:127.0.0.1:41673_ x:collection1 s:shard2 c:collection1 r:core_node3) [n:127.0.0.1:41673_ c:collection1 s:shard2 r:core_node3 x:collection1] o.a.s.c.RecoveryStrategy PeerSync stage of recovery was successful.
   [junit4]   2> 1389330 INFO  (recoveryExecutor-13980-thread-1-processing-n:127.0.0.1:41673_ x:collection1 s:shard2 c:collection1 r:core_node3) [n:127.0.0.1:41673_ c:collection1 s:shard2 r:core_node3 x:collection1] o.a.s.c.RecoveryStrategy Replaying updates buffered during PeerSync.
   [junit4]   2> 1389330 INFO  (recoveryExecutor-13980-thread-1-processing-n:127.0.0.1:41673_ x:collection1 s:shard2 c:collection1 r:core_node3) [n:127.0.0.1:41673_ c:collection1 s:shard2 r:core_node3 x:collection1] o.a.s.c.RecoveryStrategy No replay needed.
   [junit4]   2> 1389331 INFO  (recoveryExecutor-13980-thread-1-processing-n:127.0.0.1:41673_ x:collection1 s:shard2 c:collection1 r:core_node3) [n:127.0.0.1:41673_ c:collection1 s:shard2 r:core_node3 x:collection1] o.a.s.c.RecoveryStrategy Registering as Active after recovery.
   [junit4]   2> 1389332 INFO  (zkCallback-13970-thread-1-processing-n:127.0.0.1:46131_) [n:127.0.0.1:46131_    ] o.a.s.c.c.ZkStateReader A cluster state change: [WatchedEvent state:SyncConnected type:NodeDataChanged path:/collections/collection1/state.json] for collection [collection1] has occurred - updating... (live nodes size: [4])
   [junit4]   2> 1389332 INFO  (zkCallback-13982-thread-2-processing-n:127.0.0.1:41673_) [n:127.0.0.1:41673_    ] o.a.s.c.c.ZkStateReader A cluster state change: [WatchedEvent state:SyncConnected type:NodeDataChanged path:/collections/collection1/state.json] for collection [collection1] has occurred - updating... (live nodes size: [4])
   [junit4]   2> 1389332 INFO  (zkCallback-13976-thread-2-processing-n:127.0.0.1:37930_) [n:127.0.0.1:37930_    ] o.a.s.c.c.ZkStateReader A cluster state change: [WatchedEvent state:SyncConnected type:NodeDataChanged path:/collections/collection1/state.json] for collection [collection1] has occurred - updating... (live nodes size: [4])
   [junit4]   2> 1389758 INFO  (TEST-LeaderFailoverAfterPartitionTest.test-seed#[D76B0C6F3772C6F9]) [    ] o.a.s.c.AbstractDistribZkTestBase Recoveries finished - collection: collection1
   [junit4]   2> 1389758 INFO  (SocketProxy-Acceptor-58767) [    ] o.a.s.c.SocketProxy accepted Socket[addr=/127.0.0.1,port=33573,localport=58767], receiveBufferSize:531000
   [junit4]   2> 1389759 INFO  (SocketProxy-Acceptor-58767) [    ] o.a.s.c.SocketProxy proxy connection Socket[addr=/127.0.0.1,port=45361,localport=47075], receiveBufferSize=530904
   [junit4]   2> 1389765 INFO  (qtp1349940319-58517) [n:127.0.0.1:58767_ c:control_collection s:shard1 r:core_node1 x:collection1] o.a.s.u.DirectUpdateHandler2 start commit{,optimize=false,openSearcher=true,waitSearcher=true,expungeDeletes=false,softCommit=false,prepareCommit=false}
   [junit4]   2> 1389765 INFO  (qtp1349940319-58517) [n:127.0.0.1:58767_ c:control_collection s:shard1 r:core_node1 x:collection1] o.a.s.u.DirectUpdateHandler2 No uncommitted changes. Skipping IW.commit.
   [junit4]   2> 1389765 INFO  (qtp1349940319-58517) [n:127.0.0.1:58767_ c:control_collection s:shard1 r:core_node1 x:collection1] o.a.s.u.DirectUpdateHandler2 end_commit_flush
   [junit4]   2> 1389766 INFO  (qtp1349940319-58517) [n:127.0.0.1:58767_ c:control_collection s:shard1 r:core_node1 x:collection1] o.a.s.u.p.LogUpdateProcessorFactory [collection1]  webapp= path=/update params={waitSearcher=true&commit=true&softCommit=false&wt=javabin&version=2}{commit=} 0 0
   [junit4]   2> 1389767 INFO  (SocketProxy-Acceptor-37930) [    ] o.a.s.c.SocketProxy accepted Socket[addr=/127.0.0.1,port=38490,localport=37930], receiveBufferSize:531000
   [junit4]   2> 1389768 INFO  (SocketProxy-Acceptor-37930) [    ] o.a.s.c.SocketProxy proxy connection Socket[addr=/127.0.0.1,port=39214,localport=32988], receiveBufferSize=530904
   [junit4]   2> 1389769 INFO  (SocketProxy-Acceptor-37930) [    ] o.a.s.c.SocketProxy accepted Socket[addr=/127.0.0.1,port=38492,localport=37930], receiveBufferSize:531000
   [junit4]   2> 1389770 INFO  (SocketProxy-Acceptor-46131) [    ] o.a.s.c.SocketProxy accepted Socket[addr=/127.0.0.1,port=48530,localport=46131], receiveBufferSize:531000
   [junit4]   2> 1389770 INFO  (SocketProxy-Acceptor-41673) [    ] o.a.s.c.SocketProxy accepted Socket[addr=/127.0.0.1,port=45014,localport=41673], receiveBufferSize:531000
   [junit4]   2> 1389770 INFO  (SocketProxy-Acceptor-37930) [    ] o.a.s.c.SocketProxy proxy connection Socket[addr=/127.0.0.1,port=39214,localport=32990], receiveBufferSize=530904
   [junit4]   2> 1389771 INFO  (SocketProxy-Acceptor-41673) [    ] o.a.s.c.SocketProxy proxy connection Socket[addr=/127.0.0.1,port=50591,localport=44232], receiveBufferSize=530904
   [junit4]   2> 1389774 INFO  (SocketProxy-Acceptor-46131) [    ] o.a.s.c.SocketProxy proxy connection Socket[addr=/127.0.0.1,port=55495,localport=49481], receiveBufferSize=530904
   [junit4]   2> 1389774 INFO  (qtp599192445-58585) [n:127.0.0.1:37930_ c:collection1 s:shard1 r:core_node2 x:collection1] o.a.s.u.DirectUpdateHandler2 start commit{,optimize=false,openSearcher=true,waitSearcher=true,expungeDeletes=false,softCommit=false,prepareCommit=false}
   [junit4]   2> 1389774 INFO  (qtp599192445-58585) [n:127.0.0.1:37930_ c:collection1 s:shard1 r:core_node2 x:collection1] o.a.s.u.DirectUpdateHandler2 No uncommitted changes. Skipping IW.commit.
   [junit4]   2> 1389778 INFO  (qtp599192445-58585) [n:127.0.0.1:37930_ c:collection1 s:shard1 r:core_node2 x:collection1] o.a.s.u.DirectUpdateHandler2 end_commit_flush
   [junit4]   2> 1389778 INFO  (qtp599192445-58585) [n:127.0.0.1:37930_ c:collection1 s:shard1 r:core_node2 x:collection1] o.a.s.u.p.LogUpdateProcessorFactory [collection1]  webapp= path=/update params={update.distrib=FROMLEADER&waitSearcher=true&openSearcher=true&commit=true&softCommit=false&distrib.from=http://127.0.0.1:37930/collection1/&commit_end_point=true&wt=javabin&version=2&expungeDeletes=false}{commit=} 0 4
   [junit4]   2> 1389779 INFO  (qtp928787543-58561) [n:127.0.0.1:46131_ c:collection1 s:shard2 r:core_node1 x:collection1] o.a.s.u.DirectUpdateHandler2 start commit{,optimize=false,openSearcher=true,waitSearcher=true,expungeDeletes=false,softCommit=false,prepareCommit=false}
   [junit4]   2> 1389779 INFO  (qtp928787543-58561) [n:127.0.0.1:46131_ c:collection1 s:shard2 r:core_node1 x:collection1] o.a.s.u.DirectUpdateHandler2 No uncommitted changes. Skipping IW.commit.
   [junit4]   2> 1389779 INFO  (qtp1290091903-58618) [n:127.0.0.1:41673_ c:collection1 s:shard2 r:core_node3 x:collection1] o.a.s.u.DirectUpdateHandler2 start commit{,optimize=false,openSearcher=true,waitSearcher=true,expungeDeletes=false,softCommit=false,prepareCommit=false}
   [junit4]   2> 1389779 INFO  (qtp1290091903-58618) [n:127.0.0.1:41673_ c:collection1 s:shard2 r:core_node3 x:collection1] o.a.s.u.DirectUpdateHandler2 No uncommitted changes. Skipping IW.commit.
   [junit4]   2> 1389779 INFO  (qtp928787543-58561) [n:127.0.0.1:46131_ c:collection1 s:shard2 r:core_node1 x:collection1] o.a.s.u.DirectUpdateHandler2 end_commit_flush
   [junit4]   2> 1389779 INFO  (qtp928787543-58561) [n:127.0.0.1:46131_ c:collection1 s:shard2 r:core_node1 x:collection1] o.a.s.u.p.LogUpdateProcessorFactory [collection1]  webapp= path=/update params={update.distrib=FROMLEADER&waitSearcher=true&openSearcher=true&commit=true&softCommit=false&distrib.from=http://127.0.0.1:37930/collection1/&commit_end_point=true&wt=javabin&version=2&expungeDeletes=false}{commit=} 0 0
   [junit4]   2> 1389780 INFO  (qtp1290091903-58618) [n:127.0.0.1:41673_ c:collection1 s:shard2 r:core_node3 x:collection1] o.a.s.u.DirectUpdateHandler2 end_commit_flush
   [junit4]   2> 1389780 INFO  (qtp1290091903-58618) [n:127.0.0.1:41673_ c:collection1 s:shard2 r:core_node3 x:collection1] o.a.s.u.p.LogUpdateProcessorFactory [collection1]  webapp= path=/update params={update.distrib=FROMLEADER&waitSearcher=true&openSearcher=true&commit=true&softCommit=false&distrib.from=http://127.0.0.1:37930/collection1/&commit_end_point=true&wt=javabin&version=2&expungeDeletes=false}{commit=} 0 0
   [junit4]   2> 1389780 INFO  (qtp599192445-58587) [n:127.0.0.1:37930_ c:collection1 s:shard1 r:core_node2 x:collection1] o.a.s.u.p.LogUpdateProcessorFactory [collection1]  webapp= path=/update params={_stateVer_=collection1:10&waitSearcher=true&commit=true&softCommit=false&wt=javabin&version=2}{commit=} 0 12
   [junit4]   2> 1389781 INFO  (SocketProxy-Acceptor-46131) [    ] o.a.s.c.SocketProxy accepted Socket[addr=/127.0.0.1,port=48534,localport=46131], receiveBufferSize:531000
   [junit4]   2> 1389782 INFO  (SocketProxy-Acceptor-46131) [    ] o.a.s.c.SocketProxy proxy connection Socket[addr=/127.0.0.1,port=55495,localport=49484], receiveBufferSize=530904
   [junit4]   2> 1389787 INFO  (qtp928787543-58560) [n:127.0.0.1:46131_ c:collection1 s:shard2 r:core_node1 x:collection1] o.a.s.c.S.Request [collection1]  webapp= path=/select params={q=*:*&distrib=false&tests=checkShardConsistency&rows=0&wt=javabin&version=2} hits=0 status=0 QTime=0
   [junit4]   2> 1389788 INFO  (SocketProxy-Acceptor-41673) [    ] o.a.s.c.SocketProxy accepted Socket[addr=/127.0.0.1,port=45019,localport=41673], receiveBufferSize:531000
   [junit4]   2> 1389790 INFO  (SocketProxy-Acceptor-41673) [    ] o.a.s.c.SocketProxy proxy connection Socket[addr=/127.0.0.1,port=50591,localport=44236], receiveBufferSize=530904
   [junit4]   2> 1389790 INFO  (qtp1290091903-58616) [n:127.0.0.1:41673_ c:collection1 s:shard2 r:core_node3 x:collection1] o.a.s.c.S.Request [collection1]  webapp= path=/select params={q=*:*&distrib=false&tests=checkShardConsistency&rows=0&wt=javabin&version=2} hits=0 status=0 QTime=0
   [junit4]   2> 1389794 INFO  (SocketProxy-Acceptor-37930) [    ] o.a.s.c.SocketProxy accepted Socket[addr=/127.0.0.1,port=38502,localport=37930], receiveBufferSize:531000
   [junit4]   2> 1389798 INFO  (SocketProxy-Acceptor-37930) [    ] o.a.s.c.SocketProxy proxy connection Socket[addr=/127.0.0.1,port=39214,localport=33000], receiveBufferSize=530904
   [junit4]   2> 1389798 INFO  (qtp599192445-58591) [n:127.0.0.1:37930_ c:collection1 s:shard1 r:core_node2 x:collection1] o.a.s.c.S.Request [collection1]  webapp= path=/select params={q=*:*&distrib=false&tests=checkShardConsistency&rows=0&wt=javabin&version=2} hits=0 status=0 QTime=0
   [junit4]   2> 1391800 INFO  (SocketProxy-Acceptor-46131) [    ] o.a.s.c.SocketProxy accepted Socket[addr=/127.0.0.1,port=48551,localport=46131], receiveBufferSize:531000
   [junit4]   2> 1391801 INFO  (SocketProxy-Acceptor-46131) [    ] o.a.s.c.SocketProxy proxy connection Socket[addr=/127.0.0.1,port=55495,localport=49501], receiveBufferSize=530904
   [junit4]   2> 1391

[...truncated too long message...]

0.0.1:0}
   [junit4]   2> 1492879 INFO  (TEST-LeaderFailoverAfterPartitionTest.test-seed#[D76B0C6F3772C6F9]) [    ] o.e.j.s.h.ContextHandler Stopped o.e.j.s.ServletContextHandler@5dfecf80{/,null,UNAVAILABLE}
   [junit4]   2> 1492879 INFO  (TEST-LeaderFailoverAfterPartitionTest.test-seed#[D76B0C6F3772C6F9]) [    ] o.a.s.c.ChaosMonkey monkey: stop shard! 41673
   [junit4]   2> 1492881 INFO  (TEST-LeaderFailoverAfterPartitionTest.test-seed#[D76B0C6F3772C6F9]) [    ] o.a.s.c.ZkTestServer connecting to 127.0.0.1:51607 51607
   [junit4]   2> 1493056 INFO  (Thread-6212) [    ] o.a.s.c.ZkTestServer connecting to 127.0.0.1:51607 51607
   [junit4]   2> 1493057 WARN  (Thread-6212) [    ] o.a.s.c.ZkTestServer Watch limit violations: 
   [junit4]   2> Maximum concurrent create/delete watches above limit:
   [junit4]   2> 
   [junit4]   2> 	5	/solr/aliases.json
   [junit4]   2> 	5	/solr/clusterprops.json
   [junit4]   2> 	4	/solr/security.json
   [junit4]   2> 	4	/solr/configs/conf1
   [junit4]   2> 
   [junit4]   2> Maximum concurrent data watches above limit:
   [junit4]   2> 
   [junit4]   2> 	5	/solr/clusterstate.json
   [junit4]   2> 	3	/solr/collections/c8n_1x3_lf/state.json
   [junit4]   2> 	3	/solr/collections/collection1/state.json
   [junit4]   2> 
   [junit4]   2> Maximum concurrent children watches above limit:
   [junit4]   2> 
   [junit4]   2> 	113	/solr/overseer/collection-queue-work
   [junit4]   2> 	43	/solr/overseer/queue
   [junit4]   2> 	19	/solr/overseer/queue-work
   [junit4]   2> 	5	/solr/live_nodes
   [junit4]   2> 	5	/solr/collections
   [junit4]   2> 
   [junit4]   2> 1493058 WARN  (TEST-LeaderFailoverAfterPartitionTest.test-seed#[D76B0C6F3772C6F9]) [    ] o.a.s.c.SocketProxy Closing 1 connections to: http://127.0.0.1:58767/, target: http://127.0.0.1:45361/
   [junit4]   2> 1493058 WARN  (TEST-LeaderFailoverAfterPartitionTest.test-seed#[D76B0C6F3772C6F9]) [    ] o.a.s.c.SocketProxy Closing 0 connections to: http://127.0.0.1:41673/, target: http://127.0.0.1:50591/
   [junit4]   2> 1493058 WARN  (TEST-LeaderFailoverAfterPartitionTest.test-seed#[D76B0C6F3772C6F9]) [    ] o.a.s.c.SocketProxy Closing 1 connections to: http://127.0.0.1:46131/, target: http://127.0.0.1:55495/
   [junit4]   2> 1493058 WARN  (TEST-LeaderFailoverAfterPartitionTest.test-seed#[D76B0C6F3772C6F9]) [    ] o.a.s.c.SocketProxy Closing 6 connections to: http://127.0.0.1:37930/, target: http://127.0.0.1:39214/
   [junit4]   2> NOTE: reproduce with: ant test  -Dtestcase=LeaderFailoverAfterPartitionTest -Dtests.method=test -Dtests.seed=D76B0C6F3772C6F9 -Dtests.multiplier=2 -Dtests.slow=true -Dtests.locale=ar-LY -Dtests.timezone=America/Maceio -Dtests.asserts=true -Dtests.file.encoding=ISO-8859-1
   [junit4] FAILURE  118s J0 | LeaderFailoverAfterPartitionTest.test <<<
   [junit4]    > Throwable #1: java.lang.AssertionError: Expected 2 of 3 replicas to be active but only found 1; [core_node2:{"core":"c8n_1x3_lf_shard1_replica3","base_url":"http://127.0.0.1:37930","node_name":"127.0.0.1:37930_","state":"active","leader":"true"}]; clusterState: DocCollection(c8n_1x3_lf//collections/c8n_1x3_lf/state.json/19)={
   [junit4]    >   "replicationFactor":"3",
   [junit4]    >   "shards":{"shard1":{
   [junit4]    >       "range":"80000000-7fffffff",
   [junit4]    >       "state":"active",
   [junit4]    >       "replicas":{
   [junit4]    >         "core_node1":{
   [junit4]    >           "state":"down",
   [junit4]    >           "base_url":"http://127.0.0.1:46131",
   [junit4]    >           "core":"c8n_1x3_lf_shard1_replica1",
   [junit4]    >           "node_name":"127.0.0.1:46131_"},
   [junit4]    >         "core_node2":{
   [junit4]    >           "core":"c8n_1x3_lf_shard1_replica3",
   [junit4]    >           "base_url":"http://127.0.0.1:37930",
   [junit4]    >           "node_name":"127.0.0.1:37930_",
   [junit4]    >           "state":"active",
   [junit4]    >           "leader":"true"},
   [junit4]    >         "core_node3":{
   [junit4]    >           "core":"c8n_1x3_lf_shard1_replica2",
   [junit4]    >           "base_url":"http://127.0.0.1:41673",
   [junit4]    >           "node_name":"127.0.0.1:41673_",
   [junit4]    >           "state":"down"}}}},
   [junit4]    >   "router":{"name":"compositeId"},
   [junit4]    >   "maxShardsPerNode":"1",
   [junit4]    >   "autoAddReplicas":"false"}
   [junit4]    > 	at __randomizedtesting.SeedInfo.seed([D76B0C6F3772C6F9:5F3F33B5998EAB01]:0)
   [junit4]    > 	at org.apache.solr.cloud.LeaderFailoverAfterPartitionTest.testRf3WithLeaderFailover(LeaderFailoverAfterPartitionTest.java:170)
   [junit4]    > 	at org.apache.solr.cloud.LeaderFailoverAfterPartitionTest.test(LeaderFailoverAfterPartitionTest.java:57)
   [junit4]    > 	at org.apache.solr.BaseDistributedSearchTestCase$ShardsRepeatRule$ShardsFixedStatement.callStatement(BaseDistributedSearchTestCase.java:985)
   [junit4]    > 	at org.apache.solr.BaseDistributedSearchTestCase$ShardsRepeatRule$ShardsStatement.evaluate(BaseDistributedSearchTestCase.java:960)
   [junit4]    > 	at java.lang.Thread.run(Thread.java:745)
   [junit4]   2> 1493067 INFO  (SUITE-LeaderFailoverAfterPartitionTest-seed#[D76B0C6F3772C6F9]-worker) [    ] o.a.s.SolrTestCaseJ4 ###deleteCore
   [junit4]   2> NOTE: leaving temporary files on disk at: /x1/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-master/solr/build/solr-core/test/J0/temp/solr.cloud.LeaderFailoverAfterPartitionTest_D76B0C6F3772C6F9-001
   [junit4]   2> Oct 13, 2016 11:43:39 PM com.carrotsearch.randomizedtesting.ThreadLeakControl checkThreadLeaks
   [junit4]   2> WARNING: Will linger awaiting termination of 1 leaked thread(s).
   [junit4]   2> NOTE: test params are: codec=CheapBastard, sim=RandomSimilarity(queryNorm=false): {}, locale=ar-LY, timezone=America/Maceio
   [junit4]   2> NOTE: Linux 3.13.0-85-generic amd64/Oracle Corporation 1.8.0_102 (64-bit)/cpus=4,threads=1,free=215530864,total=521142272
   [junit4]   2> NOTE: All tests run in this JVM: [TestJmxMonitoredMap, CoreAdminCreateDiscoverTest, TestStandardQParsers, TestStressLiveNodes, TestMaxScoreQueryParser, HighlighterMaxOffsetTest, BlobRepositoryCloudTest, ChangedSchemaMergeTest, NumericFieldsTest, TestCSVLoader, TestConfigOverlay, SortSpecParsingTest, TestEmbeddedSolrServerConstructors, TestXmlQParser, DisMaxRequestHandlerTest, StandardRequestHandlerTest, TestSolr4Spatial, SystemInfoHandlerTest, DistribDocExpirationUpdateProcessorTest, SyncSliceTest, SaslZkACLProviderTest, SolrCoreTest, NotRequiredUniqueKeyTest, MinimalSchemaTest, SuggestComponentTest, CursorPagingTest, DeleteShardTest, ConcurrentDeleteAndCreateCollectionTest, TestStressLucene, BigEndianAscendingWordDeserializerTest, TestPivotHelperCode, CloudMLTQParserTest, TestUseDocValuesAsStored2, DefaultValueUpdateProcessorTest, TestConfigSetsAPIExclusivity, TestNumericTerms32, TestOmitPositions, RestartWhileUpdatingTest, TestCollationFieldDocValues, StatsReloadRaceTest, SharedFSAutoReplicaFailoverUtilsTest, TestCodecSupport, TestSolrConfigHandler, TestManagedSynonymFilterFactory, TestSolrCoreSnapshots, TestSchemaVersionResource, RequiredFieldsTest, TestFastWriter, TestSchemaNameResource, TestLMJelinekMercerSimilarityFactory, ResponseHeaderTest, TestSortByMinMaxFunction, SolrCloudExampleTest, PathHierarchyTokenizerFactoryTest, TestManagedSchemaThreadSafety, FileBasedSpellCheckerTest, PluginInfoTest, TestLazyCores, RegexBoostProcessorTest, PeerSyncTest, TestBlendedInfixSuggestions, FastVectorHighlighterTest, PolyFieldTest, TestManagedResourceStorage, TestHighFrequencyDictionaryFactory, DocumentAnalysisRequestHandlerTest, TestImplicitCoreProperties, TestClusterStateMutator, TestRandomDVFaceting, TestHdfsCloudBackupRestore, TestFieldSortValues, TestRandomFaceting, PingRequestHandlerTest, TestQuerySenderListener, TestDocTermOrds, DocExpirationUpdateProcessorFactoryTest, TestDistribDocBasedVersion, TestReqParamsAPI, BasicZkTest, TestRealTimeGet, 
SpellPossibilityIteratorTest, CoreSorterTest, LeaderInitiatedRecoveryOnShardRestartTest, TestSolrQueryParserResource, TestExactSharedStatsCache, QueryEqualityTest, CdcrReplicationDistributedZkTest, DistributedSpellCheckComponentTest, TestValueSourceCache, TestFieldCacheVsDocValues, TestReplicationHandler, TestMiniSolrCloudClusterSSL, TestConfigReload, BufferStoreTest, CdcrVersionReplicationTest, UUIDUpdateProcessorFallbackTest, TestRTimerTree, TestRangeQuery, CheckHdfsIndexTest, TestHdfsUpdateLog, DateMathParserTest, ZkNodePropsTest, PreAnalyzedUpdateProcessorTest, TestCollapseQParserPlugin, BasicDistributedZkTest, BasicDistributedZk2Test, OverseerTest, LeaderElectionIntegrationTest, RecoveryZkTest, ClusterStateUpdateTest, TestZkChroot, ZkCLITest, TestDistributedSearch, TermVectorComponentDistributedTest, TestJoin, SimpleFacetsTest, TestGroupingSearch, QueryElevationComponentTest, TestFiltering, CurrencyFieldOpenExchangeTest, SolrIndexSplitterTest, SimplePostToolTest, AnalysisAfterCoreReloadTest, SignatureUpdateProcessorFactoryTest, SpellCheckCollatorTest, CoreAdminHandlerTest, TestFoldingMultitermQuery, SuggesterTSTTest, SolrCoreCheckLockOnStartupTest, TestPseudoReturnFields, FieldMutatingUpdateProcessorTest, DirectUpdateHandlerOptimizeTest, TestRemoteStreaming, SolrInfoMBeanTest, CacheHeaderTest, LukeRequestHandlerTest, TermsComponentTest, TermVectorComponentTest, TestJmxIntegration, ReturnFieldsTest, TestAnalyzedSuggestions, TestLFUCache, ScriptEngineTest, PreAnalyzedFieldTest, TestSuggestSpellingConverter, SpellingQueryConverterTest, RAMDirectoryFactoryTest, TestLRUCache, SliceStateTest, CircularListTest, DistributedMLTComponentTest, ConnectionReuseTest, AsyncCallRequestStatusResponseTest, CdcrReplicationHandlerTest, CdcrRequestHandlerTest, CleanupOldIndexTest, CollectionTooManyReplicasTest, DeleteLastCustomShardedReplicaTest, DeleteReplicaTest, DistributedQueueTest, DistributedVersionInfoTest, LeaderFailoverAfterPartitionTest]
   [junit4] Completed [477/638 (1!)] on J0 in 118.69s, 1 test, 1 failure <<< FAILURES!

[...truncated 60829 lines...]