Posted to dev@lucene.apache.org by Policeman Jenkins Server <je...@thetaphi.de> on 2015/05/08 17:39:58 UTC

[JENKINS] Lucene-Solr-trunk-Linux (64bit/jdk1.8.0_45) - Build # 12599 - Failure!

Build: http://jenkins.thetaphi.de/job/Lucene-Solr-trunk-Linux/12599/
Java: 64bit/jdk1.8.0_45 -XX:+UseCompressedOops -XX:+UseParallelGC

1 test failed.
FAILED:  org.apache.solr.cloud.LeaderFailoverAfterPartitionTest.test

Error Message:
Didn't see replicas [core_node2, core_node3] come up within 90000 ms! ClusterState: DocCollection(c8n_1x3_lf)={   "replicationFactor":"3",   "shards":{"shard1":{       "range":"80000000-7fffffff",       "state":"active",       "replicas":{         "core_node1":{           "core":"c8n_1x3_lf_shard1_replica2",           "base_url":"http://127.0.0.1:45017",           "node_name":"127.0.0.1:45017_",           "state":"down"},         "core_node2":{           "core":"c8n_1x3_lf_shard1_replica1",           "base_url":"http://127.0.0.1:47799",           "node_name":"127.0.0.1:47799_",           "state":"recovering"},         "core_node3":{           "core":"c8n_1x3_lf_shard1_replica3",           "base_url":"http://127.0.0.1:41964",           "node_name":"127.0.0.1:41964_",           "state":"active",           "leader":"true"}}}},   "router":{"name":"compositeId"},   "maxShardsPerNode":"1",   "autoAddReplicas":"false"}

Stack Trace:
java.lang.AssertionError: Didn't see replicas [core_node2, core_node3] come up within 90000 ms! ClusterState: DocCollection(c8n_1x3_lf)={
  "replicationFactor":"3",
  "shards":{"shard1":{
      "range":"80000000-7fffffff",
      "state":"active",
      "replicas":{
        "core_node1":{
          "core":"c8n_1x3_lf_shard1_replica2",
          "base_url":"http://127.0.0.1:45017",
          "node_name":"127.0.0.1:45017_",
          "state":"down"},
        "core_node2":{
          "core":"c8n_1x3_lf_shard1_replica1",
          "base_url":"http://127.0.0.1:47799",
          "node_name":"127.0.0.1:47799_",
          "state":"recovering"},
        "core_node3":{
          "core":"c8n_1x3_lf_shard1_replica3",
          "base_url":"http://127.0.0.1:41964",
          "node_name":"127.0.0.1:41964_",
          "state":"active",
          "leader":"true"}}}},
  "router":{"name":"compositeId"},
  "maxShardsPerNode":"1",
  "autoAddReplicas":"false"}
	at __randomizedtesting.SeedInfo.seed([78CDE9CFD1BB0D04:F099D6157F4760FC]:0)
	at org.junit.Assert.fail(Assert.java:93)
	at org.apache.solr.cloud.HttpPartitionTest.waitToSeeReplicasActive(HttpPartitionTest.java:547)
	at org.apache.solr.cloud.LeaderFailoverAfterPartitionTest.testRf3WithLeaderFailover(LeaderFailoverAfterPartitionTest.java:178)
	at org.apache.solr.cloud.LeaderFailoverAfterPartitionTest.test(LeaderFailoverAfterPartitionTest.java:51)
	at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
	at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)
	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
	at java.lang.reflect.Method.invoke(Method.java:497)
	at com.carrotsearch.randomizedtesting.RandomizedRunner.invoke(RandomizedRunner.java:1627)
	at com.carrotsearch.randomizedtesting.RandomizedRunner$6.evaluate(RandomizedRunner.java:836)
	at com.carrotsearch.randomizedtesting.RandomizedRunner$7.evaluate(RandomizedRunner.java:872)
	at com.carrotsearch.randomizedtesting.RandomizedRunner$8.evaluate(RandomizedRunner.java:886)
	at org.apache.solr.BaseDistributedSearchTestCase$ShardsRepeatRule$ShardsFixedStatement.callStatement(BaseDistributedSearchTestCase.java:960)
	at org.apache.solr.BaseDistributedSearchTestCase$ShardsRepeatRule$ShardsStatement.evaluate(BaseDistributedSearchTestCase.java:935)
	at com.carrotsearch.randomizedtesting.rules.SystemPropertiesRestoreRule$1.evaluate(SystemPropertiesRestoreRule.java:57)
	at org.apache.lucene.util.TestRuleSetupTeardownChained$1.evaluate(TestRuleSetupTeardownChained.java:50)
	at org.apache.lucene.util.AbstractBeforeAfterRule$1.evaluate(AbstractBeforeAfterRule.java:46)
	at org.apache.lucene.util.TestRuleThreadAndTestName$1.evaluate(TestRuleThreadAndTestName.java:49)
	at org.apache.lucene.util.TestRuleIgnoreAfterMaxFailures$1.evaluate(TestRuleIgnoreAfterMaxFailures.java:65)
	at org.apache.lucene.util.TestRuleMarkFailure$1.evaluate(TestRuleMarkFailure.java:48)
	at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
	at com.carrotsearch.randomizedtesting.ThreadLeakControl$StatementRunner.run(ThreadLeakControl.java:365)
	at com.carrotsearch.randomizedtesting.ThreadLeakControl.forkTimeoutingTask(ThreadLeakControl.java:798)
	at com.carrotsearch.randomizedtesting.ThreadLeakControl$3.evaluate(ThreadLeakControl.java:458)
	at com.carrotsearch.randomizedtesting.RandomizedRunner.runSingleTest(RandomizedRunner.java:845)
	at com.carrotsearch.randomizedtesting.RandomizedRunner$3.evaluate(RandomizedRunner.java:747)
	at com.carrotsearch.randomizedtesting.RandomizedRunner$4.evaluate(RandomizedRunner.java:781)
	at com.carrotsearch.randomizedtesting.RandomizedRunner$5.evaluate(RandomizedRunner.java:792)
	at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
	at com.carrotsearch.randomizedtesting.rules.SystemPropertiesRestoreRule$1.evaluate(SystemPropertiesRestoreRule.java:57)
	at org.apache.lucene.util.AbstractBeforeAfterRule$1.evaluate(AbstractBeforeAfterRule.java:46)
	at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
	at org.apache.lucene.util.TestRuleStoreClassName$1.evaluate(TestRuleStoreClassName.java:42)
	at com.carrotsearch.randomizedtesting.rules.NoShadowingOrOverridesOnMethodsRule$1.evaluate(NoShadowingOrOverridesOnMethodsRule.java:39)
	at com.carrotsearch.randomizedtesting.rules.NoShadowingOrOverridesOnMethodsRule$1.evaluate(NoShadowingOrOverridesOnMethodsRule.java:39)
	at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
	at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
	at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
	at org.apache.lucene.util.TestRuleAssertionsRequired$1.evaluate(TestRuleAssertionsRequired.java:54)
	at org.apache.lucene.util.TestRuleMarkFailure$1.evaluate(TestRuleMarkFailure.java:48)
	at org.apache.lucene.util.TestRuleIgnoreAfterMaxFailures$1.evaluate(TestRuleIgnoreAfterMaxFailures.java:65)
	at org.apache.lucene.util.TestRuleIgnoreTestSuites$1.evaluate(TestRuleIgnoreTestSuites.java:55)
	at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
	at com.carrotsearch.randomizedtesting.ThreadLeakControl$StatementRunner.run(ThreadLeakControl.java:365)
	at java.lang.Thread.run(Thread.java:745)
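
The assertion above comes from a polling loop: the test repeatedly refreshes its view of the ZooKeeper-backed cluster state and passes only once the listed replicas report themselves active, giving up after 90000 ms. Here core_node1 was still "down" and core_node2 still "recovering" when the budget ran out. Below is a minimal SolrJ sketch of that kind of wait; it is illustrative rather than the actual HttpPartitionTest.waitToSeeReplicasActive implementation, the zkHost and collection name are taken from this run's log, and exact SolrJ signatures vary between Solr versions.

    import org.apache.solr.client.solrj.impl.CloudSolrClient;
    import org.apache.solr.common.cloud.ClusterState;
    import org.apache.solr.common.cloud.DocCollection;
    import org.apache.solr.common.cloud.Replica;
    import org.apache.solr.common.cloud.ZkStateReader;

    public class WaitForActiveReplicas {
      // Illustrative sketch: poll the cluster state until every replica of
      // shard1 is "active" on a live node, or the timeout expires.
      static void waitForActive(CloudSolrClient client, String collection,
          long timeoutMs) throws Exception {
        long deadline = System.currentTimeMillis() + timeoutMs;
        ZkStateReader reader = client.getZkStateReader();
        while (System.currentTimeMillis() < deadline) {
          ClusterState state = reader.getClusterState();
          DocCollection coll = state.getCollection(collection);
          boolean allActive = true;
          for (Replica r : coll.getSlice("shard1").getReplicas()) {
            // A replica only counts as up if it reports "active" AND its
            // node is registered under /live_nodes; a "recovering" replica
            // (like core_node2 above) fails this check.
            allActive &= "active".equals(r.getStr(ZkStateReader.STATE_PROP))
                && state.getLiveNodes().contains(r.getNodeName());
          }
          if (allActive) return;
          Thread.sleep(1000); // re-check roughly once per second
        }
        throw new AssertionError(
            "Didn't see replicas come up within " + timeoutMs + " ms!");
      }

      public static void main(String[] args) throws Exception {
        // 127.0.0.1:36134 is the embedded ZK server this run started (see
        // "start zk server on port:36134" in the build log below).
        try (CloudSolrClient client = new CloudSolrClient("127.0.0.1:36134/solr")) {
          client.connect();
          waitForActive(client, "c8n_1x3_lf", 90000);
        }
      }
    }

In this run such a loop would keep failing on core_node2 ("recovering") and core_node1 ("down") until the deadline passed, which is exactly the AssertionError recorded above.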

Build Log:
[...truncated 9851 lines...]
   [junit4] Suite: org.apache.solr.cloud.LeaderFailoverAfterPartitionTest
   [junit4]   2> Creating dataDir: /home/jenkins/workspace/Lucene-Solr-trunk-Linux/solr/build/solr-core/test/J0/temp/solr.cloud.LeaderFailoverAfterPartitionTest 78CDE9CFD1BB0D04-001/init-core-data-001
   [junit4]   2> 286462 T1597 oas.BaseDistributedSearchTestCase.initHostContext Setting hostContext system property: /
   [junit4]   2> 286463 T1597 oasc.ZkTestServer.run STARTING ZK TEST SERVER
   [junit4]   2> 286464 T1598 oasc.ZkTestServer$2$1.setClientPort client port:0.0.0.0/0.0.0.0:0
   [junit4]   2> 286464 T1598 oasc.ZkTestServer$ZKServerMain.runFromConfig Starting server
   [junit4]   2> 286565 T1597 oasc.ZkTestServer.run start zk server on port:36134
   [junit4]   2> 286616 T1597 oasc.AbstractZkTestCase.putConfig put /home/jenkins/workspace/Lucene-Solr-trunk-Linux/solr/core/src/test-files/solr/collection1/conf/solrconfig-tlog.xml to /configs/conf1/solrconfig.xml
   [junit4]   2> 286617 T1597 oasc.AbstractZkTestCase.putConfig put /home/jenkins/workspace/Lucene-Solr-trunk-Linux/solr/core/src/test-files/solr/collection1/conf/schema.xml to /configs/conf1/schema.xml
   [junit4]   2> 286618 T1597 oasc.AbstractZkTestCase.putConfig put /home/jenkins/workspace/Lucene-Solr-trunk-Linux/solr/core/src/test-files/solr/collection1/conf/solrconfig.snippet.randomindexconfig.xml to /configs/conf1/solrconfig.snippet.randomindexconfig.xml
   [junit4]   2> 286619 T1597 oasc.AbstractZkTestCase.putConfig put /home/jenkins/workspace/Lucene-Solr-trunk-Linux/solr/core/src/test-files/solr/collection1/conf/stopwords.txt to /configs/conf1/stopwords.txt
   [junit4]   2> 286622 T1597 oasc.AbstractZkTestCase.putConfig put /home/jenkins/workspace/Lucene-Solr-trunk-Linux/solr/core/src/test-files/solr/collection1/conf/protwords.txt to /configs/conf1/protwords.txt
   [junit4]   2> 286623 T1597 oasc.AbstractZkTestCase.putConfig put /home/jenkins/workspace/Lucene-Solr-trunk-Linux/solr/core/src/test-files/solr/collection1/conf/currency.xml to /configs/conf1/currency.xml
   [junit4]   2> 286624 T1597 oasc.AbstractZkTestCase.putConfig put /home/jenkins/workspace/Lucene-Solr-trunk-Linux/solr/core/src/test-files/solr/collection1/conf/enumsConfig.xml to /configs/conf1/enumsConfig.xml
   [junit4]   2> 286625 T1597 oasc.AbstractZkTestCase.putConfig put /home/jenkins/workspace/Lucene-Solr-trunk-Linux/solr/core/src/test-files/solr/collection1/conf/open-exchange-rates.json to /configs/conf1/open-exchange-rates.json
   [junit4]   2> 286626 T1597 oasc.AbstractZkTestCase.putConfig put /home/jenkins/workspace/Lucene-Solr-trunk-Linux/solr/core/src/test-files/solr/collection1/conf/mapping-ISOLatin1Accent.txt to /configs/conf1/mapping-ISOLatin1Accent.txt
   [junit4]   2> 286627 T1597 oasc.AbstractZkTestCase.putConfig put /home/jenkins/workspace/Lucene-Solr-trunk-Linux/solr/core/src/test-files/solr/collection1/conf/old_synonyms.txt to /configs/conf1/old_synonyms.txt
   [junit4]   2> 286628 T1597 oasc.AbstractZkTestCase.putConfig put /home/jenkins/workspace/Lucene-Solr-trunk-Linux/solr/core/src/test-files/solr/collection1/conf/synonyms.txt to /configs/conf1/synonyms.txt
   [junit4]   2> 286725 T1597 oas.SolrTestCaseJ4.writeCoreProperties Writing core.properties file to /home/jenkins/workspace/Lucene-Solr-trunk-Linux/solr/build/solr-core/test/J0/temp/solr.cloud.LeaderFailoverAfterPartitionTest 78CDE9CFD1BB0D04-001/control-001/cores/collection1
   [junit4]   2> 286728 T1597 oejs.Server.doStart jetty-9.2.10.v20150310
   [junit4]   2> 286730 T1597 oejsh.ContextHandler.doStart Started o.e.j.s.ServletContextHandler@124b0816{/,null,AVAILABLE}
   [junit4]   2> 286740 T1597 oejs.AbstractConnector.doStart Started ServerConnector@61ab60c9{HTTP/1.1}{127.0.0.1:?????}
   [junit4]   2> 286741 T1597 oejs.Server.doStart Started @??????ms
   [junit4]   2> 286741 T1597 oascse.JettySolrRunner$1.lifeCycleStarted Jetty properties: {solr.data.dir=/home/jenkins/workspace/Lucene-Solr-trunk-Linux/solr/build/solr-core/test/J0/temp/solr.cloud.LeaderFailoverAfterPartitionTest 78CDE9CFD1BB0D04-001/tempDir-001/control/data, hostContext=/, hostPort=45017, coreRootDirectory=/home/jenkins/workspace/Lucene-Solr-trunk-Linux/solr/build/solr-core/test/J0/temp/solr.cloud.LeaderFailoverAfterPartitionTest 78CDE9CFD1BB0D04-001/control-001/cores}
   [junit4]   2> 286742 T1597 oass.SolrDispatchFilter.init SolrDispatchFilter.init()sun.misc.Launcher$AppClassLoader@4e0e2f2a
   [junit4]   2> 286742 T1597 oasc.SolrResourceLoader.<init> new SolrResourceLoader for directory: '/home/jenkins/workspace/Lucene-Solr-trunk-Linux/solr/build/solr-core/test/J0/temp/solr.cloud.LeaderFailoverAfterPartitionTest 78CDE9CFD1BB0D04-001/control-001/'
   [junit4]   2> 286756 T1597 oasc.SolrXmlConfig.fromFile Loading container configuration from /home/jenkins/workspace/Lucene-Solr-trunk-Linux/solr/build/solr-core/test/J0/temp/solr.cloud.LeaderFailoverAfterPartitionTest 78CDE9CFD1BB0D04-001/control-001/solr.xml
   [junit4]   2> 286761 T1597 oasc.CorePropertiesLocator.<init> Config-defined core root directory: /home/jenkins/workspace/Lucene-Solr-trunk-Linux/solr/build/solr-core/test/J0/temp/solr.cloud.LeaderFailoverAfterPartitionTest 78CDE9CFD1BB0D04-001/control-001/cores
   [junit4]   2> 286761 T1597 oasc.CoreContainer.<init> New CoreContainer 1022748762
   [junit4]   2> 286762 T1597 oasc.CoreContainer.load Loading cores into CoreContainer [instanceDir=/home/jenkins/workspace/Lucene-Solr-trunk-Linux/solr/build/solr-core/test/J0/temp/solr.cloud.LeaderFailoverAfterPartitionTest 78CDE9CFD1BB0D04-001/control-001/]
   [junit4]   2> 286762 T1597 oasc.CoreContainer.load loading shared library: /home/jenkins/workspace/Lucene-Solr-trunk-Linux/solr/build/solr-core/test/J0/temp/solr.cloud.LeaderFailoverAfterPartitionTest 78CDE9CFD1BB0D04-001/control-001/lib
   [junit4]   2> 286762 T1597 oasc.SolrResourceLoader.addToClassLoader WARN Can't find (or read) directory to add to classloader: lib (resolved as: /home/jenkins/workspace/Lucene-Solr-trunk-Linux/solr/build/solr-core/test/J0/temp/solr.cloud.LeaderFailoverAfterPartitionTest 78CDE9CFD1BB0D04-001/control-001/lib).
   [junit4]   2> 286769 T1597 oashc.HttpShardHandlerFactory.init created with socketTimeout : 90000,urlScheme : ,connTimeout : 15000,maxConnectionsPerHost : 20,maxConnections : 10000,corePoolSize : 0,maximumPoolSize : 2147483647,maxThreadIdleTime : 5,sizeOfQueue : -1,fairnessPolicy : false,useRetries : false,
   [junit4]   2> 286770 T1597 oasu.UpdateShardHandler.<init> Creating UpdateShardHandler HTTP client with params: socketTimeout=340000&connTimeout=45000&retry=true
   [junit4]   2> 286771 T1597 oasl.LogWatcher.createWatcher SLF4J impl is org.slf4j.impl.Log4jLoggerFactory
   [junit4]   2> 286771 T1597 oasl.LogWatcher.newRegisteredLogWatcher Registering Log Listener [Log4j (org.slf4j.impl.Log4jLoggerFactory)]
   [junit4]   2> 286771 T1597 oasc.CoreContainer.load Node Name: 127.0.0.1
   [junit4]   2> 286772 T1597 oasc.ZkContainer.initZooKeeper Zookeeper client=127.0.0.1:36134/solr
   [junit4]   2> 286772 T1597 oasc.ZkController.checkChrootPath zkHost includes chroot
   [junit4]   2> 286819 T1597 n:127.0.0.1:45017_ oasc.ZkController.createEphemeralLiveNode Register node as live in ZooKeeper:/live_nodes/127.0.0.1:45017_
   [junit4]   2> 286822 T1597 n:127.0.0.1:45017_ oasc.Overseer.close Overseer (id=null) closing
   [junit4]   2> 286823 T1597 n:127.0.0.1:45017_ oasc.OverseerElectionContext.runLeaderProcess I am going to be the leader 127.0.0.1:45017_
   [junit4]   2> 286828 T1597 n:127.0.0.1:45017_ oasc.Overseer.start Overseer (id=93788467856474115-127.0.0.1:45017_-n_0000000000) starting
   [junit4]   2> 286832 T1597 n:127.0.0.1:45017_ oasc.OverseerAutoReplicaFailoverThread.<init> Starting OverseerAutoReplicaFailoverThread autoReplicaFailoverWorkLoopDelay=10000 autoReplicaFailoverWaitAfterExpiration=30000 autoReplicaFailoverBadNodeExpiration=60000
   [junit4]   2> 286832 T1627 n:127.0.0.1:45017_ oasc.OverseerCollectionProcessor.run Process current queue of collection creations
   [junit4]   2> 286860 T1626 n:127.0.0.1:45017_ oasc.Overseer$ClusterStateUpdater.run Starting to work on the main queue
   [junit4]   2> 286875 T1597 n:127.0.0.1:45017_ oasc.CorePropertiesLocator.discover Looking for core definitions underneath /home/jenkins/workspace/Lucene-Solr-trunk-Linux/solr/build/solr-core/test/J0/temp/solr.cloud.LeaderFailoverAfterPartitionTest 78CDE9CFD1BB0D04-001/control-001/cores
   [junit4]   2> 286876 T1597 n:127.0.0.1:45017_ oasc.CoreDescriptor.<init> CORE DESCRIPTOR: {name=collection1, config=solrconfig.xml, transient=false, schema=schema.xml, loadOnStartup=true, instanceDir=/home/jenkins/workspace/Lucene-Solr-trunk-Linux/solr/build/solr-core/test/J0/temp/solr.cloud.LeaderFailoverAfterPartitionTest 78CDE9CFD1BB0D04-001/control-001/cores/collection1, collection=control_collection, absoluteInstDir=/home/jenkins/workspace/Lucene-Solr-trunk-Linux/solr/build/solr-core/test/J0/temp/solr.cloud.LeaderFailoverAfterPartitionTest 78CDE9CFD1BB0D04-001/control-001/cores/collection1/, coreNodeName=, dataDir=data/, shard=}
   [junit4]   2> 286877 T1597 n:127.0.0.1:45017_ oasc.CorePropertiesLocator.discoverUnder Found core collection1 in /home/jenkins/workspace/Lucene-Solr-trunk-Linux/solr/build/solr-core/test/J0/temp/solr.cloud.LeaderFailoverAfterPartitionTest 78CDE9CFD1BB0D04-001/control-001/cores/collection1/
   [junit4]   2> 286877 T1597 n:127.0.0.1:45017_ oasc.CorePropertiesLocator.discover Found 1 core definitions
   [junit4]   2> 286878 T1629 n:127.0.0.1:45017_ c:control_collection x:collection1 oasc.ZkController.publish publishing core=collection1 state=down collection=control_collection
   [junit4]   2> 286878 T1629 n:127.0.0.1:45017_ c:control_collection x:collection1 oasc.ZkController.publish numShards not found on descriptor - reading it from system property
   [junit4]   2> 286880 T1626 n:127.0.0.1:45017_ oasc.Overseer$ClusterStateUpdater.run processMessage: queueSize: 1, message = {
   [junit4]   2> 	  "core":"collection1",
   [junit4]   2> 	  "roles":null,
   [junit4]   2> 	  "base_url":"http://127.0.0.1:45017",
   [junit4]   2> 	  "node_name":"127.0.0.1:45017_",
   [junit4]   2> 	  "numShards":"1",
   [junit4]   2> 	  "state":"down",
   [junit4]   2> 	  "shard":null,
   [junit4]   2> 	  "collection":"control_collection",
   [junit4]   2> 	  "operation":"state"} current state version: 0
   [junit4]   2> 286880 T1629 n:127.0.0.1:45017_ oasc.ZkController.waitForCoreNodeName look for our core node name
   [junit4]   2> 286881 T1626 n:127.0.0.1:45017_ oasco.ReplicaMutator.updateState Update state numShards=1 message={
   [junit4]   2> 	  "core":"collection1",
   [junit4]   2> 	  "roles":null,
   [junit4]   2> 	  "base_url":"http://127.0.0.1:45017",
   [junit4]   2> 	  "node_name":"127.0.0.1:45017_",
   [junit4]   2> 	  "numShards":"1",
   [junit4]   2> 	  "state":"down",
   [junit4]   2> 	  "shard":null,
   [junit4]   2> 	  "collection":"control_collection",
   [junit4]   2> 	  "operation":"state"}
   [junit4]   2> 286881 T1626 n:127.0.0.1:45017_ oasco.ClusterStateMutator.createCollection building a new cName: control_collection
   [junit4]   2> 286881 T1626 n:127.0.0.1:45017_ oasco.ReplicaMutator.updateState Assigning new node to shard shard=shard1
   [junit4]   2> 286888 T1625 n:127.0.0.1:45017_ oasc.DistributedQueue$LatchWatcher.process NodeChildrenChanged fired on path /overseer/queue state SyncConnected
   [junit4]   2> 287880 T1629 n:127.0.0.1:45017_ oasc.ZkController.waitForShardId waiting to find shard id in clusterstate for collection1
   [junit4]   2> 287881 T1629 n:127.0.0.1:45017_ oasc.ZkController.createCollectionZkNode Check for collection zkNode:control_collection
   [junit4]   2> 287881 T1629 n:127.0.0.1:45017_ oasc.ZkController.createCollectionZkNode Collection zkNode exists
   [junit4]   2> 287882 T1629 n:127.0.0.1:45017_ oasc.SolrResourceLoader.<init> new SolrResourceLoader for directory: '/home/jenkins/workspace/Lucene-Solr-trunk-Linux/solr/build/solr-core/test/J0/temp/solr.cloud.LeaderFailoverAfterPartitionTest 78CDE9CFD1BB0D04-001/control-001/cores/collection1/'
   [junit4]   2> 287895 T1629 n:127.0.0.1:45017_ oasc.Config.<init> loaded config solrconfig.xml with version 0 
   [junit4]   2> 287900 T1629 n:127.0.0.1:45017_ oasc.SolrConfig.refreshRequestParams current version of requestparams : -1
   [junit4]   2> 287905 T1629 n:127.0.0.1:45017_ oasc.SolrConfig.<init> Using Lucene MatchVersion: 6.0.0
   [junit4]   2> 287916 T1629 n:127.0.0.1:45017_ oasc.SolrConfig.<init> Loaded SolrConfig: solrconfig.xml
   [junit4]   2> 287928 T1629 n:127.0.0.1:45017_ oass.IndexSchema.readSchema Reading Solr Schema from /configs/conf1/schema.xml
   [junit4]   2> 287932 T1629 n:127.0.0.1:45017_ oass.IndexSchema.readSchema [collection1] Schema name=test
   [junit4]   2> 287992 T1629 n:127.0.0.1:45017_ oass.OpenExchangeRatesOrgProvider.init Initialized with rates=open-exchange-rates.json, refreshInterval=1440.
   [junit4]   2> 287999 T1629 n:127.0.0.1:45017_ oass.IndexSchema.readSchema default search field in schema is text
   [junit4]   2> 288001 T1629 n:127.0.0.1:45017_ oass.IndexSchema.readSchema unique key field: id
   [junit4]   2> 288015 T1629 n:127.0.0.1:45017_ oass.FileExchangeRateProvider.reload Reloading exchange rates from file currency.xml
   [junit4]   2> 288017 T1629 n:127.0.0.1:45017_ oass.FileExchangeRateProvider.reload Reloading exchange rates from file currency.xml
   [junit4]   2> 288018 T1629 n:127.0.0.1:45017_ oass.OpenExchangeRatesOrgProvider.reload Reloading exchange rates from open-exchange-rates.json
   [junit4]   2> 288019 T1629 n:127.0.0.1:45017_ oass.OpenExchangeRatesOrgProvider$OpenExchangeRates.<init> WARN Unknown key IMPORTANT NOTE
   [junit4]   2> 288019 T1629 n:127.0.0.1:45017_ oass.OpenExchangeRatesOrgProvider$OpenExchangeRates.<init> WARN Expected key, got STRING
   [junit4]   2> 288019 T1629 n:127.0.0.1:45017_ oass.OpenExchangeRatesOrgProvider.reload Reloading exchange rates from open-exchange-rates.json
   [junit4]   2> 288019 T1629 n:127.0.0.1:45017_ oass.OpenExchangeRatesOrgProvider$OpenExchangeRates.<init> WARN Unknown key IMPORTANT NOTE
   [junit4]   2> 288019 T1629 n:127.0.0.1:45017_ oass.OpenExchangeRatesOrgProvider$OpenExchangeRates.<init> WARN Expected key, got STRING
   [junit4]   2> 288019 T1629 n:127.0.0.1:45017_ oasc.CoreContainer.create Creating SolrCore 'collection1' using configuration from collection control_collection
   [junit4]   2> 288019 T1629 n:127.0.0.1:45017_ x:collection1 oasc.SolrCore.initDirectoryFactory org.apache.solr.core.MockDirectoryFactory
   [junit4]   2> 288020 T1629 n:127.0.0.1:45017_ x:collection1 oasc.SolrCore.<init> [[collection1] ] Opening new SolrCore at [/home/jenkins/workspace/Lucene-Solr-trunk-Linux/solr/build/solr-core/test/J0/temp/solr.cloud.LeaderFailoverAfterPartitionTest 78CDE9CFD1BB0D04-001/control-001/cores/collection1/], dataDir=[null]
   [junit4]   2> 288020 T1629 n:127.0.0.1:45017_ x:collection1 oasc.JmxMonitoredMap.<init> JMX monitoring is enabled. Adding Solr mbeans to JMX Server: com.sun.jmx.mbeanserver.JmxMBeanServer@695883a8
   [junit4]   2> 288020 T1629 n:127.0.0.1:45017_ x:collection1 oasc.CachingDirectoryFactory.get return new directory for /home/jenkins/workspace/Lucene-Solr-trunk-Linux/solr/build/solr-core/test/J0/temp/solr.cloud.LeaderFailoverAfterPartitionTest 78CDE9CFD1BB0D04-001/control-001/cores/collection1/data
   [junit4]   2> 288020 T1629 n:127.0.0.1:45017_ x:collection1 oasc.SolrCore.getNewIndexDir New index directory detected: old=null new=/home/jenkins/workspace/Lucene-Solr-trunk-Linux/solr/build/solr-core/test/J0/temp/solr.cloud.LeaderFailoverAfterPartitionTest 78CDE9CFD1BB0D04-001/control-001/cores/collection1/data/index/
   [junit4]   2> 288020 T1629 n:127.0.0.1:45017_ x:collection1 oasc.SolrCore.initIndex WARN [collection1] Solr index directory '/home/jenkins/workspace/Lucene-Solr-trunk-Linux/solr/build/solr-core/test/J0/temp/solr.cloud.LeaderFailoverAfterPartitionTest 78CDE9CFD1BB0D04-001/control-001/cores/collection1/data/index' doesn't exist. Creating new index...
   [junit4]   2> 288021 T1629 n:127.0.0.1:45017_ x:collection1 oasc.CachingDirectoryFactory.get return new directory for /home/jenkins/workspace/Lucene-Solr-trunk-Linux/solr/build/solr-core/test/J0/temp/solr.cloud.LeaderFailoverAfterPartitionTest 78CDE9CFD1BB0D04-001/control-001/cores/collection1/data/index
   [junit4]   2> 288021 T1629 n:127.0.0.1:45017_ x:collection1 oasu.RandomMergePolicy.<init> RandomMergePolicy wrapping class org.apache.lucene.index.TieredMergePolicy: [TieredMergePolicy: maxMergeAtOnce=39, maxMergeAtOnceExplicit=50, maxMergedSegmentMB=82.201171875, floorSegmentMB=2.064453125, forceMergeDeletesPctAllowed=19.581522441688747, segmentsPerTier=11.0, maxCFSSegmentSizeMB=8.796093022207999E12, noCFSRatio=0.6794599103411276
   [junit4]   2> 288021 T1629 n:127.0.0.1:45017_ x:collection1 oasc.SolrDeletionPolicy.onCommit SolrDeletionPolicy.onCommit: commits: num=1
   [junit4]   2> 		commit{dir=MockDirectoryWrapper(RAMDirectory@7182746f lockFactory=org.apache.lucene.store.SingleInstanceLockFactory@4cb60004),segFN=segments_1,generation=1}
   [junit4]   2> 288021 T1629 n:127.0.0.1:45017_ x:collection1 oasc.SolrDeletionPolicy.updateCommits newest commit generation = 1
   [junit4]   2> 288026 T1629 n:127.0.0.1:45017_ x:collection1 oasup.UpdateRequestProcessorChain.init creating updateRequestProcessorChain "nodistrib"
   [junit4]   2> 288026 T1629 n:127.0.0.1:45017_ x:collection1 oasup.UpdateRequestProcessorChain.init creating updateRequestProcessorChain "dedupe"
   [junit4]   2> 288026 T1629 n:127.0.0.1:45017_ x:collection1 oasup.UpdateRequestProcessorChain.init inserting DistributedUpdateProcessorFactory into updateRequestProcessorChain "dedupe"
   [junit4]   2> 288026 T1629 n:127.0.0.1:45017_ x:collection1 oasup.UpdateRequestProcessorChain.init creating updateRequestProcessorChain "stored_sig"
   [junit4]   2> 288026 T1629 n:127.0.0.1:45017_ x:collection1 oasup.UpdateRequestProcessorChain.init inserting DistributedUpdateProcessorFactory into updateRequestProcessorChain "stored_sig"
   [junit4]   2> 288026 T1629 n:127.0.0.1:45017_ x:collection1 oasup.UpdateRequestProcessorChain.init creating updateRequestProcessorChain "distrib-dup-test-chain-explicit"
   [junit4]   2> 288027 T1629 n:127.0.0.1:45017_ x:collection1 oasup.UpdateRequestProcessorChain.init creating updateRequestProcessorChain "distrib-dup-test-chain-implicit"
   [junit4]   2> 288027 T1629 n:127.0.0.1:45017_ x:collection1 oasup.UpdateRequestProcessorChain.init inserting DistributedUpdateProcessorFactory into updateRequestProcessorChain "distrib-dup-test-chain-implicit"
   [junit4]   2> 288027 T1629 n:127.0.0.1:45017_ x:collection1 oasc.SolrCore.loadUpdateProcessorChains no updateRequestProcessorChain defined as default, creating implicit default
   [junit4]   2> 288029 T1629 n:127.0.0.1:45017_ x:collection1 oashl.XMLLoader.init xsltCacheLifetimeSeconds=60
   [junit4]   2> 288030 T1629 n:127.0.0.1:45017_ x:collection1 oashl.XMLLoader.init xsltCacheLifetimeSeconds=60
   [junit4]   2> 288031 T1629 n:127.0.0.1:45017_ x:collection1 oashl.XMLLoader.init xsltCacheLifetimeSeconds=60
   [junit4]   2> 288031 T1629 n:127.0.0.1:45017_ x:collection1 oashl.XMLLoader.init xsltCacheLifetimeSeconds=60
   [junit4]   2> 288033 T1629 n:127.0.0.1:45017_ x:collection1 oasc.RequestHandlers.initHandlersFromConfig Registered paths: /admin/mbeans,standard,/update/csv,/update/json/docs,/admin/luke,/admin/segments,/get,/admin/system,/replication,/admin/properties,/config,/schema,/admin/plugins,/admin/logging,/update/json,/admin/threads,/admin/ping,/update,/admin/file
   [junit4]   2> 288034 T1629 n:127.0.0.1:45017_ x:collection1 oasc.SolrCore.initStatsCache Using default statsCache cache: org.apache.solr.search.stats.LocalStatsCache
   [junit4]   2> 288035 T1629 n:127.0.0.1:45017_ x:collection1 oasu.UpdateHandler.<init> Using UpdateLog implementation: org.apache.solr.update.UpdateLog
   [junit4]   2> 288035 T1629 n:127.0.0.1:45017_ x:collection1 oasu.UpdateLog.init Initializing UpdateLog: dataDir= defaultSyncLevel=FLUSH numRecordsToKeep=100 maxNumLogsToKeep=10
   [junit4]   2> 288036 T1629 n:127.0.0.1:45017_ x:collection1 oasu.CommitTracker.<init> Hard AutoCommit: disabled
   [junit4]   2> 288036 T1629 n:127.0.0.1:45017_ x:collection1 oasu.CommitTracker.<init> Soft AutoCommit: disabled
   [junit4]   2> 288037 T1629 n:127.0.0.1:45017_ x:collection1 oasu.RandomMergePolicy.<init> RandomMergePolicy wrapping class org.apache.lucene.index.TieredMergePolicy: [TieredMergePolicy: maxMergeAtOnce=14, maxMergeAtOnceExplicit=19, maxMergedSegmentMB=54.2548828125, floorSegmentMB=1.7099609375, forceMergeDeletesPctAllowed=9.511577512215029, segmentsPerTier=20.0, maxCFSSegmentSizeMB=8.796093022207999E12, noCFSRatio=1.0
   [junit4]   2> 288037 T1629 n:127.0.0.1:45017_ x:collection1 oasc.SolrDeletionPolicy.onInit SolrDeletionPolicy.onInit: commits: num=1
   [junit4]   2> 		commit{dir=MockDirectoryWrapper(RAMDirectory@7182746f lockFactory=org.apache.lucene.store.SingleInstanceLockFactory@4cb60004),segFN=segments_1,generation=1}
   [junit4]   2> 288038 T1629 n:127.0.0.1:45017_ x:collection1 oasc.SolrDeletionPolicy.updateCommits newest commit generation = 1
   [junit4]   2> 288038 T1629 n:127.0.0.1:45017_ x:collection1 oass.SolrIndexSearcher.<init> Opening Searcher@45262d98[collection1] main
   [junit4]   2> 288038 T1629 n:127.0.0.1:45017_ x:collection1 oasr.ManagedResourceStorage.newStorageIO Setting up ZooKeeper-based storage for the RestManager with znodeBase: /configs/conf1
   [junit4]   2> 288039 T1629 n:127.0.0.1:45017_ x:collection1 oasr.ManagedResourceStorage$ZooKeeperStorageIO.configure Configured ZooKeeperStorageIO with znodeBase: /configs/conf1
   [junit4]   2> 288039 T1629 n:127.0.0.1:45017_ x:collection1 oasr.RestManager.init Initializing RestManager with initArgs: {}
   [junit4]   2> 288039 T1629 n:127.0.0.1:45017_ x:collection1 oasr.ManagedResourceStorage.load Reading _rest_managed.json using ZooKeeperStorageIO:path=/configs/conf1
   [junit4]   2> 288039 T1629 n:127.0.0.1:45017_ x:collection1 oasr.ManagedResourceStorage$ZooKeeperStorageIO.openInputStream No data found for znode /configs/conf1/_rest_managed.json
   [junit4]   2> 288039 T1629 n:127.0.0.1:45017_ x:collection1 oasr.ManagedResourceStorage.load Loaded null at path _rest_managed.json using ZooKeeperStorageIO:path=/configs/conf1
   [junit4]   2> 288040 T1629 n:127.0.0.1:45017_ x:collection1 oasr.RestManager.init Initializing 0 registered ManagedResources
   [junit4]   2> 288040 T1629 n:127.0.0.1:45017_ x:collection1 oash.ReplicationHandler.inform Commits will be reserved for  10000
   [junit4]   2> 288040 T1629 n:127.0.0.1:45017_ x:collection1 oasc.ZkController.getConfDirListeners watch zkdir /configs/conf1
   [junit4]   2> 288040 T1630 n:127.0.0.1:45017_ x:collection1 oasc.SolrCore.registerSearcher [collection1] Registered new searcher Searcher@45262d98[collection1] main{ExitableDirectoryReader(UninvertingDirectoryReader())}
   [junit4]   2> 288040 T1629 n:127.0.0.1:45017_ x:collection1 oasc.CoreContainer.registerCore registering core: collection1
   [junit4]   2> 288041 T1633 n:127.0.0.1:45017_ c:control_collection s:shard1 x:collection1 oasc.ZkController.register Register replica - core:collection1 address:http://127.0.0.1:45017 collection:control_collection shard:shard1
   [junit4]   2> 288041 T1597 n:127.0.0.1:45017_ oass.SolrDispatchFilter.init user.dir=/home/jenkins/workspace/Lucene-Solr-trunk-Linux/solr/build/solr-core/test/J0
   [junit4]   2> 288041 T1597 n:127.0.0.1:45017_ oass.SolrDispatchFilter.init SolrDispatchFilter.init() done
   [junit4]   2> 288043 T1633 n:127.0.0.1:45017_ c:control_collection s:shard1 x:collection1 oasc.ShardLeaderElectionContext.runLeaderProcess Running the leader process for shard shard1
   [junit4]   2> 288044 T1625 n:127.0.0.1:45017_ oasc.DistributedQueue$LatchWatcher.process NodeChildrenChanged fired on path /overseer/queue state SyncConnected
   [junit4]   2> 288044 T1633 n:127.0.0.1:45017_ c:control_collection s:shard1 x:collection1 oasc.ShardLeaderElectionContext.waitForReplicasToComeUp Enough replicas found to continue.
   [junit4]   2> 288044 T1633 n:127.0.0.1:45017_ c:control_collection s:shard1 x:collection1 oasc.ShardLeaderElectionContext.runLeaderProcess I may be the new leader - try and sync
   [junit4]   2> 288044 T1626 n:127.0.0.1:45017_ oasc.Overseer$ClusterStateUpdater.run processMessage: queueSize: 1, message = {
   [junit4]   2> 	  "operation":"leader",
   [junit4]   2> 	  "shard":"shard1",
   [junit4]   2> 	  "collection":"control_collection"} current state version: 1
   [junit4]   2> ASYNC  NEW_CORE C248 name=collection1 org.apache.solr.core.SolrCore@68e77dfc url=http://127.0.0.1:45017/collection1 node=127.0.0.1:45017_ C248_STATE=coll:control_collection core:collection1 props:{core=collection1, base_url=http://127.0.0.1:45017, node_name=127.0.0.1:45017_, state=down}
   [junit4]   2> 288044 T1633 n:127.0.0.1:45017_ c:control_collection s:shard1 x:collection1 C248 oasc.SyncStrategy.sync Sync replicas to http://127.0.0.1:45017/collection1/
   [junit4]   2> 288046 T1633 n:127.0.0.1:45017_ c:control_collection s:shard1 x:collection1 C248 oasc.SyncStrategy.syncReplicas Sync Success - now sync replicas to me
   [junit4]   2> 288046 T1633 n:127.0.0.1:45017_ c:control_collection s:shard1 x:collection1 C248 oasc.SyncStrategy.syncToMe http://127.0.0.1:45017/collection1/ has no replicas
   [junit4]   2> 288046 T1633 n:127.0.0.1:45017_ c:control_collection s:shard1 x:collection1 oasc.ShardLeaderElectionContext.runLeaderProcess I am the new leader: http://127.0.0.1:45017/collection1/ shard1
   [junit4]   2> 288051 T1597 oasc.ChaosMonkey.monkeyLog monkey: init - expire sessions:false cause connection loss:false
   [junit4]   2> 288057 T1625 n:127.0.0.1:45017_ oasc.DistributedQueue$LatchWatcher.process NodeChildrenChanged fired on path /overseer/queue state SyncConnected
   [junit4]   2> 288058 T1626 n:127.0.0.1:45017_ oasc.Overseer$ClusterStateUpdater.run processMessage: queueSize: 1, message = {
   [junit4]   2> 	  "operation":"leader",
   [junit4]   2> 	  "shard":"shard1",
   [junit4]   2> 	  "collection":"control_collection",
   [junit4]   2> 	  "base_url":"http://127.0.0.1:45017",
   [junit4]   2> 	  "core":"collection1",
   [junit4]   2> 	  "state":"active"} current state version: 1
   [junit4]   2> 288123 T1597 oas.SolrTestCaseJ4.writeCoreProperties Writing core.properties file to /home/jenkins/workspace/Lucene-Solr-trunk-Linux/solr/build/solr-core/test/J0/temp/solr.cloud.LeaderFailoverAfterPartitionTest 78CDE9CFD1BB0D04-001/shard-1-001/cores/collection1
   [junit4]   2> 288124 T1597 oasc.AbstractFullDistribZkTestBase.createJettys create jetty 1 in directory /home/jenkins/workspace/Lucene-Solr-trunk-Linux/solr/build/solr-core/test/J0/temp/solr.cloud.LeaderFailoverAfterPartitionTest 78CDE9CFD1BB0D04-001/shard-1-001
   [junit4]   2> 288125 T1597 oejs.Server.doStart jetty-9.2.10.v20150310
   [junit4]   2> 288126 T1597 oejsh.ContextHandler.doStart Started o.e.j.s.ServletContextHandler@64f223ea{/,null,AVAILABLE}
   [junit4]   2> 288127 T1597 oejs.AbstractConnector.doStart Started ServerConnector@6c348785{HTTP/1.1}{127.0.0.1:?????}
   [junit4]   2> 288128 T1597 oejs.Server.doStart Started @??????ms
   [junit4]   2> 288128 T1597 oascse.JettySolrRunner$1.lifeCycleStarted Jetty properties: {solr.data.dir=/home/jenkins/workspace/Lucene-Solr-trunk-Linux/solr/build/solr-core/test/J0/temp/solr.cloud.LeaderFailoverAfterPartitionTest 78CDE9CFD1BB0D04-001/tempDir-001/jetty1, solrconfig=solrconfig.xml, hostContext=/, hostPort=41964, coreRootDirectory=/home/jenkins/workspace/Lucene-Solr-trunk-Linux/solr/build/solr-core/test/J0/temp/solr.cloud.LeaderFailoverAfterPartitionTest 78CDE9CFD1BB0D04-001/shard-1-001/cores}
   [junit4]   2> 288128 T1597 oass.SolrDispatchFilter.init SolrDispatchFilter.init()sun.misc.Launcher$AppClassLoader@4e0e2f2a
   [junit4]   2> 288129 T1597 oasc.SolrResourceLoader.<init> new SolrResourceLoader for directory: '/home/jenkins/workspace/Lucene-Solr-trunk-Linux/solr/build/solr-core/test/J0/temp/solr.cloud.LeaderFailoverAfterPartitionTest 78CDE9CFD1BB0D04-001/shard-1-001/'
   [junit4]   2> 288142 T1597 oasc.SolrXmlConfig.fromFile Loading container configuration from /home/jenkins/workspace/Lucene-Solr-trunk-Linux/solr/build/solr-core/test/J0/temp/solr.cloud.LeaderFailoverAfterPartitionTest 78CDE9CFD1BB0D04-001/shard-1-001/solr.xml
   [junit4]   2> 288146 T1597 oasc.CorePropertiesLocator.<init> Config-defined core root directory: /home/jenkins/workspace/Lucene-Solr-trunk-Linux/solr/build/solr-core/test/J0/temp/solr.cloud.LeaderFailoverAfterPartitionTest 78CDE9CFD1BB0D04-001/shard-1-001/cores
   [junit4]   2> 288147 T1597 oasc.CoreContainer.<init> New CoreContainer 1157977681
   [junit4]   2> 288147 T1597 oasc.CoreContainer.load Loading cores into CoreContainer [instanceDir=/home/jenkins/workspace/Lucene-Solr-trunk-Linux/solr/build/solr-core/test/J0/temp/solr.cloud.LeaderFailoverAfterPartitionTest 78CDE9CFD1BB0D04-001/shard-1-001/]
   [junit4]   2> 288148 T1597 oasc.CoreContainer.load loading shared library: /home/jenkins/workspace/Lucene-Solr-trunk-Linux/solr/build/solr-core/test/J0/temp/solr.cloud.LeaderFailoverAfterPartitionTest 78CDE9CFD1BB0D04-001/shard-1-001/lib
   [junit4]   2> 288148 T1597 oasc.SolrResourceLoader.addToClassLoader WARN Can't find (or read) directory to add to classloader: lib (resolved as: /home/jenkins/workspace/Lucene-Solr-trunk-Linux/solr/build/solr-core/test/J0/temp/solr.cloud.LeaderFailoverAfterPartitionTest 78CDE9CFD1BB0D04-001/shard-1-001/lib).
   [junit4]   2> 288154 T1597 oashc.HttpShardHandlerFactory.init created with socketTimeout : 90000,urlScheme : ,connTimeout : 15000,maxConnectionsPerHost : 20,maxConnections : 10000,corePoolSize : 0,maximumPoolSize : 2147483647,maxThreadIdleTime : 5,sizeOfQueue : -1,fairnessPolicy : false,useRetries : false,
   [junit4]   2> 288156 T1597 oasu.UpdateShardHandler.<init> Creating UpdateShardHandler HTTP client with params: socketTimeout=340000&connTimeout=45000&retry=true
   [junit4]   2> 288156 T1597 oasl.LogWatcher.createWatcher SLF4J impl is org.slf4j.impl.Log4jLoggerFactory
   [junit4]   2> 288157 T1597 oasl.LogWatcher.newRegisteredLogWatcher Registering Log Listener [Log4j (org.slf4j.impl.Log4jLoggerFactory)]
   [junit4]   2> 288157 T1597 oasc.CoreContainer.load Node Name: 127.0.0.1
   [junit4]   2> 288157 T1597 oasc.ZkContainer.initZooKeeper Zookeeper client=127.0.0.1:36134/solr
   [junit4]   2> 288157 T1597 oasc.ZkController.checkChrootPath zkHost includes chroot
   [junit4]   2> 288209 T1633 n:127.0.0.1:45017_ c:control_collection s:shard1 x:collection1 oasc.ZkController.register We are http://127.0.0.1:45017/collection1/ and leader is http://127.0.0.1:45017/collection1/
   [junit4]   2> 288209 T1633 n:127.0.0.1:45017_ c:control_collection s:shard1 x:collection1 oasc.ZkController.register No LogReplay needed for core=collection1 baseURL=http://127.0.0.1:45017
   [junit4]   2> 288210 T1633 n:127.0.0.1:45017_ c:control_collection s:shard1 x:collection1 oasc.ZkController.checkRecovery I am the leader, no recovery necessary
   [junit4]   2> 288210 T1633 n:127.0.0.1:45017_ c:control_collection s:shard1 x:collection1 oasc.ZkController.publish publishing core=collection1 state=active collection=control_collection
   [junit4]   2> 288210 T1633 n:127.0.0.1:45017_ c:control_collection s:shard1 x:collection1 oasc.ZkController.publish numShards not found on descriptor - reading it from system property
   [junit4]   2> 288212 T1625 n:127.0.0.1:45017_ oasc.DistributedQueue$LatchWatcher.process NodeChildrenChanged fired on path /overseer/queue state SyncConnected
   [junit4]   2> 288213 T1626 n:127.0.0.1:45017_ oasc.Overseer$ClusterStateUpdater.run processMessage: queueSize: 1, message = {
   [junit4]   2> 	  "core":"collection1",
   [junit4]   2> 	  "core_node_name":"core_node1",
   [junit4]   2> 	  "roles":null,
   [junit4]   2> 	  "base_url":"http://127.0.0.1:45017",
   [junit4]   2> 	  "node_name":"127.0.0.1:45017_",
   [junit4]   2> 	  "numShards":"2",
   [junit4]   2> 	  "state":"active",
   [junit4]   2> 	  "shard":"shard1",
   [junit4]   2> 	  "collection":"control_collection",
   [junit4]   2> 	  "operation":"state"} current state version: 2
   [junit4]   2> 288213 T1626 n:127.0.0.1:45017_ oasco.ReplicaMutator.updateState Update state numShards=2 message={
   [junit4]   2> 	  "core":"collection1",
   [junit4]   2> 	  "core_node_name":"core_node1",
   [junit4]   2> 	  "roles":null,
   [junit4]   2> 	  "base_url":"http://127.0.0.1:45017",
   [junit4]   2> 	  "node_name":"127.0.0.1:45017_",
   [junit4]   2> 	  "numShards":"2",
   [junit4]   2> 	  "state":"active",
   [junit4]   2> 	  "shard":"shard1",
   [junit4]   2> 	  "collection":"control_collection",
   [junit4]   2> 	  "operation":"state"}
   [junit4]   2> 289185 T1597 n:127.0.0.1:41964_ oasc.ZkController.createEphemeralLiveNode Register node as live in ZooKeeper:/live_nodes/127.0.0.1:41964_
   [junit4]   2> 289190 T1597 n:127.0.0.1:41964_ oasc.Overseer.close Overseer (id=null) closing
   [junit4]   2> 289191 T1597 n:127.0.0.1:41964_ oasc.CorePropertiesLocator.discover Looking for core definitions underneath /home/jenkins/workspace/Lucene-Solr-trunk-Linux/solr/build/solr-core/test/J0/temp/solr.cloud.LeaderFailoverAfterPartitionTest 78CDE9CFD1BB0D04-001/shard-1-001/cores
   [junit4]   2> 289192 T1597 n:127.0.0.1:41964_ oasc.CoreDescriptor.<init> CORE DESCRIPTOR: {name=collection1, config=solrconfig.xml, transient=false, schema=schema.xml, loadOnStartup=true, instanceDir=/home/jenkins/workspace/Lucene-Solr-trunk-Linux/solr/build/solr-core/test/J0/temp/solr.cloud.LeaderFailoverAfterPartitionTest 78CDE9CFD1BB0D04-001/shard-1-001/cores/collection1, collection=collection1, absoluteInstDir=/home/jenkins/workspace/Lucene-Solr-trunk-Linux/solr/build/solr-core/test/J0/temp/solr.cloud.LeaderFailoverAfterPartitionTest 78CDE9CFD1BB0D04-001/shard-1-001/cores/collection1/, coreNodeName=, dataDir=data/, shard=}
   [junit4]   2> 289193 T1597 n:127.0.0.1:41964_ oasc.CorePropertiesLocator.discoverUnder Found core collection1 in /home/jenkins/workspace/Lucene-Solr-trunk-Linux/solr/build/solr-core/test/J0/temp/solr.cloud.LeaderFailoverAfterPartitionTest 78CDE9CFD1BB0D04-001/shard-1-001/cores/collection1/
   [junit4]   2> 289193 T1597 n:127.0.0.1:41964_ oasc.CorePropertiesLocator.discover Found 1 core definitions
   [junit4]   2> 289193 T1655 n:127.0.0.1:41964_ c:collection1 x:collection1 oasc.ZkController.publish publishing core=collection1 state=down collection=collection1
   [junit4]   2> 289194 T1655 n:127.0.0.1:41964_ c:collection1 x:collection1 oasc.ZkController.publish numShards not found on descriptor - reading it from system property
   [junit4]   2> 289194 T1655 n:127.0.0.1:41964_ oasc.ZkController.waitForCoreNodeName look for our core node name
   [junit4]   2> 289194 T1625 n:127.0.0.1:45017_ oasc.DistributedQueue$LatchWatcher.process NodeChildrenChanged fired on path /overseer/queue state SyncConnected
   [junit4]   2> 289195 T1626 n:127.0.0.1:45017_ oasc.Overseer$ClusterStateUpdater.run processMessage: queueSize: 1, message = {
   [junit4]   2> 	  "core":"collection1",
   [junit4]   2> 	  "roles":null,
   [junit4]   2> 	  "base_url":"http://127.0.0.1:41964",
   [junit4]   2> 	  "node_name":"127.0.0.1:41964_",
   [junit4]   2> 	  "numShards":"2",
   [junit4]   2> 	  "state":"down",
   [junit4]   2> 	  "shard":null,
   [junit4]   2> 	  "collection":"collection1",
   [junit4]   2> 	  "operation":"state"} current state version: 3
   [junit4]   2> 289195 T1626 n:127.0.0.1:45017_ oasco.ReplicaMutator.updateState Update state numShards=2 message={
   [junit4]   2> 	  "core":"collection1",
   [junit4]   2> 	  "roles":null,
   [junit4]   2> 	  "base_url":"http://127.0.0.1:41964",
   [junit4]   2> 	  "node_name":"127.0.0.1:41964_",
   [junit4]   2> 	  "numShards":"2",
   [junit4]   2> 	  "state":"down",
   [junit4]   2> 	  "shard":null,
   [junit4]   2> 	  "collection":"collection1",
   [junit4]   2> 	  "operation":"state"}
   [junit4]   2> 289195 T1626 n:127.0.0.1:45017_ oasco.ClusterStateMutator.createCollection building a new cName: collection1
   [junit4]   2> 289195 T1626 n:127.0.0.1:45017_ oasco.ReplicaMutator.updateState Assigning new node to shard shard=shard2
   [junit4]   2> 290194 T1655 n:127.0.0.1:41964_ oasc.ZkController.waitForShardId waiting to find shard id in clusterstate for collection1
   [junit4]   2> 290195 T1655 n:127.0.0.1:41964_ oasc.ZkController.createCollectionZkNode Check for collection zkNode:collection1
   [junit4]   2> 290195 T1655 n:127.0.0.1:41964_ oasc.ZkController.createCollectionZkNode Collection zkNode exists
   [junit4]   2> 290196 T1655 n:127.0.0.1:41964_ oasc.SolrResourceLoader.<init> new SolrResourceLoader for directory: '/home/jenkins/workspace/Lucene-Solr-trunk-Linux/solr/build/solr-core/test/J0/temp/solr.cloud.LeaderFailoverAfterPartitionTest 78CDE9CFD1BB0D04-001/shard-1-001/cores/collection1/'
   [junit4]   2> 290205 T1655 n:127.0.0.1:41964_ oasc.Config.<init> loaded config solrconfig.xml with version 0 
   [junit4]   2> 290211 T1655 n:127.0.0.1:41964_ oasc.SolrConfig.refreshRequestParams current version of requestparams : -1
   [junit4]   2> 290215 T1655 n:127.0.0.1:41964_ oasc.SolrConfig.<init> Using Lucene MatchVersion: 6.0.0
   [junit4]   2> 290223 T1655 n:127.0.0.1:41964_ oasc.SolrConfig.<init> Loaded SolrConfig: solrconfig.xml
   [junit4]   2> 290224 T1655 n:127.0.0.1:41964_ oass.IndexSchema.readSchema Reading Solr Schema from /configs/conf1/schema.xml
   [junit4]   2> 290228 T1655 n:127.0.0.1:41964_ oass.IndexSchema.readSchema [collection1] Schema name=test
   [junit4]   2> 290341 T1655 n:127.0.0.1:41964_ oass.OpenExchangeRatesOrgProvider.init Initialized with rates=open-exchange-rates.json, refreshInterval=1440.
   [junit4]   2> 290345 T1655 n:127.0.0.1:41964_ oass.IndexSchema.readSchema default search field in schema is text
   [junit4]   2> 290346 T1655 n:127.0.0.1:41964_ oass.IndexSchema.readSchema unique key field: id
   [junit4]   2> 290366 T1655 n:127.0.0.1:41964_ oass.FileExchangeRateProvider.reload Reloading exchange rates from file currency.xml
   [junit4]   2> 290369 T1655 n:127.0.0.1:41964_ oass.FileExchangeRateProvider.reload Reloading exchange rates from file currency.xml
   [junit4]   2> 290371 T1655 n:127.0.0.1:41964_ oass.OpenExchangeRatesOrgProvider.reload Reloading exchange rates from open-exchange-rates.json
   [junit4]   2> 290376 T1655 n:127.0.0.1:41964_ oass.OpenExchangeRatesOrgProvider$OpenExchangeRates.<init> WARN Unknown key IMPORTANT NOTE
   [junit4]   2> 290377 T1655 n:127.0.0.1:41964_ oass.OpenExchangeRatesOrgProvider$OpenExchangeRates.<init> WARN Expected key, got STRING
   [junit4]   2> 290377 T1655 n:127.0.0.1:41964_ oass.OpenExchangeRatesOrgProvider.reload Reloading exchange rates from open-exchange-rates.json
   [junit4]   2> 290377 T1655 n:127.0.0.1:41964_ oass.OpenExchangeRatesOrgProvider$OpenExchangeRates.<init> WARN Unknown key IMPORTANT NOTE
   [junit4]   2> 290378 T1655 n:127.0.0.1:41964_ oass.OpenExchangeRatesOrgProvider$OpenExchangeRates.<init> WARN Expected key, got STRING
   [junit4]   2> 290378 T1655 n:127.0.0.1:41964_ oasc.CoreContainer.create Creating SolrCore 'collection1' using configuration from collection collection1
   [junit4]   2> 290378 T1655 n:127.0.0.1:41964_ x:collection1 oasc.SolrCore.initDirectoryFactory org.apache.solr.core.MockDirectoryFactory
   [junit4]   2> 290378 T1655 n:127.0.0.1:41964_ x:collection1 oasc.SolrCore.<init> [[collection1] ] Opening new SolrCore at [/home/jenkins/workspace/Lucene-Solr-trunk-Linux/solr/build/solr-core/test/J0/temp/solr.cloud.LeaderFailoverAfterPartitionTest 78CDE9CFD1BB0D04-001/shard-1-001/cores/collection1/], dataDir=[null]
   [junit4]   2> 290379 T1655 n:127.0.0.1:41964_ x:collection1 oasc.JmxMonitoredMap.<init> JMX monitoring is enabled. Adding Solr mbeans to JMX Server: com.sun.jmx.mbeanserver.JmxMBeanServer@695883a8
   [junit4]   2> 290380 T1655 n:127.0.0.1:41964_ x:collection1 oasc.CachingDirectoryFactory.get return new directory for /home/jenkins/workspace/Lucene-Solr-trunk-Linux/solr/build/solr-core/test/J0/temp/solr.cloud.LeaderFailoverAfterPartitionTest 78CDE9CFD1BB0D04-001/shard-1-001/cores/collection1/data
   [junit4]   2> 290380 T1655 n:127.0.0.1:41964_ x:collection1 oasc.SolrCore.getNewIndexDir New index directory detected: old=null new=/home/jenkins/workspace/Lucene-Solr-trunk-Linux/solr/build/solr-core/test/J0/temp/solr.cloud.LeaderFailoverAfterPartitionTest 78CDE9CFD1BB0D04-001/shard-1-001/cores/collection1/data/index/
   [junit4]   2> 290380 T1655 n:127.0.0.1:41964_ x:collection1 oasc.SolrCore.initIndex WARN [collection1] Solr index directory '/home/jenkins/workspace/Lucene-Solr-trunk-Linux/solr/build/solr-core/test/J0/temp/solr.cloud.LeaderFailoverAfterPartitionTest 78CDE9CFD1BB0D04-001/shard-1-001/cores/collection1/data/index' doesn't exist. Creating new index...
   [junit4]   2> 290380 T1655 n:127.0.0.1:41964_ x:collection1 oasc.CachingDirectoryFactory.get return new directory for /home/jenkins/workspace/Lucene-Solr-trunk-Linux/solr/build/solr-core/test/J0/temp/solr.cloud.LeaderFailoverAfterPartitionTest 78CDE9CFD1BB0D04-001/shard-1-001/cores/collection1/data/index
   [junit4]   2> 290380 T1655 n:127.0.0.1:41964_ x:collection1 oasu.RandomMergePolicy.<init> RandomMergePolicy wrapping class org.apache.lucene.index.TieredMergePolicy: [TieredMergePolicy: maxMergeAtOnce=39, maxMergeAtOnceExplicit=50, maxMergedSegmentMB=82.201171875, floorSegmentMB=2.064453125, forceMergeDeletesPctAllowed=19.581522441688747, segmentsPerTier=11.0, maxCFSSegmentSizeMB=8.796093022207999E12, noCFSRatio=0.6794599103411276
   [junit4]   2> 290381 T1655 n:127.0.0.1:41964_ x:collection1 oasc.SolrDeletionPolicy.onCommit SolrDeletionPolicy.onCommit: commits: num=1
   [junit4]   2> 		commit{dir=MockDirectoryWrapper(RAMDirectory@2c4e4f23 lockFactory=org.apache.lucene.store.SingleInstanceLockFactory@284fea4e),segFN=segments_1,generation=1}
   [junit4]   2> 290382 T1655 n:127.0.0.1:41964_ x:collection1 oasc.SolrDeletionPolicy.updateCommits newest commit generation = 1
   [junit4]   2> 290386 T1655 n:127.0.0.1:41964_ x:collection1 oasup.UpdateRequestProcessorChain.init creating updateRequestProcessorChain "nodistrib"
   [junit4]   2> 290387 T1655 n:127.0.0.1:41964_ x:collection1 oasup.UpdateRequestProcessorChain.init creating updateRequestProcessorChain "dedupe"
   [junit4]   2> 290387 T1655 n:127.0.0.1:41964_ x:collection1 oasup.UpdateRequestProcessorChain.init inserting DistributedUpdateProcessorFactory into updateRequestProcessorChain "dedupe"
   [junit4]   2> 290387 T1655 n:127.0.0.1:41964_ x:collection1 oasup.UpdateRequestProcessorChain.init creating updateRequestProcessorChain "stored_sig"
   [junit4]   2> 290388 T1655 n:127.0.0.1:41964_ x:collection1 oasup.UpdateRequestProcessorChain.init inserting DistributedUpdateProcessorFactory into updateRequestProcessorChain "stored_sig"
   [junit4]   2> 290388 T1655 n:127.0.0.1:41964_ x:collection1 oasup.UpdateRequestProcessorChain.init creating updateRequestProcessorChain "distrib-dup-test-chain-explicit"
   [junit4]   2> 290389 T1655 n:127.0.0.1:41964_ x:collection1 oasup.UpdateRequestProcessorChain.init creating updateRequestProcessorChain "distrib-dup-test-chain-implicit"
   [junit4]   2> 290389 T1655 n:127.0.0.1:41964_ x:collection1 oasup.UpdateRequestProcessorChain.init inserting DistributedUpdateProcessorFactory into updateRequestProcessorChain "distrib-dup-test-chain-implicit"
   [junit4]   2> 290389 T1655 n:127.0.0.1:41964_ x:collection1 oasc.SolrCore.loadUpdateProcessorChains no updateRequestProcessorChain defined as default, creating implicit default
   [junit4]   2> 290391 T1655 n:127.0.0.1:41964_ x:collection1 oashl.XMLLoader.init xsltCacheLifetimeSeconds=60
   [junit4]   2> 290393 T1655 n:127.0.0.1:41964_ x:collection1 oashl.XMLLoader.init xsltCacheLifetimeSeconds=60
   [junit4]   2> 290394 T1655 n:127.0.0.1:41964_ x:collection1 oashl.XMLLoader.init xsltCacheLifetimeSeconds=60
   [junit4]   2> 290394 T1655 n:127.0.0.1:41964_ x:collection1 oashl.XMLLoader.init xsltCacheLifetimeSeconds=60
   [junit4]   2> 290399 T1655 n:127.0.0.1:41964_ x:collection1 oasc.RequestHandlers.initHandlersFromConfig Registered paths: /admin/mbeans,standard,/update/csv,/update/json/docs,/admin/luke,/admin/segments,/get,/admin/system,/replication,/admin/properties,/config,/schema,/admin/plugins,/admin/logging,/update/json,/admin/threads,/admin/ping,/update,/admin/file
   [junit4]   2> 290400 T1655 n:127.0.0.1:41964_ x:collection1 oasc.SolrCore.initStatsCache Using default statsCache cache: org.apache.solr.search.stats.LocalStatsCache
   [junit4]   2> 290400 T1655 n:127.0.0.1:41964_ x:collection1 oasu.UpdateHandler.<init> Using UpdateLog implementation: org.apache.solr.update.UpdateLog
   [junit4]   2> 290401 T1655 n:127.0.0.1:41964_ x:collection1 oasu.UpdateLog.init Initializing UpdateLog: dataDir= defaultSyncLevel=FLUSH numRecordsToKeep=100 maxNumLogsToKeep=10
   [junit4]   2> 290401 T1655 n:127.0.0.1:41964_ x:collection1 oasu.CommitTracker.<init> Hard AutoCommit: disabled
   [junit4]   2> 290401 T1655 n:127.0.0.1:41964_ x:collection1 oasu.CommitTracker.<init> Soft AutoCommit: disabled
   [junit4]   2> 290403 T1655 n:127.0.0.1:41964_ x:collection1 oasu.RandomMergePolicy.<init> RandomMergePolicy wrapping class org.apache.lucene.index.TieredMergePolicy: [TieredMergePolicy: maxMergeAtOnce=14, maxMergeAtOnceExplicit=19, maxMergedSegmentMB=54.2548828125, floorSegmentMB=1.7099609375, forceMergeDeletesPctAllowed=9.511577512215029, segmentsPerTier=20.0, maxCFSSegmentSizeMB=8.796093022207999E12, noCFSRatio=1.0
   [junit4]   2> 290403 T1655 n:127.0.0.1:41964_ x:collection1 oasc.SolrDeletionPolicy.onInit SolrDeletionPolicy.onInit: commits: num=1
   [junit4]   2> 		commit{dir=MockDirectoryWrapper(RAMDirectory@2c4e4f23 lockFactory=org.apache.lucene.store.SingleInstanceLockFactory@284fea4e),segFN=segments_1,generation=1}
   [junit4]   2> 290404 T1655 n:127.0.0.1:41964_ x:collection1 oasc.SolrDeletionPolicy.updateCommits newest commit generation = 1
   [junit4]   2> 290404 T1655 n:127.0.0.1:41964_ x:collection1 oass.SolrIndexSearcher.<init> Opening Searcher@714c67da[collection1] main
   [junit4]   2> 290405 T1655 n:127.0.0.1:41964_ x:collection1 oasr.ManagedResourceStorage.newStorageIO Setting up ZooKeeper-based storage for the RestManager with znodeBase: /configs/conf1
   [junit4]   2> 290405 T1655 n:127.0.0.1:41964_ x:collection1 oasr.ManagedResourceStorage$ZooKeeperStorageIO.configure Configured ZooKeeperStorageIO with znodeBase: /configs/conf1
   [junit4]   2> 290405 T1655 n:127.0.0.1:41964_ x:collection1 oasr.RestManager.init Initializing RestManager with initArgs: {}
   [junit4]   2> 290406 T1655 n:127.0.0.1:41964_ x:collection1 oasr.ManagedResourceStorage.load Reading _rest_managed.json using ZooKeeperStorageIO:path=/configs/conf1
   [junit4]   2> 290406 T1655 n:127.0.0.1:41964_ x:collection1 oasr.ManagedResourceStorage$ZooKeeperStorageIO.openInputStream No data found for znode /configs/conf1/_rest_managed.json
   [junit4]   2> 290406 T1655 n:127.0.0.1:41964_ x:collection1 oasr.ManagedResourceStorage.load Loaded null at path _rest_managed.json using ZooKeeperStorageIO:path=/configs/conf1
   [junit4]   2> 290406 T1655 n:127.0.0.1:41964_ x:collection1 oasr.RestManager.init Initializing 0 registered ManagedResources
   [junit4]   2> 290406 T1655 n:127.0.0.1:41964_ x:collection1 oash.ReplicationHandler.inform Commits will be reserved for  10000
   [junit4]   2> 290407 T1655 n:127.0.0.1:41964_ x:collection1 oasc.ZkController.getConfDirListeners watch zkdir /configs/conf1
   [junit4]   2> 290407 T1655 n:127.0.0.1:41964_ x:collection1 oasc.CoreContainer.registerCore registering core: collection1
   [junit4]   2> 290407 T1656 n:127.0.0.1:41964_ x:collection1 oasc.SolrCore.registerSearcher [collection1] Registered new searcher Searcher@714c67da[collection1] main{ExitableDirectoryReader(UninvertingDirectoryReader())}
   [junit4]   2> 290416 T1659 n:127.0.0.1:41964_ c:collection1 s:shard2 x:collection1 oasc.ZkController.register Register replica - core:collection1 address:http://127.0.0.1:41964 collection:collection1 shard:shard2
   [junit4]   2> 290416 T1597 n:127.0.0.1:41964_ oass.SolrDispatchFilter.init user.dir=/home/jenkins/workspace/Lucene-Solr-trunk-Linux/solr/build/solr-core/test/J0
   [junit4]   2> 290416 T1597 n:127.0.0.1:41964_ oass.SolrDispatchFilter.init SolrDispatchFilter.init() done
   [junit4]   2> 290430 T1659 n:127.0.0.1:41964_ c:collection1 s:shard2 x:collection1 oasc.ShardLeaderElectionContext.runLeaderProcess Running the leader process for shard shard2
   [junit4]   2> 290431 T1625 n:127.0.0.1:45017_ oasc.DistributedQueue$LatchWatcher.process NodeChildrenChanged fired on path /overseer/queue state SyncConnected
   [junit4]   2> 290431 T1659 n:127.0.0.1:41964_ c:collection1 s:shard2 x:collection1 oasc.ShardLeaderElectionContext.waitForReplicasToComeUp Enough replicas found to continue.
   [junit4]   2> 290431 T1659 n:127.0.0.1:41964_ c:collection1 s:shard2 x:collection1 oasc.ShardLeaderElectionContext.runLeaderProcess I may be the new leader - try and sync
   [junit4]   2> ASYNC  NEW_CORE C249 name=collection1 org.apache.solr.core.SolrCore@53a62f0b url=http://127.0.0.1:41964/collection1 node=127.0.0.1:41964_ C249_STATE=coll:collection1 core:collection1 props:{core=collection1, base_url=http://127.0.0.1:41964, node_name=127.0.0.1:41964_, state=down}
   [junit4]   2> 290431 T1659 n:127.0.0.1:41964_ c:collection1 s:shard2 x:collection1 C249 oasc.SyncStrategy.sync Sync replicas to http://127.0.0.1:41964/collection1/
   [junit4]   2> 290431 T1659 n:127.0.0.1:41964_ c:collection1 s:shard2 x:collection1 C249 oasc.SyncStrategy.syncReplicas Sync Success - now sync replicas to me
   [junit4]   2> 290431 T1659 n:127.0.0.1:41964_ c:collection1 s:shard2 x:collection1 C249 oasc.SyncStrategy.syncToMe http://127.0.0.1:41964/collection1/ has no replicas
   [junit4]   2> 290431 T1626 n:127.0.0.1:45017_ oasc.Overseer$ClusterStateUpdater.run processMessage: queueSize: 1, message = {
   [junit4]   2> 	  "operation":"leader",
   [junit4]   2> 	  "shard":"shard2",
   [junit4]   2> 	  "collection":"collection1"} current state version: 4
   [junit4]   2> 290432 T1659 n:127.0.0.1:41964_ c:collection1 s:shard2 x:collection1 oasc.ShardLeaderElectionContext.runLeaderProcess I am the new leader: http://127.0.0.1:41964/collection1/ shard2
   [junit4]   2> 290433 T1625 n:127.0.0.1:45017_ oasc.DistributedQueue$LatchWatcher.process NodeChildrenChanged fired on path /overseer/queue state SyncConnected
   [junit4]   2> 290434 T1626 n:127.0.0.1:45017_ oasc.Overseer$ClusterStateUpdater.run processMessage: queueSize: 1, message = {
   [junit4]   2> 	  "operation":"leader",
   [junit4]   2> 	  "shard":"shard2",
   [junit4]   2> 	  "collection":"collection1",
   [junit4]   2> 	  "base_url":"http://127.0.0.1:41964",
   [junit4]   2> 	  "core":"collection1",
   [junit4]   2> 	  "state":"active"} current state version: 4
   [junit4]   2> 290499 T1597 oas.SolrTestCaseJ4.writeCoreProperties Writing core.properties file to /home/jenkins/workspace/Lucene-Solr-trunk-Linux/solr/build/solr-core/test/J0/temp/solr.cloud.LeaderFailoverAfterPartitionTest 78CDE9CFD1BB0D04-001/shard-2-001/cores/collection1
   [junit4]   2> 290500 T1597 oasc.AbstractFullDistribZkTestBase.createJettys create jetty 2 in directory /home/jenkins/workspace/Lucene-Solr-trunk-Linux/solr/build/solr-core/test/J0/temp/solr.cloud.LeaderFailoverAfterPartitionTest 78CDE9CFD1BB0D04-001/shard-2-001
   [junit4]   2> 290500 T1597 oejs.Server.doStart jetty-9.2.10.v20150310
   [junit4]   2> 290502 T1597 oejsh.ContextHandler.doStart Started o.e.j.s.ServletContextHandler@305a7976{/,null,AVAILABLE}
   [junit4]   2> 290502 T1597 oejs.AbstractConnector.doStart Started ServerConnector@72ac8fbc{HTTP/1.1}{127.0.0.1:?????}
   [junit4]   2> 290502 T1597 oejs.Server.doStart Started @??????ms
   [junit4]   2> 290502 T1597 oascse.JettySolrRunner$1.lifeCycleStarted Jetty properties: {solr.data.dir=/home/jenkins/workspace/Lucene-Solr-trunk-Linux/solr/build/solr-core/test/J0/temp/solr.cloud.LeaderFailoverAfterPartitionTest 78CDE9CFD1BB0D04-001/tempDir-001/jetty2, solrconfig=solrconfig.xml, hostContext=/, hostPort=37244, coreRootDirectory=/home/jenkins/workspace/Lucene-Solr-trunk-Linux/solr/build/solr-core/test/J0/temp/solr.cloud.LeaderFailoverAfterPartitionTest 78CDE9CFD1BB0D04-001/shard-2-001/cores}
   [junit4]   2> 290503 T1597 oass.SolrDispatchFilter.init SolrDispatchFilter.init()sun.misc.Launcher$AppClassLoader@4e0e2f2a
   [junit4]   2> 290503 T1597 oasc.SolrResourceLoader.<init> new SolrResourceLoader for directory: '/home/jenkins/workspace/Lucene-Solr-trunk-Linux/solr/build/solr-core/test/J0/temp/solr.cloud.LeaderFailoverAfterPartitionTest 78CDE9CFD1BB0D04-001/shard-2-001/'
   [junit4]   2> 290514 T1597 oasc.SolrXmlConfig.fromFile Loading container configuration from /home/jenkins/workspace/Lucene-Solr-trunk-Linux/solr/build/solr-core/test/J0/temp/solr.cloud.LeaderFailoverAfterPartitionTest 78CDE9CFD1BB0D04-001/shard-2-001/solr.xml
   [junit4]   2> 290519 T1597 oasc.CorePropertiesLocator.<init> Config-defined core root directory: /home/jenkins/workspace/Lucene-Solr-trunk-Linux/solr/build/solr-core/test/J0/temp/solr.cloud.LeaderFailoverAfterPartitionTest 78CDE9CFD1BB0D04-001/shard-2-001/cores
   [junit4]   2> 290520 T1597 oasc.CoreContainer.<init> New CoreContainer 1048005489
   [junit4]   2> 290520 T1597 oasc.CoreContainer.load Loading cores into CoreContainer [instanceDir=/home/jenkins/workspace/Lucene-Solr-trunk-Linux/solr/build/solr-core/test/J0/temp/solr.cloud.LeaderFailoverAfterPartitionTest 78CDE9CFD1BB0D04-001/shard-2-001/]
   [junit4]   2> 290521 T1597 oasc.CoreContainer.load loading shared library: /home/jenkins/workspace/Lucene-Solr-trunk-Linux/solr/build/solr-core/test/J0/temp/solr.cloud.LeaderFailoverAfterPartitionTest 78CDE9CFD1BB0D04-001/shard-2-001/lib
   [junit4]   2> 290521 T1597 oasc.SolrResourceLoader.addToClassLoader WARN Can't find (or read) directory to add to classloader: lib (resolved as: /home/jenkins/workspace/Lucene-Solr-trunk-Linux/solr/build/solr-core/test/J0/temp/solr.cloud.LeaderFailoverAfterPartitionTest 78CDE9CFD1BB0D04-001/shard-2-001/lib).
   [junit4]   2> 290526 T1597 oashc.HttpShardHandlerFactory.init created with socketTimeout : 90000,urlScheme : ,connTimeout : 15000,maxConnectionsPerHost : 20,maxConnections : 10000,corePoolSize : 0,maximumPoolSize : 2147483647,maxThreadIdleTime : 5,sizeOfQueue : -1,fairnessPolicy : false,useRetries : false,
   [junit4]   2> 290527 T1597 oasu.UpdateShardHandler.<init> Creating UpdateShardHandler HTTP client with params: socketTimeout=340000&connTimeout=45000&retry=true
   [junit4]   2> 290527 T1597 oasl.LogWatcher.createWatcher SLF4J impl is org.slf4j.impl.Log4jLoggerFactory
   [junit4]   2> 290527 T1597 oasl.LogWatcher.newRegisteredLogWatcher Registering Log Listener [Log4j (org.slf4j.impl.Log4jLoggerFactory)]
   [junit4]   2> 290528 T1597 oasc.CoreContainer.load Node Name: 127.0.0.1
   [junit4]   2> 290528 T1597 oasc.ZkContainer.initZooKeeper Zookeeper client=127.0.0.1:36134/solr
   [junit4]   2> 290528 T1597 oasc.ZkController.checkChrootPath zkHost includes chroot
   [junit4]   2> 290584 T1659 n:127.0.0.1:41964_ c:collection1 s:shard2 x:collection1 oasc.ZkController.register We are http://127.0.0.1:41964/collection1/ and leader is http://127.0.0.1:41964/collection1/
   [junit4]   2> 290585 T1659 n:127.0.0.1:41964_ c:collection1 s:shard2 x:collection1 oasc.ZkController.register No LogReplay needed for core=collection1 baseURL=http://127.0.0.1:41964
   [junit4]   2> 290585 T1659 n:127.0.0.1:41964_ c:collection1 s:shard2 x:collection1 oasc.ZkController.checkRecovery I am the leader, no recovery necessary
   [junit4]   2> 290585 T1659 n:127.0.0.1:41964_ c:collection1 s:shard2 x:collection1 oasc.ZkController.publish publishing core=collection1 state=active collection=collection1
   [junit4]   2> 290586 T1659 n:127.0.0.1:41964_ c:collection1 s:shard2 x:collection1 oasc.ZkController.publish numShards not found on descriptor - reading it from system property
   [junit4]   2> 290587 T1625 n:127.0.0.1:45017_ oasc.DistributedQueue$LatchWatcher.process NodeChildrenChanged fired on path /overseer/queue state SyncConnected
   [junit4]   2> 290588 T1626 n:127.0.0.1:45017_ oasc.Overseer$ClusterStateUpdater.run processMessage: queueSize: 1, message = {
   [junit4]   2> 	  "core":"collection1",
   [junit4]   2> 	  "core_node_name":"core_node1",
   [junit4]   2> 	  "roles":null,
   [junit4]   2> 	  "base_url":"http://127.0.0.1:41964",
   [junit4]   2> 	  "node_name":"127.0.0.1:41964_",
   [junit4]   2> 	  "numShards":"2",
   [junit4]   2> 	  "state":"active",
   [junit4]   2> 	  "shard":"shard2",
   [junit4]   2> 	  "collection":"collection1",
   [junit4]   2> 	  "operation":"state"} current state version: 5
   [junit4]   2> 290588 T1626 n:127.0.0.1:45017_ oasco.ReplicaMutator.updateState Update state numShards=2 message={
   [junit4]   2> 	  "core":"collection1",
   [junit4]   2> 	  "core_node_name":"core_node1",
   [junit4]   2> 	  "roles":null,
   [junit4]   2> 	  "base_url":"http://127.0.0.1:41964",
   [junit4]   2> 	  "node_name":"127.0.0.1:41964_",
   [junit4]   2> 	  "numShards":"2",
   [junit4]   2> 	  "state":"active",
   [junit4]   2> 	  "shard":"shard2",
   [junit4]   2> 	  "collection":"collection1",
   [junit4]   2> 	  "operation":"state"}
   [junit4]   2> 291539 T1597 n:127.0.0.1:37244_ oasc.ZkController.createEphemeralLiveNode Register node as live in ZooKeeper:/live_nodes/127.0.0.1:37244_
   [junit4]   2> 291541 T1597 n:127.0.0.1:37244_ oasc.Overseer.close Overseer (id=null) closing
   [junit4]   2> 291542 T1597 n:127.0.0.1:37244_ oasc.CorePropertiesLocator.discover Looking for core definitions underneath /home/jenkins/workspace/Lucene-Solr-trunk-Linux/solr/build/solr-core/test/J0/temp/solr.cloud.LeaderFailoverAfterPartitionTest 78CDE9CFD1BB0D04-001/shard-2-001/cores
   [junit4]   2> 291543 T1597 n:127.0.0.1:37244_ oasc.CoreDescriptor.<init> CORE DESCRIPTOR: {name=collection1, config=solrconfig.xml, transient=false, schema=schema.xml, loadOnStartup=true, instanceDir=/home/jenkins/workspace/Lucene-Solr-trunk-Linux/solr/build/solr-core/test/J0/temp/solr.cloud.LeaderFailoverAfterPartitionTest 78CDE9CFD1BB0D04-001/shard-2-001/cores/collection1, collection=collection1, absoluteInstDir=/home/jenkins/workspace/Lucene-Solr-trunk-Linux/solr/build/solr-core/test/J0/temp/solr.cloud.LeaderFailoverAfterPartitionTest 78CDE9CFD1BB0D04-001/shard-2-001/cores/collection1/, coreNodeName=, dataDir=data/, shard=}
   [junit4]   2> 291544 T1597 n:127.0.0.1:37244_ oasc.CorePropertiesLocator.discoverUnder Found core collection1 in /home/jenkins/workspace/Lucene-Solr-trunk-Linux/solr/build/solr-core/test/J0/temp/solr.cloud.LeaderFailoverAfterPartitionTest 78CDE9CFD1BB0D04-001/shard-2-001/cores/collection1/
   [junit4]   2> 291544 T1597 n:127.0.0.1:37244_ oasc.CorePropertiesLocator.discover Found 1 core definitions
   [junit4]   2> 291545 T1678 n:127.0.0.1:37244_ c:collection1 x:collection1 oasc.ZkController.publish publishing core=collection1 state=down collection=collection1
   [junit4]   2> 291545 T1678 n:127.0.0.1:37244_ c:collection1 x:collection1 oasc.ZkController.publish numShards not found on descriptor - reading it from system property
   [junit4]   2> 291546 T1678 n:127.0.0.1:37244_ oasc.ZkController.waitForCoreNodeName look for our core node name
   [junit4]   2> 291546 T1625 n:127.0.0.1:45017_ oasc.DistributedQueue$LatchWatcher.process NodeChildrenChanged fired on path /overseer/queue state SyncConnected
   [junit4]   2> 291547 T1626 n:127.0.0.1:45017_ oasc.Overseer$ClusterStateUpdater.run processMessage: queueSize: 1, message = {
   [junit4]   2> 	  "core":"collection1",
   [junit4]   2> 	  "roles":null,
   [junit4]   2> 	  "base_url":"http://127.0.0.1:37244",
   [junit4]   2> 	  "node_name":"127.0.0.1:37244_",
   [junit4]   2> 	  "numShards":"2",
   [junit4]   2> 	  "state":"down",
   [junit4]   2> 	  "shard":null,
   [junit4]   2> 	  "collection":"collection1",
   [junit4]   2> 	  "operation":"state"} current state version: 6
   [junit4]   2> 291547 T1626 n:127.0.0.1:45017_ oasco.ReplicaMutator.updateState Update state numShards=2 message={
   [junit4]   2> 	  "core":"collection1",
   [junit4]   2> 	  "roles":null,
   [junit4]   2> 	  "base_url":"http://127.0.0.1:37244",
   [junit4]   2> 	  "node_name":"127.0.0.1:37244_",
   [junit4]   2> 	  "numShards":"2",
   [junit4]   2> 	  "state":"down",
   [junit4]   2> 	  "shard":null,
   [junit4]   2> 	  "collection":"collection1",
   [junit4]   2> 	  "operation":"state"}
   [junit4]   2> 291547 T1626 n:127.0.0.1:45017_ oasco.ReplicaMutator.updateState Collection already exists with numShards=2
   [junit4]   2> 291547 T1626 n:127.0.0.1:45017_ oasco.ReplicaMutator.updateState Assigning new node to shard shard=shard1
   [junit4]   2> 292546 T1678 n:127.0.0.1:37244_ oasc.ZkController.waitForShardId waiting to find shard id in clusterstate for collection1
   [junit4]   2> 292547 T1678 n:127.0.0.1:37244_ oasc.ZkController.createCollectionZkNode Check for collection zkNode:collection1
   [junit4]   2> 292547 T1678 n:127.0.0.1:37244_ oasc.ZkController.createCollectionZkNode Collection zkNode exists
   [junit4]   2> 292548 T1678 n:127.0.0.1:37244_ oasc.SolrResourceLoader.<init> new SolrResourceLoader for directory: '/home/jenkins/workspace/Lucene-Solr-trunk-Linux/solr/build/solr-core/test/J0/temp/solr.cloud.LeaderFailoverAfterPartitionTest 78CDE9CFD1BB0D04-001/shard-2-001/cores/collection1/'
   [junit4]   2> 292559 T1678 n:127.0.0.1:37244_ oasc.Config.<init> loaded config solrconfig.xml with version 0 
   [junit4]   2> 292565 T1678 n:127.0.0.1:37244_ oasc.SolrConfig.refreshRequestParams current version of requestparams : -1
   [junit4]   2> 292568 T1678 n:127.0.0.1:37244_ oasc.SolrConfig.<init> Using Lucene MatchVersion: 6.0.0
   [junit4]   2> 292577 T1678 n:127.0.0.1:37244_ oasc.SolrConfig.<init> Loaded SolrConfig: solrconfig.xml
   [junit4]   2> 292578 T1678 n:127.0.0.1:37244_ oass.IndexSchema.readSchema Reading Solr Schema from /configs/conf1/schema.xml
   [junit4]   2> 292582 T1678 n:127.0.0.1:37244_ oass.IndexSchema.readSchema [collection1] Schema name=test
   [junit4]   2> 292670 T1678 n:127.0.0.1:37244_ oass.OpenExchangeRatesOrgProvider.init Initialized with rates=open-exchange-rates.json, refreshInterval=1440.
   [junit4]   2> 292676 T1678 n:127.0.0.1:37244_ oass.IndexSchema.readSchema default search field in schema is text
   [junit4]   2> 292677 T1678 n:127.0.0.1:37244_ oass.IndexSchema.readSchema unique key field: id
   [junit4]   2> 292682 T1678 n:127.0.0.1:37244_ oass.FileExchangeRateProvider.reload Reloading exchange rates from file currency.xml
   [junit4]   2> 292684 T1678 n:127.0.0.1:37244_ oass.FileExchangeRateProvider.reload Reloading exchange rates from file currency.xml
   [junit4]   2> 292685 T1678 n:127.0.0.1:37244_ oass.OpenExchangeRatesOrgProvider.reload Reloading exchange rates from open-exchange-rates.json
   [junit4]   2> 292686 T1678 n:127.0.0.1:37244_ oass.OpenExchangeRatesOrgProvider$OpenExchangeRates.<init> WARN Unknown key IMPORTANT NOTE
   [junit4]   2> 292686 T1678 n:127.0.0.1:37244_ oass.OpenExchangeRatesOrgProvider$OpenExchangeRates.<init> WARN Expected key, got STRING
   [junit4]   2> 292686 T1678 n:127.0.0.1:37244_ oass.OpenExchangeRatesOrgProvider.reload Reloading exchange rates from open-exchange-rates.json
   [junit4]   2> 292686 T1678 n:127.0.0.1:37244_ oass.OpenExchangeRatesOrgProvider$OpenExchangeRates.<init> WARN Unknown key IMPORTANT NOTE
   [junit4]   2> 292687 T1678 n:127.0.0.1:37244_ oass.OpenExchangeRatesOrgProvider$OpenExchangeRates.<init> WARN Expected key, got STRING
   [junit4]   2> 292687 T1678 n:127.0.0.1:37244_ oasc.CoreContainer.create Creating SolrCore 'collection1' using configuration from collection collection1
   [junit4]   2> 292687 T1678 n:127.0.0.1:37244_ x:collection1 oasc.SolrCore.initDirectoryFactory org.apache.solr.core.MockDirectoryFactory
   [junit4]   2> 292687 T1678 n:127.0.0.1:37244_ x:collection1 oasc.SolrCore.<init> [[collection1] ] Opening new SolrCore at [/home/jenkins/workspace/Lucene-Solr-trunk-Linux/solr/build/solr-core/test/J0/temp/solr.cloud.LeaderFailoverAfterPartitionTest 78CDE9CFD1BB0D04-001/shard-2-001/cores/collection1/], dataDir=[null]
   [junit4]   2> 292687 T1678 n:127.0.0.1:37244_ x:collection1 oasc.JmxMonitoredMap.<init> JMX monitoring is enabled. Adding Solr mbeans to JMX Server: com.sun.jmx.mbeanserver.JmxMBeanServer@695883a8
   [junit4]   2> 292688 T1678 n:127.0.0.1:37244_ x:collection1 oasc.CachingDirectoryFactory.get return new directory for /home/jenkins/workspace/Lucene-Solr-trunk-Linux/solr/build/solr-core/test/J0/temp/solr.cloud.LeaderFailoverAfterPartitionTest 78CDE9CFD1BB0D04-001/shard-2-001/cores/collection1/data
   [junit4]   2> 292688 T1678 n:127.0.0.1:37244_ x:collection1 oasc.SolrCore.getNewIndexDir New index directory detected: old=null new=/home/jenkins/workspace/Lucene-Solr-trunk-Linux/solr/build/solr-core/test/J0/temp/solr.cloud.LeaderFailoverAfterPartitionTest 78CDE9CFD1BB0D04-001/shard-2-001/cores/collection1/data/index/
   [junit4]   2> 292688 T1678 n:127.0.0.1:37244_ x:collection1 oasc.SolrCore.initIndex WARN [collection1] Solr index directory '/home/jenkins/workspace/Lucene-Solr-trunk-Linux/solr/build/solr-core/test/J0/temp/solr.cloud.LeaderFailoverAfterPartitionTest 78CDE9CFD1BB0D04-001/shard-2-001/cores/collection1/data/index' doesn't exist. Creating new index...
   [junit4]   2> 292688 T1678 n:127.0.0.1:37244_ x:collection1 oasc.CachingDirectoryFactory.get return new directory for /home/jenkins/workspace/Lucene-Solr-trunk-Linux/solr/build/solr-core/test/J0/temp/solr.cloud.LeaderFailoverAfterPartitionTest 78CDE9CFD1BB0D04-001/shard-2-001/cores/collection1/data/index
   [junit4]   2> 292688 T1678 n:127.0.0.1:37244_ x:collection1 oasu.RandomMergePolicy.<init> RandomMergePolicy wrapping class org.apache.lucene.index.TieredMergePolicy: [TieredMergePolicy: maxMergeAtOnce=39, maxMergeAtOnceExplicit=50, maxMergedSegmentMB=82.201171875, floorSegmentMB=2.064453125, forceMergeDeletesPctAllowed=19.581522441688747, segmentsPerTier=11.0, maxCFSSegmentSizeMB=8.796093022207999E12, noCFSRatio=0.6794599103411276
   [junit4]   2> 292689 T1678 n:127.0.0.1:37244_ x:collection1 oasc.SolrDeletionPolicy.onCommit SolrDeletionPolicy.onCommit: commits: num=1
   [junit4]   2> 		commit{dir=MockDirectoryWrapper(RAMDirectory@4ecdeb7a lockFactory=org.apache.lucene.store.SingleInstanceLockFactory@73d50d0d),segFN=segments_1,generation=1}
   [junit4]   2> 292689 T1678 n:127.0.0.1:37244_ x:collection1 oasc.SolrDeletionPolicy.updateCommits newest commit generation = 1
   [junit4]   2> 292693 T1678 n:127.0.0.1:37244_ x:collection1 oasup.UpdateRequestProcessorChain.init creating updateRequestProcessorChain "nodistrib"
   [junit4]   2> 292694 T1678 n:127.0.0.1:37244_ x:collection1 oasup.UpdateRequestProcessorChain.init creating updateRequestProcessorChain "dedupe"
   [junit4]   2> 292694 T1678 n:127.0.0.1:37244_ x:collection1 oasup.UpdateRequestProcessorChain.init inserting DistributedUpdateProcessorFactory into updateRequestProcessorChain "dedupe"
   [junit4]   2> 292694 T1678 n:127.0.0.1:37244_ x:collection1 oasup.UpdateRequestProcessorChain.init creating updateRequestProcessorChain "stored_sig"
   [junit4]   2> 292694 T1678 n:127.0.0.1:37244_ x:collection1 oasup.UpdateRequestProcessorChain.init inserting DistributedUpdateProcessorFactory into updateRequestProcessorChain "stored_sig"
   [junit4]   2> 292694 T1678 n:127.0.0.1:37244_ x:collection1 oasup.UpdateRequestProcessorChain.init creating updateRequestProcessorChain "distrib-dup-test-chain-explicit"
   [junit4]   2> 292695 T1678 n:127.0.0.1:37244_ x:collection1 oasup.UpdateRequestProcessorChain.init creating updateRequestProcessorChain "distrib-dup-test-chain-implicit"
   [junit4]   2> 292695 T1678 n:127.0.0.1:37244_ x:collection1 oasup.UpdateRequestProcessorChain.init inserting DistributedUpdateProcessorFactory into updateRequestProcessorChain "distrib-dup-test-chain-implicit"
   [junit4]   2> 292695 T1678 n:127.0.0.1:37244_ x:collection1 oasc.SolrCore.loadUpdateProcessorChains no updateRequestProcessorChain defined as default, creating implicit default
   [junit4]   2> 292697 T1678 n:127.0.0.1:37244_ x:collection1 oashl.XMLLoader.init xsltCacheLifetimeSeconds=60
   [junit4]   2> 292698 T1678 n:127.0.0.1:37244_ x:collection1 oashl.XMLLoader.init xsltCacheLifetimeSeconds=60
   [junit4]   2> 292700 T1678 n:127.0.0.1:37244_ x:collection1 oashl.XMLLoader.init xsltCacheLifetimeSeconds=60
   [junit4]   2> 292700 T1678 n:127.0.0.1:37244_ x:collection1 oashl.XMLLoader.init xsltCacheLifetimeSeconds=60
   [junit4]   2> 292705 T1678 n:127.0.0.1:37244_ x:collection1 oasc.RequestHandlers.initHandlersFromConfig Registered paths: /admin/mbeans,standard,/update/csv,/update/json/docs,/admin/luke,/admin/segments,/get,/admin/system,/replication,/admin/properties,/config,/schema,/admin/plugins,/admin/logging,/update/json,/admin/threads,/admin/ping,/update,/admin/file
   [junit4]   2> 292706 T1678 n:127.0.0.1:37244_ x:collection1 oasc.SolrCore.initStatsCache Using default statsCache cache: org.apache.solr.search.stats.LocalStatsCache
   [junit4]   2> 292707 T1678 n:127.0.0.1:37244_ x:collection1 oasu.UpdateHandler.<init> Using UpdateLog implementation: org.apache.solr.update.UpdateLog
   [junit4]   2> 292707 T1678 n:127.0.0.1:37244_ x:collection1 oasu.UpdateLog.init Initializing UpdateLog: dataDir= defaultSyncLevel=FLUSH numRecordsToKeep=100 maxNumLogsToKeep=10
   [junit4]   2> 292707 T1678 n:127.0.0.1:37244_ x:collection1 oasu.CommitTracker.<init> Hard AutoCommit: disabled
   [junit4]   2> 292707 T1678 n:127.0.0.1:37244_ x:collection1 oasu.CommitTracker.<init> Soft AutoCommit: disabled
   [junit4]   2> 292708 T1678 n:127.0.0.1:37244_ x:collection1 oasu.RandomMergePolicy.<init> RandomMergePolicy wrapping class org.apache.lucene.index.TieredMergePolicy: [TieredMergePolicy: maxMergeAtOnce=14, maxMergeAtOnceExplicit=19, maxMergedSegmentMB=54.2548828125, floorSegmentMB=1.7099609375, forceMergeDeletesPctAllowed=9.511577512215029, segmentsPerTier=20.0, maxCFSSegmentSizeMB=8.796093022207999E12, noCFSRatio=1.0
   [junit4]   2> 292709 T1678 n:127.0.0.1:37244_ x:collection1 oasc.SolrDeletionPolicy.onInit SolrDeletionPolicy.onInit: commits: num=1
   [junit4]   2> 		commit{dir=MockDirectoryWrapper(RAMDirectory@4ecdeb7a lockFactory=org.apache.lucene.store.SingleInstanceLockFactory@73d50d0d),segFN=segments_1,generation=1}
   [junit4]   2> 292709 T1678 n:127.0.0.1:37244_ x:collection1 oasc.SolrDeletionPolicy.updateCommits newest commit generation = 1
   [junit4]   2> 292709 T1678 n:127.0.0.1:37244_ x:collection1 oass.SolrIndexSearcher.<init> Opening Searcher@7176e0ee[collection1] main
   [junit4]   2> 292710 T1678 n:127.0.0.1:37244_ x:collection1 oasr.ManagedResourceStorage.newStorageIO Setting up ZooKeeper-based storage for the RestManager with znodeBase: /configs/conf1
   [junit4]   2> 292710 T1678 n:127.0.0.1:37244_ x:collection1 oasr.ManagedResourceStorage$ZooKeeperStorageIO.configure Configured ZooKeeperStorageIO with znodeBase: /configs/conf1
   [junit4]   2> 292710 T1678 n:127.0.0.1:37244_ x:collection1 oasr.RestManager.init Initializing RestManager with initArgs: {}
   [junit4]   2> 292710 T1678 n:127.0.0.1:37244_ x:collection1 oasr.ManagedResourceStorage.load Reading _rest_managed.json using ZooKeeperStorageIO:path=/configs/conf1
   [junit4]   2> 292711 T1678 n:127.0.0.1:37244_ x:collection1 oasr.ManagedResourceStorage$ZooKeeperStorageIO.openInputStream No data found for znode /configs/conf1/_rest_managed.json
   [junit4]   2> 292711 T1678 n:127.0.0.1:37244_ x:collection1 oasr.ManagedResourceStorage.load Loaded null at path _rest_managed.json using ZooKeeperStorageIO:path=/configs/conf1
   [junit4]   2> 292712 T1678 n:127.0.0.1:37244_ x:collection1 oasr.RestManager.init Initializing 0 registered ManagedResources
   [junit4]   2> 292712 T1678 n:127.0.0.1:37244_ x:collection1 oash.ReplicationHandler.inform Commits will be reserved for  10000
   [junit4]   2> 292712 T1678 n:127.0.0.1:37244_ x:collection1 oasc.ZkController.getConfDirListeners watch zkdir /configs/conf1
   [junit4]   2> 292713 T1679 n:127.0.0.1:37244_ x:collection1 oasc.SolrCore.registerSearcher [collection1] Registered new searcher Searcher@7176e0ee[collection1] main{ExitableDirectoryReader(UninvertingDirectoryReader())}
   [junit4]   2> 292713 T1678 n:127.0.0.1:37244_ x:collection1 oasc.CoreContainer.registerCore registering core: collection1
   [junit4]   2> 292714 T1682 n:127.0.0.1:37244_ c:collection1 s:shard1 x:collection1 oasc.ZkController.register Register replica - core:collection1 address:http://127.0.0.1:37244 collection:collection1 shard:shard1
   [junit4]   2> 292714 T1597 n:127.0.0.1:37244_ oass.SolrDispatchFilter.init user.dir=/home/jenkins/workspace/Lucene-Solr-trunk-Linux/solr/build/solr-core/test/J0
   [junit4]   2> 292714 T1597 n:127.0.0.1:37244_ oass.SolrDispatchFilter.init SolrDispatchFilter.init() done
   [junit4]   2> 292721 T1682 n:127.0.0.1:37244_ c:collection1 s:shard1 x:collection1 oasc.ShardLeaderElectionContext.runLeaderProcess Running the leader process for shard shard1
   [junit4]   2> 292722 T1625 n:127.0.0.1:45017_ oasc.DistributedQueue$LatchWatcher.process NodeChildrenChanged fired on path /overseer/queue state SyncConnected
   [junit4]   2> 292722 T1682 n:127.0.0.1:37244_ c:collection1 s:shard1 x:collection1 oasc.ShardLeaderElectionContext.waitForReplicasToComeUp Enough replicas found to continue.
   [junit4]   2> 292722 T1682 n:127.0.0.1:37244_ c:collection1 s:shard1 x:collection1 oasc.ShardLeaderElectionContext.runLeaderProcess I may be the new leader - try and sync
   [junit4]   2> ASYNC  NEW_CORE C250 name=collection1 org.apache.solr.core.SolrCore@612a3cf6 url=http://127.0.0.1:37244/collection1 node=127.0.0.1:37244_ C250_STATE=coll:collection1 core:collection1 props:{core=collection1, base_url=http://127.0.0.1:37244, node_name=127.0.0.1:37244_, state=down}
   [junit4]   2> 292722 T1682 n:127.0.0.1:37244_ c:collection1 s:shard1 x:collection1 C250 oasc.SyncStrategy.sync Sync replicas to http://127.0.0.1:37244/collection1/
   [junit4]   2> 292722 T1626 n:127.0.0.1:45017_ oasc.Overseer$ClusterStateUpdater.run processMessage: queueSize: 1, message = {
   [junit4]   2> 	  "operation":"leader",
   [junit4]   2> 	  "shard":"shard1",
   [junit4]   2> 	  "collection":"collection1"} current state version: 7
   [junit4]   2> 292722 T1682 n:127.0.0.1:37244_ c:collection1 s:shard1 x:collection1 C250 oasc.SyncStrategy.syncReplicas Sync Success - now sync replicas to me
   [junit4]   2> 292723 T1682 n:127.0.0.1:37244_ c:collection1 s:shard1 x:collection1 C250 oasc.SyncStrategy.syncToMe http://127.0.0.1:37244/collection1/ has no replicas
   [junit4]   2> 292723 T1682 n:127.0.0.1:37244_ c:collection1 s:shard1 x:collection1 oasc.ShardLeaderElectionContext.runLeaderProcess I am the new leader: http://127.0.0.1:37244/collection1/ shard1
   [junit4]   2> 292724 T1625 n:127.0.0.1:45017_ oasc.DistributedQueue$LatchWatcher.process NodeChildrenChanged fired on path /overseer/queue state SyncConnected
   [junit4]   2> 292725 T1626 n:127.0.0.1:45017_ oasc.Overseer$ClusterStateUpdater.run processMessage: queueSize: 1, message = {
   [junit4]   2> 	  "operation":"leader",
   [junit4]   2> 	  "shard":"shard1",
   [junit4]   2> 	  "collection":"collection1",
   [junit4]   2> 	  "base_url":"http://127.0.0.1:37244",
   [junit4]   2> 	  "core":"collection1",
   [junit4]   2> 	  "state":"active"} current state version: 7
   [junit4]   2> 292787 T1597 oas.SolrTestCaseJ4.writeCoreProperties Writing core.properties file to /home/jenkins/workspace/Lucene-Solr-trunk-Linux/solr/build/solr-core/test/J0/temp/solr.cloud.LeaderFailoverAfterPartitionTest 78CDE9CFD1BB0D04-001/shard-3-001/cores/collection1
   [junit4]   2> 292789 T1597 oasc.AbstractFullDistribZkTestBase.createJettys create jetty 3 in directory /home/jenkins/workspace/Lucene-Solr-trunk-Linux/solr/build/solr-core/test/J0/temp/solr.cloud.LeaderFailoverAfterPartitionTest 78CDE9CFD1BB0D04-001/shard-3-001
   [junit4]   2> 292790 T1597 oejs.Server.doStart jetty-9.2.10.v20150310
   [junit4]   2> 292824 T1597 oejsh.ContextHandler.doStart Started o.e.j.s.ServletContextHandler@310745e6{/,null,AVAILABLE}
   [junit4]   2> 292825 T1597 oejs.AbstractConnector.doStart Started ServerConnector@6472ed68{HTTP/1.1}{127.0.0.1:?????}
   [junit4]   2> 292825 T1597 oejs.Server.doStart Started @??????ms
   [junit4]   2> 292825 T1597 oascse.JettySolrRunner$1.lifeCycleStarted Jetty properties: {solr.data.dir=/home/jenkins/workspace/Lucene-Solr-trunk-Linux/solr/build/solr-core/test/J0/temp/solr.cloud.LeaderFailoverAfterPartitionTest 78CDE9CFD1BB0D04-001/tempDir-001/jetty3, solrconfig=solrconfig.xml, hostContext=/, hostPort=47799, coreRootDirectory=/home/jenkins/workspace/Lucene-Solr-trunk-Linux/solr/build/solr-core/test/J0/temp/solr.cloud.LeaderFailoverAfterPartitionTest 78CDE9CFD1BB0D04-001/shard-3-001/cores}
   [junit4]   2> 292826 T1597 oass.SolrDispatchFilter.init SolrDispatchFilter.init()sun.misc.Launcher$AppClassLoader@4e0e2f2a
   [junit4]   2> 292826 T1597 oasc.SolrResourceLoader.<init> new SolrResourceLoader for directory: '/home/jenkins/workspace/Lucene-Solr-trunk-Linux/solr/build/solr-core/test/J0/temp/solr.cloud.LeaderFailoverAfterPartitionTest 78CDE9CFD1BB0D04-001/shard-3-001/'
   [junit4]   2> 292855 T1597 oasc.SolrXmlConfig.fromFile Loading container configuration from /home/jenkins/workspace/Lucene-Solr-trunk-Linux/solr/build/solr-core/test/J0/temp/solr.cloud.LeaderFailoverAfterPartitionTest 78CDE9CFD1BB0D04-001/shard-3-001/solr.xml
   [junit4]   2> 292859 T1597 oasc.CorePropertiesLocator.<init> Config-defined core root directory: /home/jenkins/workspace/Lucene-Solr-trunk-Linux/solr/build/solr-core/test/J0/temp/solr.cloud.LeaderFailoverAfterPartitionTest 78CDE9CFD1BB0D04-001/shard-3-001/cores
   [junit4]   2> 292865 T1597 oasc.CoreContainer.<init> New CoreContainer 1571119177
   [junit4]   2> 292865 T1597 oasc.CoreContainer.load Loading cores into CoreContainer [instanceDir=/home/jenkins/workspace/Lucene-Solr-trunk-Linux/solr/build/solr-core/test/J0/temp/solr.cloud.LeaderFailoverAfterPartitionTest 78CDE9CFD1BB0D04-001/shard-3-001/]
   [junit4]   2> 292865 T1597 oasc.CoreContainer.load loading shared library: /home/jenkins/workspace/Lucene-Solr-trunk-Linux/solr/build/solr-core/test/J0/temp/solr.cloud.LeaderFailoverAfterPartitionTest 78CDE9CFD1BB0D04-001/shard-3-001/lib
   [junit4]   2> 292866 T1597 oasc.SolrResourceLoader.addToClassLoader WARN Can't find (or read) directory to add to classloader: lib (resolved as: /home/jenkins/workspace/Lucene-Solr-trunk-Linux/solr/build/solr-core/test/J0/temp/solr.cloud.LeaderFailoverAfterPartitionTest 78CDE9CFD1BB0D04-001/shard-3-001/lib).
   [junit4]   2> 292875 T1682 n:127.0.0.1:37244_ c:collection1 s:shard1 x:collection1 oasc.ZkController.register We are http://127.0.0.1:37244/collection1/ and leader is http://127.0.0.1:37244/collection1/
   [junit4]   2> 292875 T1682 n:127.0.0.1:37244_ c:collection1 s:shard1 x:collection1 oasc.ZkController.register No LogReplay needed for core=collection1 baseURL=http://127.0.0.1:37244
   [junit4]   2> 292875 T1682 n:127.0.0.1:37244_ c:collection1 s:shard1 x:collection1 oasc.ZkController.checkRecovery I am the leader, no recovery necessary
   [junit4]   2> 292875 T1682 n:127.0.0.1:37244_ c:collection1 s:shard1 x:collection1 oasc.ZkController.publish publishing core=collection1 state=active collection=collection1
   [junit4]   2> 292875 T1682 n:127.0.0.1:37244_ c:collection1 s:shard1 x:collection1 oasc.ZkController.publish numShards not found on descriptor - reading it from system property
   [junit4]   2> 292877 T1625 n:127.0.0.1:45017_ oasc.DistributedQueue$LatchWatcher.process NodeChildrenChanged fired on path /overseer/queue state SyncConnected
   [junit4]   2> 292878 T1626 n:127.0.0.1:45017_ oasc.Overseer$ClusterStateUpdater.run processMessage: queueSize: 1, message = {
   [junit4]   2> 	  "core":"collection1",
   [junit4]   2> 	  "core_node_name":"core_node2",
   [junit4]   2> 	  "roles":null,
   [junit4]   2> 	  "base_url":"http://127.0.0.1:37244",
   [junit4]   2> 	  "node_name":"127.0.0.1:37244_",
   [junit4]   2> 	  "numShards":"2",
   [junit4]   2> 	  "state":"active",
   [junit4]   2> 	  "shard":"shard1",
   [junit4]   2> 	  "collection":"collection1",
   [junit4]   2> 	  "operation":"state"} current state version: 8
   [junit4]   2> 292878 T1626 n:127.0.0.1:45017_ oasco.ReplicaMutator.updateState Update state numShards=2 message={
   [junit4]   2> 	  "core":"collection1",
   [junit4]   2> 	  "core_node_name":"core_node2",
   [junit4]   2> 	  "roles":null,
   [junit4]   2> 	  "base_url":"http://127.0.0.1:37244",
   [junit4]   2> 	  "node_name":"127.0.0.1:37244_",
   [junit4]   2> 	  "numShards":"2",
   [junit4]   2> 	  "state":"active",
   [junit4]   2> 	  "shard":"shard1",
   [junit4]   2> 	  "collection":"collection1",
   [junit4]   2> 	  "operation":"state"}
   [junit4]   2> 292878 T1597 oashc.HttpShardHandlerFactory.init created with socketTimeout : 90000,urlScheme : ,connTimeout : 15000,maxConnectionsPerHost : 20,maxConnections : 10000,corePoolSize : 0,maximumPoolSize : 2147483647,maxThreadIdleTime : 5,sizeOfQueue : -1,fairnessPolicy : false,useRetries : false,
   [junit4]   2> 292880 T1597 oasu.UpdateShardHandler.<init> Creating UpdateShardHandler HTTP client with params: socketTimeout=340000&connTimeout=45000&retry=true
   [junit4]   2> 292880 T1597 oasl.LogWatcher.createWatcher SLF4J impl is org.slf4j.impl.Log4jLoggerFactory
   [junit4]   2> 292880 T1597 oasl.LogWatcher.newRegisteredLogWatcher Registering Log Listener [Log4j (org.slf4j.impl.Log4jLoggerFactory)]
   [junit4]   2> 292880 T1597 oasc.CoreContainer.load Node Name: 127.0.0.1
   [junit4]   2> 292881 T1597 oasc.ZkContainer.initZooKeeper Zookeeper client=127.0.0.1:36134/solr
   [junit4]   2> 292881 T1597 oasc.ZkController.checkChrootPath zkHost includes chroot
   [junit4]   2> 293895 T1597 n:127.0.0.1:47799_ oasc.ZkController.createEphemeralLiveNode Register node as live in ZooKeeper:/live_nodes/127.0.0.1:47799_
   [junit4]   2> 293897 T1597 n:127.0.0.1:47799_ oasc.Overseer.close Overseer (id=null) closing
   [junit4]   2> 293902 T1597 n:127.0.0.1:47799_ oasc.CorePropertiesLocator.discover Looking for core definitions underneath /home/jenkins/workspace/Lucene-Solr-trunk-Linux/solr/build/solr-core/test/J0/temp/solr.cloud.LeaderFailoverAfterPartitionTest 78CDE9CFD1BB0D04-001/shard-3-001/cores
   [junit4]   2> 293904 T1597 n:127.0.0.1:47799_ oasc.CoreDescriptor.<init> CORE DESCRIPTOR: {name=collection1, config=solrconfig.xml, transient=false, schema=schema.xml, loadOnStartup=true, instanceDir=/home/jenkins/workspace/Lucene-Solr-trunk-Linux/solr/build/solr-core/test/J0/temp/solr.cloud.LeaderFailoverAfterPartitionTest 78CDE9CFD1BB0D04-001/shard-3-001/cores/collection1, collection=collection1, absoluteInstDir=/home/jenkins/workspace/Lucene-Solr-trunk-Linux/solr/build/

[...truncated too long message...]

   [junit4]   2> 	at org.apache.solr.cloud.LeaderElector.access$200(LeaderElector.java:56)
   [junit4]   2> 	at org.apache.solr.cloud.LeaderElector$ElectionWatcher.process(LeaderElector.java:390)
   [junit4]   2> 	at org.apache.solr.common.cloud.SolrZkClient$3$1.run(SolrZkClient.java:264)
   [junit4]   2> 	at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:511)
   [junit4]   2> 	at java.util.concurrent.FutureTask.run(FutureTask.java:266)
   [junit4]   2> 	at org.apache.solr.common.util.ExecutorUtil$MDCAwareThreadPoolExecutor$1.run(ExecutorUtil.java:148)
   [junit4]   2> 	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142)
   [junit4]   2> 	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617)
   [junit4]   2> 	at java.lang.Thread.run(Thread.java:745)
   [junit4]   2> Caused by: org.apache.zookeeper.KeeperException$SessionExpiredException: KeeperErrorCode = Session expired for /overseer
   [junit4]   2> 	at org.apache.zookeeper.KeeperException.create(KeeperException.java:127)
   [junit4]   2> 	at org.apache.zookeeper.KeeperException.create(KeeperException.java:51)
   [junit4]   2> 	at org.apache.zookeeper.ZooKeeper.create(ZooKeeper.java:783)
   [junit4]   2> 	at org.apache.solr.common.cloud.SolrZkClient$9.execute(SolrZkClient.java:380)
   [junit4]   2> 	at org.apache.solr.common.cloud.SolrZkClient$9.execute(SolrZkClient.java:377)
   [junit4]   2> 	at org.apache.solr.common.cloud.ZkCmdExecutor.retryOperation(ZkCmdExecutor.java:61)
   [junit4]   2> 	at org.apache.solr.common.cloud.SolrZkClient.create(SolrZkClient.java:377)
   [junit4]   2> 	at org.apache.solr.cloud.Overseer.createOverseerNode(Overseer.java:923)
   [junit4]   2> 	... 20 more
   [junit4]   2> 
   [junit4]   2> 443497 T1597 oejs.AbstractConnector.doStop Stopped ServerConnector@6472ed68{HTTP/1.1}{127.0.0.1:?}
   [junit4]   2> 443497 T1597 oejsh.ContextHandler.doStop Stopped o.e.j.s.ServletContextHandler@310745e6{/,null,UNAVAILABLE}
   [junit4]   2> 443499 T1597 c:collection1 s:shard2 x:collection1 oasc.ZkTestServer.send4LetterWord connecting to 127.0.0.1:36134 36134
   [junit4]   2> 443608 T1598 oasc.ZkTestServer.send4LetterWord connecting to 127.0.0.1:36134 36134
   [junit4]   2> 443609 T1598 oasc.ZkTestServer$ZKServerMain.runFromConfig WARN Watch limit violations: 
   [junit4]   2> 	Maximum concurrent create/delete watches above limit:
   [junit4]   2> 	
   [junit4]   2> 		5	/solr/aliases.json
   [junit4]   2> 		5	/solr/clusterstate.json
   [junit4]   2> 		4	/solr/configs/conf1
   [junit4]   2> 	
   [junit4]   2> 	Maximum concurrent children watches above limit:
   [junit4]   2> 	
   [junit4]   2> 		5	/solr/live_nodes
   [junit4]   2> 		4	/solr/overseer/queue
   [junit4]   2> 		4	/solr/overseer/collection-queue-work
   [junit4]   2> 	
   [junit4]   2> 443609 T1597 c:collection1 s:shard2 x:collection1 oasc.SocketProxy.close WARN Closing 1 connections to: http://127.0.0.1:47799/, target: http://127.0.0.1:41524/
   [junit4]   2> 443609 T1597 c:collection1 s:shard2 x:collection1 oasc.SocketProxy.close WARN Closing 5 connections to: http://127.0.0.1:41964/, target: http://127.0.0.1:39172/
   [junit4]   2> 443610 T1597 c:collection1 s:shard2 x:collection1 oasc.SocketProxy.close WARN Closing 0 connections to: http://127.0.0.1:45017/, target: http://127.0.0.1:57471/
   [junit4]   2> 443610 T1597 c:collection1 s:shard2 x:collection1 oasc.SocketProxy.close WARN Closing 3 connections to: http://127.0.0.1:37244/, target: http://127.0.0.1:35817/
   [junit4]   2> NOTE: reproduce with: ant test  -Dtestcase=LeaderFailoverAfterPartitionTest -Dtests.method=test -Dtests.seed=78CDE9CFD1BB0D04 -Dtests.multiplier=3 -Dtests.slow=true -Dtests.locale=th_TH_TH_#u-nu-thai -Dtests.timezone=America/Knox_IN -Dtests.asserts=true -Dtests.file.encoding=ISO-8859-1
   [junit4] FAILURE  157s J0 | LeaderFailoverAfterPartitionTest.test <<<
   [junit4]    > Throwable #1: java.lang.AssertionError: Didn't see replicas [core_node2, core_node3] come up within 90000 ms! ClusterState: DocCollection(c8n_1x3_lf)={
   [junit4]    >   "replicationFactor":"3",
   [junit4]    >   "shards":{"shard1":{
   [junit4]    >       "range":"80000000-7fffffff",
   [junit4]    >       "state":"active",
   [junit4]    >       "replicas":{
   [junit4]    >         "core_node1":{
   [junit4]    >           "core":"c8n_1x3_lf_shard1_replica2",
   [junit4]    >           "base_url":"http://127.0.0.1:45017",
   [junit4]    >           "node_name":"127.0.0.1:45017_",
   [junit4]    >           "state":"down"},
   [junit4]    >         "core_node2":{
   [junit4]    >           "core":"c8n_1x3_lf_shard1_replica1",
   [junit4]    >           "base_url":"http://127.0.0.1:47799",
   [junit4]    >           "node_name":"127.0.0.1:47799_",
   [junit4]    >           "state":"recovering"},
   [junit4]    >         "core_node3":{
   [junit4]    >           "core":"c8n_1x3_lf_shard1_replica3",
   [junit4]    >           "base_url":"http://127.0.0.1:41964",
   [junit4]    >           "node_name":"127.0.0.1:41964_",
   [junit4]    >           "state":"active",
   [junit4]    >           "leader":"true"}}}},
   [junit4]    >   "router":{"name":"compositeId"},
   [junit4]    >   "maxShardsPerNode":"1",
   [junit4]    >   "autoAddReplicas":"false"}
   [junit4]    > 	at __randomizedtesting.SeedInfo.seed([78CDE9CFD1BB0D04:F099D6157F4760FC]:0)
   [junit4]    > 	at org.apache.solr.cloud.HttpPartitionTest.waitToSeeReplicasActive(HttpPartitionTest.java:547)
   [junit4]    > 	at org.apache.solr.cloud.LeaderFailoverAfterPartitionTest.testRf3WithLeaderFailover(LeaderFailoverAfterPartitionTest.java:178)
   [junit4]    > 	at org.apache.solr.cloud.LeaderFailoverAfterPartitionTest.test(LeaderFailoverAfterPartitionTest.java:51)
   [junit4]    > 	at org.apache.solr.BaseDistributedSearchTestCase$ShardsRepeatRule$ShardsFixedStatement.callStatement(BaseDistributedSearchTestCase.java:960)
   [junit4]    > 	at org.apache.solr.BaseDistributedSearchTestCase$ShardsRepeatRule$ShardsStatement.evaluate(BaseDistributedSearchTestCase.java:935)
   [junit4]    > 	at java.lang.Thread.run(Thread.java:745)
   [junit4]   2> 443617 T1597 c:collection1 s:shard2 x:collection1 oas.SolrTestCaseJ4.deleteCore ###deleteCore
   [junit4]   2> NOTE: leaving temporary files on disk at: /home/jenkins/workspace/Lucene-Solr-trunk-Linux/solr/build/solr-core/test/J0/temp/solr.cloud.LeaderFailoverAfterPartitionTest 78CDE9CFD1BB0D04-001
   [junit4]   2> 157158 T1596 ccr.ThreadLeakControl.checkThreadLeaks WARNING Will linger awaiting termination of 1 leaked thread(s).
   [junit4]   2> NOTE: test params are: codec=Asserting(Lucene50): {range_facet_l_dv=PostingsFormat(name=Memory doPackFST= true), _version_=FST50, multiDefault=TestBloomFilteredLucenePostings(BloomFilteringPostingsFormat(Lucene50(blocksize=128))), a_t=FST50, intDefault=FST50, id=PostingsFormat(name=Memory doPackFST= true), range_facet_i_dv=FST50, text=PostingsFormat(name=Direct), range_facet_l=FST50, timestamp=FST50}, docValues:{range_facet_l_dv=DocValuesFormat(name=Asserting), range_facet_i_dv=DocValuesFormat(name=Direct), timestamp=DocValuesFormat(name=Direct)}, sim=RandomSimilarityProvider(queryNorm=true,coord=crazy): {}, locale=th_TH_TH_#u-nu-thai, timezone=America/Knox_IN
   [junit4]   2> NOTE: Linux 3.13.0-49-generic amd64/Oracle Corporation 1.8.0_45 (64-bit)/cpus=12,threads=1,free=135580800,total=510132224
   [junit4]   2> NOTE: All tests run in this JVM: [ResponseHeaderTest, TestUpdate, TestMergePolicyConfig, SolrCoreTest, TestIntervalFaceting, DistributedFacetPivotSmallTest, LeaderElectionIntegrationTest, BasicDistributedZkTest, TestDefaultSearchFieldResource, SSLMigrationTest, TestDynamicFieldResource, RollingRestartTest, InfoHandlerTest, TestFileDictionaryLookup, DateFieldTest, RequestHandlersTest, SolrRequestParserTest, TestHdfsUpdateLog, TestIBSimilarityFactory, HdfsSyncSliceTest, PingRequestHandlerTest, TestWordDelimiterFilterFactory, ZkNodePropsTest, CSVRequestHandlerTest, TestJoin, TestImplicitCoreProperties, SolrCloudExampleTest, TestHighlightDedupGrouping, EnumFieldTest, ClusterStateTest, OutOfBoxZkACLAndCredentialsProvidersTest, URLClassifyProcessorTest, TestHashQParserPlugin, DateRangeFieldTest, UpdateRequestProcessorFactoryTest, TestQuerySenderListener, HdfsDirectoryFactoryTest, HighlighterConfigTest, SimplePostToolTest, TestFreeTextSuggestions, HdfsUnloadDistributedZkTest, UUIDFieldTest, SimpleCollectionCreateDeleteTest, TestRestManager, TestJmxMonitoredMap, TestCodecSupport, RankQueryTest, TermVectorComponentTest, SignatureUpdateProcessorFactoryTest, TestPhraseSuggestions, DateMathParserTest, LeaderFailoverAfterPartitionTest]
   [junit4] Completed [160/488] on J0 in 157.68s, 1 test, 1 failure <<< FAILURES!

[...truncated 1030 lines...]
BUILD FAILED
/home/jenkins/workspace/Lucene-Solr-trunk-Linux/build.xml:526: The following error occurred while executing this line:
/home/jenkins/workspace/Lucene-Solr-trunk-Linux/build.xml:474: The following error occurred while executing this line:
/home/jenkins/workspace/Lucene-Solr-trunk-Linux/build.xml:61: The following error occurred while executing this line:
/home/jenkins/workspace/Lucene-Solr-trunk-Linux/extra-targets.xml:39: The following error occurred while executing this line:
/home/jenkins/workspace/Lucene-Solr-trunk-Linux/solr/build.xml:229: The following error occurred while executing this line:
/home/jenkins/workspace/Lucene-Solr-trunk-Linux/solr/common-build.xml:512: The following error occurred while executing this line:
/home/jenkins/workspace/Lucene-Solr-trunk-Linux/lucene/common-build.xml:1415: The following error occurred while executing this line:
/home/jenkins/workspace/Lucene-Solr-trunk-Linux/lucene/common-build.xml:973: There were test failures: 488 suites, 1956 tests, 1 failure, 57 ignored (25 assumptions)

Total time: 41 minutes 3 seconds
Build step 'Invoke Ant' marked build as failure
Archiving artifacts
Recording test results
Email was triggered for: Failure - Any
Sending email for trigger: Failure - Any