You are viewing a plain text version of this content. The canonical link for it is here.
Posted to dev@lucene.apache.org by Apache Jenkins Server <je...@builds.apache.org> on 2018/09/20 03:26:28 UTC

[JENKINS] Lucene-Solr-Tests-master - Build # 2818 - Unstable

Build: https://builds.apache.org/job/Lucene-Solr-Tests-master/2818/

1 tests failed.
FAILED:  org.apache.solr.cloud.api.collections.TestHdfsCloudBackupRestore.test

Error Message:
Error from server at https://127.0.0.1:33698/solr: Could not restore core

Stack Trace:
org.apache.solr.client.solrj.impl.HttpSolrClient$RemoteSolrException: Error from server at https://127.0.0.1:33698/solr: Could not restore core
	at __randomizedtesting.SeedInfo.seed([77C9E397208DA122:FF9DDC4D8E71CCDA]:0)
	at org.apache.solr.client.solrj.impl.HttpSolrClient.executeMethod(HttpSolrClient.java:643)
	at org.apache.solr.client.solrj.impl.HttpSolrClient.request(HttpSolrClient.java:255)
	at org.apache.solr.client.solrj.impl.HttpSolrClient.request(HttpSolrClient.java:244)
	at org.apache.solr.client.solrj.impl.LBHttpSolrClient.doRequest(LBHttpSolrClient.java:483)
	at org.apache.solr.client.solrj.impl.LBHttpSolrClient.request(LBHttpSolrClient.java:413)
	at org.apache.solr.client.solrj.impl.CloudSolrClient.sendRequest(CloudSolrClient.java:1107)
	at org.apache.solr.client.solrj.impl.CloudSolrClient.requestWithRetryOnStaleState(CloudSolrClient.java:884)
	at org.apache.solr.client.solrj.impl.CloudSolrClient.request(CloudSolrClient.java:817)
	at org.apache.solr.client.solrj.SolrRequest.process(SolrRequest.java:194)
	at org.apache.solr.client.solrj.SolrRequest.process(SolrRequest.java:211)
	at org.apache.solr.cloud.api.collections.AbstractCloudBackupRestoreTestCase.testBackupAndRestore(AbstractCloudBackupRestoreTestCase.java:313)
	at org.apache.solr.cloud.api.collections.AbstractCloudBackupRestoreTestCase.test(AbstractCloudBackupRestoreTestCase.java:144)
	at org.apache.solr.cloud.api.collections.TestHdfsCloudBackupRestore.test(TestHdfsCloudBackupRestore.java:213)
	at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
	at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)
	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
	at java.lang.reflect.Method.invoke(Method.java:498)
	at com.carrotsearch.randomizedtesting.RandomizedRunner.invoke(RandomizedRunner.java:1742)
	at com.carrotsearch.randomizedtesting.RandomizedRunner$8.evaluate(RandomizedRunner.java:935)
	at com.carrotsearch.randomizedtesting.RandomizedRunner$9.evaluate(RandomizedRunner.java:971)
	at com.carrotsearch.randomizedtesting.RandomizedRunner$10.evaluate(RandomizedRunner.java:985)
	at com.carrotsearch.randomizedtesting.rules.SystemPropertiesRestoreRule$1.evaluate(SystemPropertiesRestoreRule.java:57)
	at org.apache.lucene.util.TestRuleSetupTeardownChained$1.evaluate(TestRuleSetupTeardownChained.java:49)
	at org.apache.lucene.util.AbstractBeforeAfterRule$1.evaluate(AbstractBeforeAfterRule.java:45)
	at org.apache.lucene.util.TestRuleThreadAndTestName$1.evaluate(TestRuleThreadAndTestName.java:48)
	at org.apache.lucene.util.TestRuleIgnoreAfterMaxFailures$1.evaluate(TestRuleIgnoreAfterMaxFailures.java:64)
	at org.apache.lucene.util.TestRuleMarkFailure$1.evaluate(TestRuleMarkFailure.java:47)
	at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
	at com.carrotsearch.randomizedtesting.ThreadLeakControl$StatementRunner.run(ThreadLeakControl.java:368)
	at com.carrotsearch.randomizedtesting.ThreadLeakControl.forkTimeoutingTask(ThreadLeakControl.java:817)
	at com.carrotsearch.randomizedtesting.ThreadLeakControl$3.evaluate(ThreadLeakControl.java:468)
	at com.carrotsearch.randomizedtesting.RandomizedRunner.runSingleTest(RandomizedRunner.java:944)
	at com.carrotsearch.randomizedtesting.RandomizedRunner$5.evaluate(RandomizedRunner.java:830)
	at com.carrotsearch.randomizedtesting.RandomizedRunner$6.evaluate(RandomizedRunner.java:880)
	at com.carrotsearch.randomizedtesting.RandomizedRunner$7.evaluate(RandomizedRunner.java:891)
	at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
	at com.carrotsearch.randomizedtesting.rules.SystemPropertiesRestoreRule$1.evaluate(SystemPropertiesRestoreRule.java:57)
	at org.apache.lucene.util.AbstractBeforeAfterRule$1.evaluate(AbstractBeforeAfterRule.java:45)
	at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
	at org.apache.lucene.util.TestRuleStoreClassName$1.evaluate(TestRuleStoreClassName.java:41)
	at com.carrotsearch.randomizedtesting.rules.NoShadowingOrOverridesOnMethodsRule$1.evaluate(NoShadowingOrOverridesOnMethodsRule.java:40)
	at com.carrotsearch.randomizedtesting.rules.NoShadowingOrOverridesOnMethodsRule$1.evaluate(NoShadowingOrOverridesOnMethodsRule.java:40)
	at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
	at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
	at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
	at org.apache.lucene.util.TestRuleAssertionsRequired$1.evaluate(TestRuleAssertionsRequired.java:53)
	at org.apache.lucene.util.TestRuleMarkFailure$1.evaluate(TestRuleMarkFailure.java:47)
	at org.apache.lucene.util.TestRuleIgnoreAfterMaxFailures$1.evaluate(TestRuleIgnoreAfterMaxFailures.java:64)
	at org.apache.lucene.util.TestRuleIgnoreTestSuites$1.evaluate(TestRuleIgnoreTestSuites.java:54)
	at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
	at com.carrotsearch.randomizedtesting.ThreadLeakControl$StatementRunner.run(ThreadLeakControl.java:368)
	at java.lang.Thread.run(Thread.java:748)




Build Log:
[...truncated 12699 lines...]
   [junit4] Suite: org.apache.solr.cloud.api.collections.TestHdfsCloudBackupRestore
   [junit4]   2> 598557 INFO  (SUITE-TestHdfsCloudBackupRestore-seed#[77C9E397208DA122]-worker) [    ] o.a.s.SolrTestCaseJ4 SecureRandom sanity checks: test.solr.allowed.securerandom=null & java.security.egd=file:/dev/./urandom
   [junit4]   2> Creating dataDir: /home/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-master/solr/build/solr-core/test/J1/temp/solr.cloud.api.collections.TestHdfsCloudBackupRestore_77C9E397208DA122-001/init-core-data-001
   [junit4]   2> 598558 WARN  (SUITE-TestHdfsCloudBackupRestore-seed#[77C9E397208DA122]-worker) [    ] o.a.s.SolrTestCaseJ4 startTrackingSearchers: numOpens=2 numCloses=2
   [junit4]   2> 598558 INFO  (SUITE-TestHdfsCloudBackupRestore-seed#[77C9E397208DA122]-worker) [    ] o.a.s.SolrTestCaseJ4 Using TrieFields (NUMERIC_POINTS_SYSPROP=false) w/NUMERIC_DOCVALUES_SYSPROP=true
   [junit4]   2> 598629 INFO  (SUITE-TestHdfsCloudBackupRestore-seed#[77C9E397208DA122]-worker) [    ] o.a.s.SolrTestCaseJ4 Randomized ssl (true) and clientAuth (true) via: @org.apache.solr.util.RandomizeSSL(reason=, ssl=NaN, value=NaN, clientAuth=NaN)
   [junit4]   2> 602454 WARN  (SUITE-TestHdfsCloudBackupRestore-seed#[77C9E397208DA122]-worker) [    ] o.a.h.u.NativeCodeLoader Unable to load native-hadoop library for your platform... using builtin-java classes where applicable
   [junit4]   1> Formatting using clusterid: testClusterID
   [junit4]   2> 609352 WARN  (SUITE-TestHdfsCloudBackupRestore-seed#[77C9E397208DA122]-worker) [    ] o.a.h.m.i.MetricsConfig Cannot locate configuration: tried hadoop-metrics2-namenode.properties,hadoop-metrics2.properties
   [junit4]   2> 610626 INFO  (SUITE-TestHdfsCloudBackupRestore-seed#[77C9E397208DA122]-worker) [    ] o.m.log Logging to org.apache.logging.slf4j.Log4jLogger@3e7c45eb via org.mortbay.log.Slf4jLog
   [junit4]   2> 610923 WARN  (SUITE-TestHdfsCloudBackupRestore-seed#[77C9E397208DA122]-worker) [    ] o.a.h.h.HttpRequestLog Jetty request log can only be enabled using Log4j
   [junit4]   2> 612740 INFO  (SUITE-TestHdfsCloudBackupRestore-seed#[77C9E397208DA122]-worker) [    ] o.m.log jetty-6.1.26
   [junit4]   2> 613113 INFO  (SUITE-TestHdfsCloudBackupRestore-seed#[77C9E397208DA122]-worker) [    ] o.m.log Extract jar:file:/home/jenkins/.ivy2/cache/org.apache.hadoop/hadoop-hdfs/tests/hadoop-hdfs-2.7.4-tests.jar!/webapps/hdfs to ./temp/Jetty_lucene2.us.west_apache_org_42891_hdfs____6zyf0c/webapp
   [junit4]   2> 616197 INFO  (SUITE-TestHdfsCloudBackupRestore-seed#[77C9E397208DA122]-worker) [    ] o.m.log Started HttpServer2$SelectChannelConnectorWithSafeStartup@lucene2-us-west.apache.org:42891
   [junit4]   2> 625391 WARN  (SUITE-TestHdfsCloudBackupRestore-seed#[77C9E397208DA122]-worker) [    ] o.a.h.h.HttpRequestLog Jetty request log can only be enabled using Log4j
   [junit4]   2> 625449 INFO  (SUITE-TestHdfsCloudBackupRestore-seed#[77C9E397208DA122]-worker) [    ] o.m.log jetty-6.1.26
   [junit4]   2> 625637 INFO  (SUITE-TestHdfsCloudBackupRestore-seed#[77C9E397208DA122]-worker) [    ] o.m.log Extract jar:file:/home/jenkins/.ivy2/cache/org.apache.hadoop/hadoop-hdfs/tests/hadoop-hdfs-2.7.4-tests.jar!/webapps/datanode to ./temp/Jetty_localhost_46337_datanode____.lru3k4/webapp
   [junit4]   2> 627295 INFO  (SUITE-TestHdfsCloudBackupRestore-seed#[77C9E397208DA122]-worker) [    ] o.m.log Started HttpServer2$SelectChannelConnectorWithSafeStartup@localhost:46337
   [junit4]   2> 631643 WARN  (SUITE-TestHdfsCloudBackupRestore-seed#[77C9E397208DA122]-worker) [    ] o.a.h.h.HttpRequestLog Jetty request log can only be enabled using Log4j
   [junit4]   2> 631645 INFO  (SUITE-TestHdfsCloudBackupRestore-seed#[77C9E397208DA122]-worker) [    ] o.m.log jetty-6.1.26
   [junit4]   2> 631849 INFO  (SUITE-TestHdfsCloudBackupRestore-seed#[77C9E397208DA122]-worker) [    ] o.m.log Extract jar:file:/home/jenkins/.ivy2/cache/org.apache.hadoop/hadoop-hdfs/tests/hadoop-hdfs-2.7.4-tests.jar!/webapps/datanode to ./temp/Jetty_localhost_34506_datanode____.ihyty7/webapp
   [junit4]   2> 634056 INFO  (SUITE-TestHdfsCloudBackupRestore-seed#[77C9E397208DA122]-worker) [    ] o.m.log Started HttpServer2$SelectChannelConnectorWithSafeStartup@localhost:34506
   [junit4]   2> 637914 ERROR (DataNode: [[[DISK]file:/home/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-master/solr/build/solr-core/test/J1/temp/solr.cloud.api.collections.TestHdfsCloudBackupRestore_77C9E397208DA122-001/tempDir-001/hdfsBaseDir/data/data3/, [DISK]file:/home/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-master/solr/build/solr-core/test/J1/temp/solr.cloud.api.collections.TestHdfsCloudBackupRestore_77C9E397208DA122-001/tempDir-001/hdfsBaseDir/data/data4/]]  heartbeating to lucene2-us-west.apache.org/127.0.0.1:33785) [    ] o.a.h.h.s.d.DirectoryScanner dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value below 1 ms/sec. Assuming default value of 1000
   [junit4]   2> 637935 ERROR (DataNode: [[[DISK]file:/home/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-master/solr/build/solr-core/test/J1/temp/solr.cloud.api.collections.TestHdfsCloudBackupRestore_77C9E397208DA122-001/tempDir-001/hdfsBaseDir/data/data1/, [DISK]file:/home/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-master/solr/build/solr-core/test/J1/temp/solr.cloud.api.collections.TestHdfsCloudBackupRestore_77C9E397208DA122-001/tempDir-001/hdfsBaseDir/data/data2/]]  heartbeating to lucene2-us-west.apache.org/127.0.0.1:33785) [    ] o.a.h.h.s.d.DirectoryScanner dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value below 1 ms/sec. Assuming default value of 1000
   [junit4]   2> 639339 INFO  (Block report processor) [    ] BlockStateChange BLOCK* processReport 0x46255ee6b8bf35: from storage DS-3ec55081-87c5-49b0-a238-b1d4542aad6c node DatanodeRegistration(127.0.0.1:44896, datanodeUuid=6a6851dc-4ae7-4efb-b1ff-b4da10435dc9, infoPort=37720, infoSecurePort=0, ipcPort=44288, storageInfo=lv=-56;cid=testClusterID;nsid=1529615062;c=0), blocks: 0, hasStaleStorage: true, processing time: 2 msecs
   [junit4]   2> 639339 INFO  (Block report processor) [    ] BlockStateChange BLOCK* processReport 0x46255ee7ab2fdb: from storage DS-fd733271-d479-4121-9c30-afe7ffce6a1c node DatanodeRegistration(127.0.0.1:35744, datanodeUuid=6655db5d-2246-4b63-a54a-7aaeb65701d8, infoPort=35855, infoSecurePort=0, ipcPort=42734, storageInfo=lv=-56;cid=testClusterID;nsid=1529615062;c=0), blocks: 0, hasStaleStorage: true, processing time: 0 msecs
   [junit4]   2> 639339 INFO  (Block report processor) [    ] BlockStateChange BLOCK* processReport 0x46255ee6b8bf35: from storage DS-0ac02aed-f025-42ec-be14-c563d131b702 node DatanodeRegistration(127.0.0.1:44896, datanodeUuid=6a6851dc-4ae7-4efb-b1ff-b4da10435dc9, infoPort=37720, infoSecurePort=0, ipcPort=44288, storageInfo=lv=-56;cid=testClusterID;nsid=1529615062;c=0), blocks: 0, hasStaleStorage: false, processing time: 0 msecs
   [junit4]   2> 639339 INFO  (Block report processor) [    ] BlockStateChange BLOCK* processReport 0x46255ee7ab2fdb: from storage DS-aeb384ea-bd91-4476-9ee9-a0ee836cfc59 node DatanodeRegistration(127.0.0.1:35744, datanodeUuid=6655db5d-2246-4b63-a54a-7aaeb65701d8, infoPort=35855, infoSecurePort=0, ipcPort=42734, storageInfo=lv=-56;cid=testClusterID;nsid=1529615062;c=0), blocks: 0, hasStaleStorage: false, processing time: 0 msecs
   [junit4]   2> 640740 WARN  (SUITE-TestHdfsCloudBackupRestore-seed#[77C9E397208DA122]-worker) [    ] o.a.s.c.a.c.TestHdfsCloudBackupRestore The NameNode is in SafeMode - Solr will wait 5 seconds and try again.
   [junit4]   2> 646311 INFO  (SUITE-TestHdfsCloudBackupRestore-seed#[77C9E397208DA122]-worker) [    ] o.a.s.c.MiniSolrCloudCluster Starting cluster of 2 servers in /home/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-master/solr/build/solr-core/test/J1/temp/solr.cloud.api.collections.TestHdfsCloudBackupRestore_77C9E397208DA122-001/tempDir-002
   [junit4]   2> 646312 INFO  (SUITE-TestHdfsCloudBackupRestore-seed#[77C9E397208DA122]-worker) [    ] o.a.s.c.ZkTestServer STARTING ZK TEST SERVER
   [junit4]   2> 646326 INFO  (Thread-733) [    ] o.a.s.c.ZkTestServer client port:0.0.0.0/0.0.0.0:0
   [junit4]   2> 646326 INFO  (Thread-733) [    ] o.a.s.c.ZkTestServer Starting server
   [junit4]   2> 646363 ERROR (Thread-733) [    ] o.a.z.s.ZooKeeperServer ZKShutdownHandler is not registered, so ZooKeeper server won't take any action on ERROR or SHUTDOWN server state changes
   [junit4]   2> 646428 INFO  (SUITE-TestHdfsCloudBackupRestore-seed#[77C9E397208DA122]-worker) [    ] o.a.s.c.ZkTestServer start zk server on port:44742
   [junit4]   2> 646452 INFO  (zkConnectionManagerCallback-746-thread-1) [    ] o.a.s.c.c.ConnectionManager zkClient has connected
   [junit4]   2> 646539 INFO  (jetty-launcher-743-thread-2) [    ] o.e.j.s.Server jetty-9.4.11.v20180605; built: 2018-06-05T18:24:03.829Z; git: d5fc0523cfa96bfebfbda19606cad384d772f04c; jvm 1.8.0_172-b11
   [junit4]   2> 646540 INFO  (jetty-launcher-743-thread-1) [    ] o.e.j.s.Server jetty-9.4.11.v20180605; built: 2018-06-05T18:24:03.829Z; git: d5fc0523cfa96bfebfbda19606cad384d772f04c; jvm 1.8.0_172-b11
   [junit4]   2> 646631 INFO  (jetty-launcher-743-thread-1) [    ] o.e.j.s.session DefaultSessionIdManager workerName=node0
   [junit4]   2> 646631 INFO  (jetty-launcher-743-thread-1) [    ] o.e.j.s.session No SessionScavenger set, using defaults
   [junit4]   2> 646632 INFO  (jetty-launcher-743-thread-2) [    ] o.e.j.s.session DefaultSessionIdManager workerName=node0
   [junit4]   2> 646632 INFO  (jetty-launcher-743-thread-2) [    ] o.e.j.s.session No SessionScavenger set, using defaults
   [junit4]   2> 646632 INFO  (jetty-launcher-743-thread-2) [    ] o.e.j.s.session node0 Scavenging every 660000ms
   [junit4]   2> 646632 INFO  (jetty-launcher-743-thread-1) [    ] o.e.j.s.session node0 Scavenging every 600000ms
   [junit4]   2> 646633 INFO  (jetty-launcher-743-thread-2) [    ] o.e.j.s.h.ContextHandler Started o.e.j.s.ServletContextHandler@7760175a{/solr,null,AVAILABLE}
   [junit4]   2> 646633 INFO  (jetty-launcher-743-thread-2) [    ] o.e.j.s.AbstractConnector Started ServerConnector@231b2b03{SSL,[ssl, http/1.1]}{127.0.0.1:38677}
   [junit4]   2> 646633 INFO  (jetty-launcher-743-thread-1) [    ] o.e.j.s.h.ContextHandler Started o.e.j.s.ServletContextHandler@78544950{/solr,null,AVAILABLE}
   [junit4]   2> 646633 INFO  (jetty-launcher-743-thread-2) [    ] o.e.j.s.Server Started @646886ms
   [junit4]   2> 646634 INFO  (jetty-launcher-743-thread-2) [    ] o.a.s.c.s.e.JettySolrRunner Jetty properties: {hostContext=/solr, hostPort=38677}
   [junit4]   2> 646634 ERROR (jetty-launcher-743-thread-2) [    ] o.a.s.u.StartupLoggingUtils Missing Java Option solr.log.dir. Logging may be missing or incomplete.
   [junit4]   2> 646634 INFO  (jetty-launcher-743-thread-2) [    ] o.a.s.s.SolrDispatchFilter Using logger factory org.apache.logging.slf4j.Log4jLoggerFactory
   [junit4]   2> 646634 INFO  (jetty-launcher-743-thread-2) [    ] o.a.s.s.SolrDispatchFilter  ___      _       Welcome to Apache Solr™ version 8.0.0
   [junit4]   2> 646634 INFO  (jetty-launcher-743-thread-2) [    ] o.a.s.s.SolrDispatchFilter / __| ___| |_ _   Starting in cloud mode on port null
   [junit4]   2> 646634 INFO  (jetty-launcher-743-thread-2) [    ] o.a.s.s.SolrDispatchFilter \__ \/ _ \ | '_|  Install dir: null
   [junit4]   2> 646634 INFO  (jetty-launcher-743-thread-1) [    ] o.e.j.s.AbstractConnector Started ServerConnector@55bfa81d{SSL,[ssl, http/1.1]}{127.0.0.1:33698}
   [junit4]   2> 646634 INFO  (jetty-launcher-743-thread-2) [    ] o.a.s.s.SolrDispatchFilter |___/\___/_|_|    Start time: 2018-09-20T01:29:04.410Z
   [junit4]   2> 646634 INFO  (jetty-launcher-743-thread-1) [    ] o.e.j.s.Server Started @646887ms
   [junit4]   2> 646634 INFO  (jetty-launcher-743-thread-1) [    ] o.a.s.c.s.e.JettySolrRunner Jetty properties: {hostContext=/solr, hostPort=33698}
   [junit4]   2> 646634 ERROR (jetty-launcher-743-thread-1) [    ] o.a.s.u.StartupLoggingUtils Missing Java Option solr.log.dir. Logging may be missing or incomplete.
   [junit4]   2> 646634 INFO  (jetty-launcher-743-thread-1) [    ] o.a.s.s.SolrDispatchFilter Using logger factory org.apache.logging.slf4j.Log4jLoggerFactory
   [junit4]   2> 646634 INFO  (jetty-launcher-743-thread-1) [    ] o.a.s.s.SolrDispatchFilter  ___      _       Welcome to Apache Solr™ version 8.0.0
   [junit4]   2> 646634 INFO  (jetty-launcher-743-thread-1) [    ] o.a.s.s.SolrDispatchFilter / __| ___| |_ _   Starting in cloud mode on port null
   [junit4]   2> 646634 INFO  (jetty-launcher-743-thread-1) [    ] o.a.s.s.SolrDispatchFilter \__ \/ _ \ | '_|  Install dir: null
   [junit4]   2> 646635 INFO  (jetty-launcher-743-thread-1) [    ] o.a.s.s.SolrDispatchFilter |___/\___/_|_|    Start time: 2018-09-20T01:29:04.411Z
   [junit4]   2> 646649 INFO  (zkConnectionManagerCallback-750-thread-1) [    ] o.a.s.c.c.ConnectionManager zkClient has connected
   [junit4]   2> 646650 INFO  (jetty-launcher-743-thread-1) [    ] o.a.s.s.SolrDispatchFilter solr.xml found in ZooKeeper. Loading...
   [junit4]   2> 646685 INFO  (jetty-launcher-743-thread-1) [    ] o.a.s.c.SolrXmlConfig MBean server found: com.sun.jmx.mbeanserver.JmxMBeanServer@512ffb1b, but no JMX reporters were configured - adding default JMX reporter.
   [junit4]   2> 646704 INFO  (zkConnectionManagerCallback-748-thread-1) [    ] o.a.s.c.c.ConnectionManager zkClient has connected
   [junit4]   2> 646704 INFO  (jetty-launcher-743-thread-2) [    ] o.a.s.s.SolrDispatchFilter solr.xml found in ZooKeeper. Loading...
   [junit4]   2> 646744 INFO  (jetty-launcher-743-thread-2) [    ] o.a.s.c.SolrXmlConfig MBean server found: com.sun.jmx.mbeanserver.JmxMBeanServer@512ffb1b, but no JMX reporters were configured - adding default JMX reporter.
   [junit4]   2> 646777 WARN  (NIOServerCxn.Factory:0.0.0.0/0.0.0.0:0) [    ] o.a.z.s.NIOServerCnxn Unable to read additional data from client sessionid 0x10498dacbd00001, likely client has closed socket
   [junit4]   2> 647091 INFO  (jetty-launcher-743-thread-1) [    ] o.a.s.c.ZkContainer Zookeeper client=127.0.0.1:44742/solr
   [junit4]   2> 647122 INFO  (zkConnectionManagerCallback-754-thread-1) [    ] o.a.s.c.c.ConnectionManager zkClient has connected
   [junit4]   2> 647164 INFO  (zkConnectionManagerCallback-756-thread-1) [    ] o.a.s.c.c.ConnectionManager zkClient has connected
   [junit4]   2> 648149 INFO  (jetty-launcher-743-thread-1) [n:127.0.0.1:33698_solr    ] o.a.s.c.OverseerElectionContext I am going to be the leader 127.0.0.1:33698_solr
   [junit4]   2> 648163 INFO  (jetty-launcher-743-thread-1) [n:127.0.0.1:33698_solr    ] o.a.s.c.Overseer Overseer (id=73351559434469380-127.0.0.1:33698_solr-n_0000000000) starting
   [junit4]   2> 648365 INFO  (zkConnectionManagerCallback-763-thread-1) [    ] o.a.s.c.c.ConnectionManager zkClient has connected
   [junit4]   2> 648367 INFO  (jetty-launcher-743-thread-1) [n:127.0.0.1:33698_solr    ] o.a.s.c.s.i.ZkClientClusterStateProvider Cluster at 127.0.0.1:44742/solr ready
   [junit4]   2> 648382 INFO  (OverseerStateUpdate-73351559434469380-127.0.0.1:33698_solr-n_0000000000) [n:127.0.0.1:33698_solr    ] o.a.s.c.Overseer Starting to work on the main queue : 127.0.0.1:33698_solr
   [junit4]   2> 648581 INFO  (jetty-launcher-743-thread-1) [n:127.0.0.1:33698_solr    ] o.a.s.c.ZkController Register node as live in ZooKeeper:/live_nodes/127.0.0.1:33698_solr
   [junit4]   2> 648602 INFO  (jetty-launcher-743-thread-1) [n:127.0.0.1:33698_solr    ] o.a.s.c.b.r.BackupRepositoryFactory Added backup repository with configuration params {type = repository,name = hdfs,class = org.apache.solr.core.backup.repository.HdfsBackupRepository,attributes = {name=hdfs, class=org.apache.solr.core.backup.repository.HdfsBackupRepository},args = {location=/backup,solr.hdfs.home=hdfs://lucene2-us-west.apache.org:33785/solr,solr.hdfs.confdir=}}
   [junit4]   2> 648602 INFO  (jetty-launcher-743-thread-1) [n:127.0.0.1:33698_solr    ] o.a.s.c.b.r.BackupRepositoryFactory Default configuration for backup repository is with configuration params {type = repository,name = hdfs,class = org.apache.solr.core.backup.repository.HdfsBackupRepository,attributes = {name=hdfs, class=org.apache.solr.core.backup.repository.HdfsBackupRepository},args = {location=/backup,solr.hdfs.home=hdfs://lucene2-us-west.apache.org:33785/solr,solr.hdfs.confdir=}}
   [junit4]   2> 648872 INFO  (zkCallback-762-thread-1) [    ] o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (0) -> (1)
   [junit4]   2> 648888 INFO  (zkCallback-755-thread-1) [    ] o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (0) -> (1)
   [junit4]   2> 649018 INFO  (jetty-launcher-743-thread-1) [n:127.0.0.1:33698_solr    ] o.a.s.h.a.MetricsHistoryHandler No .system collection, keeping metrics history in memory.
   [junit4]   2> 649183 INFO  (jetty-launcher-743-thread-1) [n:127.0.0.1:33698_solr    ] o.a.s.m.r.SolrJmxReporter JMX monitoring for 'solr.node' (registry 'solr.node') enabled at server: com.sun.jmx.mbeanserver.JmxMBeanServer@512ffb1b
   [junit4]   2> 649257 INFO  (jetty-launcher-743-thread-1) [n:127.0.0.1:33698_solr    ] o.a.s.m.r.SolrJmxReporter JMX monitoring for 'solr.jvm' (registry 'solr.jvm') enabled at server: com.sun.jmx.mbeanserver.JmxMBeanServer@512ffb1b
   [junit4]   2> 649257 INFO  (jetty-launcher-743-thread-1) [n:127.0.0.1:33698_solr    ] o.a.s.m.r.SolrJmxReporter JMX monitoring for 'solr.jetty' (registry 'solr.jetty') enabled at server: com.sun.jmx.mbeanserver.JmxMBeanServer@512ffb1b
   [junit4]   2> 649328 INFO  (jetty-launcher-743-thread-1) [n:127.0.0.1:33698_solr    ] o.a.s.c.CorePropertiesLocator Found 0 core definitions underneath /home/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-master/solr/build/solr-core/test/J1/temp/solr.cloud.api.collections.TestHdfsCloudBackupRestore_77C9E397208DA122-001/tempDir-002/node2/.
   [junit4]   2> 650684 INFO  (jetty-launcher-743-thread-2) [    ] o.a.s.c.ZkContainer Zookeeper client=127.0.0.1:44742/solr
   [junit4]   2> 650703 INFO  (zkConnectionManagerCallback-768-thread-1) [    ] o.a.s.c.c.ConnectionManager zkClient has connected
   [junit4]   2> 650789 INFO  (zkConnectionManagerCallback-770-thread-1) [    ] o.a.s.c.c.ConnectionManager zkClient has connected
   [junit4]   2> 650853 INFO  (jetty-launcher-743-thread-2) [n:127.0.0.1:38677_solr    ] o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (0) -> (1)
   [junit4]   2> 650926 INFO  (jetty-launcher-743-thread-2) [n:127.0.0.1:38677_solr    ] o.a.s.c.TransientSolrCoreCacheDefault Allocating transient cache for 2147483647 transient cores
   [junit4]   2> 650926 INFO  (jetty-launcher-743-thread-2) [n:127.0.0.1:38677_solr    ] o.a.s.c.ZkController Register node as live in ZooKeeper:/live_nodes/127.0.0.1:38677_solr
   [junit4]   2> 650940 INFO  (jetty-launcher-743-thread-2) [n:127.0.0.1:38677_solr    ] o.a.s.c.b.r.BackupRepositoryFactory Added backup repository with configuration params {type = repository,name = hdfs,class = org.apache.solr.core.backup.repository.HdfsBackupRepository,attributes = {name=hdfs, class=org.apache.solr.core.backup.repository.HdfsBackupRepository},args = {location=/backup,solr.hdfs.home=hdfs://lucene2-us-west.apache.org:33785/solr,solr.hdfs.confdir=}}
   [junit4]   2> 650940 INFO  (jetty-launcher-743-thread-2) [n:127.0.0.1:38677_solr    ] o.a.s.c.b.r.BackupRepositoryFactory Default configuration for backup repository is with configuration params {type = repository,name = hdfs,class = org.apache.solr.core.backup.repository.HdfsBackupRepository,attributes = {name=hdfs, class=org.apache.solr.core.backup.repository.HdfsBackupRepository},args = {location=/backup,solr.hdfs.home=hdfs://lucene2-us-west.apache.org:33785/solr,solr.hdfs.confdir=}}
   [junit4]   2> 650992 INFO  (zkCallback-762-thread-1) [    ] o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (1) -> (2)
   [junit4]   2> 650996 INFO  (zkCallback-755-thread-2) [    ] o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (1) -> (2)
   [junit4]   2> 651105 INFO  (zkCallback-769-thread-1) [    ] o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (1) -> (2)
   [junit4]   2> 651244 INFO  (zkConnectionManagerCallback-777-thread-1) [    ] o.a.s.c.c.ConnectionManager zkClient has connected
   [junit4]   2> 651248 INFO  (jetty-launcher-743-thread-2) [n:127.0.0.1:38677_solr    ] o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (0) -> (2)
   [junit4]   2> 651249 INFO  (jetty-launcher-743-thread-2) [n:127.0.0.1:38677_solr    ] o.a.s.c.s.i.ZkClientClusterStateProvider Cluster at 127.0.0.1:44742/solr ready
   [junit4]   2> 651280 INFO  (jetty-launcher-743-thread-2) [n:127.0.0.1:38677_solr    ] o.a.s.h.a.MetricsHistoryHandler No .system collection, keeping metrics history in memory.
   [junit4]   2> 651517 INFO  (jetty-launcher-743-thread-2) [n:127.0.0.1:38677_solr    ] o.a.s.m.r.SolrJmxReporter JMX monitoring for 'solr.node' (registry 'solr.node') enabled at server: com.sun.jmx.mbeanserver.JmxMBeanServer@512ffb1b
   [junit4]   2> 651645 INFO  (jetty-launcher-743-thread-2) [n:127.0.0.1:38677_solr    ] o.a.s.m.r.SolrJmxReporter JMX monitoring for 'solr.jvm' (registry 'solr.jvm') enabled at server: com.sun.jmx.mbeanserver.JmxMBeanServer@512ffb1b
   [junit4]   2> 651646 INFO  (jetty-launcher-743-thread-2) [n:127.0.0.1:38677_solr    ] o.a.s.m.r.SolrJmxReporter JMX monitoring for 'solr.jetty' (registry 'solr.jetty') enabled at server: com.sun.jmx.mbeanserver.JmxMBeanServer@512ffb1b
   [junit4]   2> 651647 INFO  (jetty-launcher-743-thread-2) [n:127.0.0.1:38677_solr    ] o.a.s.c.CorePropertiesLocator Found 0 core definitions underneath /home/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-master/solr/build/solr-core/test/J1/temp/solr.cloud.api.collections.TestHdfsCloudBackupRestore_77C9E397208DA122-001/tempDir-002/node1/.
   [junit4]   2> 652084 INFO  (zkConnectionManagerCallback-780-thread-1) [    ] o.a.s.c.c.ConnectionManager zkClient has connected
   [junit4]   2> 652381 INFO  (zkConnectionManagerCallback-785-thread-1) [    ] o.a.s.c.c.ConnectionManager zkClient has connected
   [junit4]   2> 652385 INFO  (SUITE-TestHdfsCloudBackupRestore-seed#[77C9E397208DA122]-worker) [    ] o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (0) -> (2)
   [junit4]   2> 652386 INFO  (SUITE-TestHdfsCloudBackupRestore-seed#[77C9E397208DA122]-worker) [    ] o.a.s.c.s.i.ZkClientClusterStateProvider Cluster at 127.0.0.1:44742/solr ready
   [junit4]   2> 653712 INFO  (TEST-TestHdfsCloudBackupRestore.test-seed#[77C9E397208DA122]) [    ] o.a.s.SolrTestCaseJ4 ###Starting test
   [junit4]   2> 653870 INFO  (qtp400397695-3245) [n:127.0.0.1:33698_solr    ] o.a.s.h.a.CollectionsHandler Invoked Collection Action :create with params collection.configName=conf1&version=2&pullReplicas=0&property.customKey=customValue&maxShardsPerNode=2&router.field=shard_s&autoAddReplicas=true&name=hdfsbackuprestore&nrtReplicas=1&action=CREATE&numShards=2&tlogReplicas=1&wt=javabin and sendToOCPQueue=true
   [junit4]   2> 653944 INFO  (OverseerThreadFactory-1481-thread-1-processing-n:127.0.0.1:33698_solr) [n:127.0.0.1:33698_solr    ] o.a.s.c.a.c.CreateCollectionCmd Create collection hdfsbackuprestore
   [junit4]   2> 654265 INFO  (OverseerStateUpdate-73351559434469380-127.0.0.1:33698_solr-n_0000000000) [n:127.0.0.1:33698_solr    ] o.a.s.c.o.SliceMutator createReplica() {
   [junit4]   2>   "operation":"ADDREPLICA",
   [junit4]   2>   "collection":"hdfsbackuprestore",
   [junit4]   2>   "shard":"shard1",
   [junit4]   2>   "core":"hdfsbackuprestore_shard1_replica_n1",
   [junit4]   2>   "state":"down",
   [junit4]   2>   "base_url":"https://127.0.0.1:33698/solr",
   [junit4]   2>   "type":"NRT",
   [junit4]   2>   "waitForFinalState":"false"} 
   [junit4]   2> 654407 INFO  (OverseerStateUpdate-73351559434469380-127.0.0.1:33698_solr-n_0000000000) [n:127.0.0.1:33698_solr    ] o.a.s.c.o.SliceMutator createReplica() {
   [junit4]   2>   "operation":"ADDREPLICA",
   [junit4]   2>   "collection":"hdfsbackuprestore",
   [junit4]   2>   "shard":"shard1",
   [junit4]   2>   "core":"hdfsbackuprestore_shard1_replica_t2",
   [junit4]   2>   "state":"down",
   [junit4]   2>   "base_url":"https://127.0.0.1:38677/solr",
   [junit4]   2>   "type":"TLOG",
   [junit4]   2>   "waitForFinalState":"false"} 
   [junit4]   2> 654480 INFO  (OverseerStateUpdate-73351559434469380-127.0.0.1:33698_solr-n_0000000000) [n:127.0.0.1:33698_solr    ] o.a.s.c.o.SliceMutator createReplica() {
   [junit4]   2>   "operation":"ADDREPLICA",
   [junit4]   2>   "collection":"hdfsbackuprestore",
   [junit4]   2>   "shard":"shard2",
   [junit4]   2>   "core":"hdfsbackuprestore_shard2_replica_n3",
   [junit4]   2>   "state":"down",
   [junit4]   2>   "base_url":"https://127.0.0.1:33698/solr",
   [junit4]   2>   "type":"NRT",
   [junit4]   2>   "waitForFinalState":"false"} 
   [junit4]   2> 654553 INFO  (OverseerStateUpdate-73351559434469380-127.0.0.1:33698_solr-n_0000000000) [n:127.0.0.1:33698_solr    ] o.a.s.c.o.SliceMutator createReplica() {
   [junit4]   2>   "operation":"ADDREPLICA",
   [junit4]   2>   "collection":"hdfsbackuprestore",
   [junit4]   2>   "shard":"shard2",
   [junit4]   2>   "core":"hdfsbackuprestore_shard2_replica_t4",
   [junit4]   2>   "state":"down",
   [junit4]   2>   "base_url":"https://127.0.0.1:38677/solr",
   [junit4]   2>   "type":"TLOG",
   [junit4]   2>   "waitForFinalState":"false"} 
   [junit4]   2> 655160 INFO  (qtp400397695-3247) [n:127.0.0.1:33698_solr    x:hdfsbackuprestore_shard2_replica_n3] o.a.s.h.a.CoreAdminOperation core create command qt=/admin/cores&collection.configName=conf1&newCollection=true&collection=hdfsbackuprestore&version=2&replicaType=NRT&property.customKey=customValue&coreNodeName=core_node7&name=hdfsbackuprestore_shard2_replica_n3&action=CREATE&numShards=2&shard=shard2&wt=javabin
   [junit4]   2> 655160 INFO  (qtp400397695-3247) [n:127.0.0.1:33698_solr    x:hdfsbackuprestore_shard2_replica_n3] o.a.s.c.TransientSolrCoreCacheDefault Allocating transient cache for 2147483647 transient cores
   [junit4]   2> 655196 INFO  (qtp400397695-3248) [n:127.0.0.1:33698_solr    x:hdfsbackuprestore_shard1_replica_n1] o.a.s.h.a.CoreAdminOperation core create command qt=/admin/cores&collection.configName=conf1&newCollection=true&collection=hdfsbackuprestore&version=2&replicaType=NRT&property.customKey=customValue&coreNodeName=core_node5&name=hdfsbackuprestore_shard1_replica_n1&action=CREATE&numShards=2&shard=shard1&wt=javabin
   [junit4]   2> 655504 INFO  (qtp152596348-3241) [n:127.0.0.1:38677_solr    x:hdfsbackuprestore_shard2_replica_t4] o.a.s.h.a.CoreAdminOperation core create command qt=/admin/cores&collection.configName=conf1&newCollection=true&collection=hdfsbackuprestore&version=2&replicaType=TLOG&property.customKey=customValue&coreNodeName=core_node8&name=hdfsbackuprestore_shard2_replica_t4&action=CREATE&numShards=2&shard=shard2&wt=javabin
   [junit4]   2> 655667 INFO  (qtp152596348-3238) [n:127.0.0.1:38677_solr    x:hdfsbackuprestore_shard1_replica_t2] o.a.s.h.a.CoreAdminOperation core create command qt=/admin/cores&collection.configName=conf1&newCollection=true&collection=hdfsbackuprestore&version=2&replicaType=TLOG&property.customKey=customValue&coreNodeName=core_node6&name=hdfsbackuprestore_shard1_replica_t2&action=CREATE&numShards=2&shard=shard1&wt=javabin
   [junit4]   2> 656426 INFO  (qtp400397695-3247) [n:127.0.0.1:33698_solr c:hdfsbackuprestore s:shard2 r:core_node7 x:hdfsbackuprestore_shard2_replica_n3] o.a.s.c.SolrConfig Using Lucene MatchVersion: 8.0.0
   [junit4]   2> 656429 INFO  (qtp400397695-3248) [n:127.0.0.1:33698_solr c:hdfsbackuprestore s:shard1 r:core_node5 x:hdfsbackuprestore_shard1_replica_n1] o.a.s.c.SolrConfig Using Lucene MatchVersion: 8.0.0
   [junit4]   2> 656781 INFO  (qtp400397695-3247) [n:127.0.0.1:33698_solr c:hdfsbackuprestore s:shard2 r:core_node7 x:hdfsbackuprestore_shard2_replica_n3] o.a.s.s.IndexSchema [hdfsbackuprestore_shard2_replica_n3] Schema name=minimal
   [junit4]   2> 656805 INFO  (qtp400397695-3248) [n:127.0.0.1:33698_solr c:hdfsbackuprestore s:shard1 r:core_node5 x:hdfsbackuprestore_shard1_replica_n1] o.a.s.s.IndexSchema [hdfsbackuprestore_shard1_replica_n1] Schema name=minimal
   [junit4]   2> 657099 INFO  (qtp400397695-3248) [n:127.0.0.1:33698_solr c:hdfsbackuprestore s:shard1 r:core_node5 x:hdfsbackuprestore_shard1_replica_n1] o.a.s.s.IndexSchema Loaded schema minimal/1.1 with uniqueid field id
   [junit4]   2> 657099 INFO  (qtp400397695-3248) [n:127.0.0.1:33698_solr c:hdfsbackuprestore s:shard1 r:core_node5 x:hdfsbackuprestore_shard1_replica_n1] o.a.s.c.CoreContainer Creating SolrCore 'hdfsbackuprestore_shard1_replica_n1' using configuration from collection hdfsbackuprestore, trusted=true
   [junit4]   2> 657099 INFO  (qtp400397695-3248) [n:127.0.0.1:33698_solr c:hdfsbackuprestore s:shard1 r:core_node5 x:hdfsbackuprestore_shard1_replica_n1] o.a.s.m.r.SolrJmxReporter JMX monitoring for 'solr.core.hdfsbackuprestore.shard1.replica_n1' (registry 'solr.core.hdfsbackuprestore.shard1.replica_n1') enabled at server: com.sun.jmx.mbeanserver.JmxMBeanServer@512ffb1b
   [junit4]   2> 657100 INFO  (qtp400397695-3248) [n:127.0.0.1:33698_solr c:hdfsbackuprestore s:shard1 r:core_node5 x:hdfsbackuprestore_shard1_replica_n1] o.a.s.c.SolrCore [[hdfsbackuprestore_shard1_replica_n1] ] Opening new SolrCore at [/home/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-master/solr/build/solr-core/test/J1/temp/solr.cloud.api.collections.TestHdfsCloudBackupRestore_77C9E397208DA122-001/tempDir-002/node2/hdfsbackuprestore_shard1_replica_n1], dataDir=[/home/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-master/solr/build/solr-core/test/J1/temp/solr.cloud.api.collections.TestHdfsCloudBackupRestore_77C9E397208DA122-001/tempDir-002/node2/./hdfsbackuprestore_shard1_replica_n1/data/]
   [junit4]   2> 657111 INFO  (qtp152596348-3238) [n:127.0.0.1:38677_solr c:hdfsbackuprestore s:shard1 r:core_node6 x:hdfsbackuprestore_shard1_replica_t2] o.a.s.c.SolrConfig Using Lucene MatchVersion: 8.0.0
   [junit4]   2> 657119 INFO  (qtp400397695-3247) [n:127.0.0.1:33698_solr c:hdfsbackuprestore s:shard2 r:core_node7 x:hdfsbackuprestore_shard2_replica_n3] o.a.s.s.IndexSchema Loaded schema minimal/1.1 with uniqueid field id
   [junit4]   2> 657119 INFO  (qtp400397695-3247) [n:127.0.0.1:33698_solr c:hdfsbackuprestore s:shard2 r:core_node7 x:hdfsbackuprestore_shard2_replica_n3] o.a.s.c.CoreContainer Creating SolrCore 'hdfsbackuprestore_shard2_replica_n3' using configuration from collection hdfsbackuprestore, trusted=true
   [junit4]   2> 657120 INFO  (qtp400397695-3247) [n:127.0.0.1:33698_solr c:hdfsbackuprestore s:shard2 r:core_node7 x:hdfsbackuprestore_shard2_replica_n3] o.a.s.m.r.SolrJmxReporter JMX monitoring for 'solr.core.hdfsbackuprestore.shard2.replica_n3' (registry 'solr.core.hdfsbackuprestore.shard2.replica_n3') enabled at server: com.sun.jmx.mbeanserver.JmxMBeanServer@512ffb1b
   [junit4]   2> 657120 INFO  (qtp400397695-3247) [n:127.0.0.1:33698_solr c:hdfsbackuprestore s:shard2 r:core_node7 x:hdfsbackuprestore_shard2_replica_n3] o.a.s.c.SolrCore [[hdfsbackuprestore_shard2_replica_n3] ] Opening new SolrCore at [/home/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-master/solr/build/solr-core/test/J1/temp/solr.cloud.api.collections.TestHdfsCloudBackupRestore_77C9E397208DA122-001/tempDir-002/node2/hdfsbackuprestore_shard2_replica_n3], dataDir=[/home/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-master/solr/build/solr-core/test/J1/temp/solr.cloud.api.collections.TestHdfsCloudBackupRestore_77C9E397208DA122-001/tempDir-002/node2/./hdfsbackuprestore_shard2_replica_n3/data/]
   [junit4]   2> 657153 INFO  (qtp152596348-3241) [n:127.0.0.1:38677_solr c:hdfsbackuprestore s:shard2 r:core_node8 x:hdfsbackuprestore_shard2_replica_t4] o.a.s.c.SolrConfig Using Lucene MatchVersion: 8.0.0
   [junit4]   2> 657222 INFO  (qtp152596348-3238) [n:127.0.0.1:38677_solr c:hdfsbackuprestore s:shard1 r:core_node6 x:hdfsbackuprestore_shard1_replica_t2] o.a.s.s.IndexSchema [hdfsbackuprestore_shard1_replica_t2] Schema name=minimal
   [junit4]   2> 657242 INFO  (qtp152596348-3241) [n:127.0.0.1:38677_solr c:hdfsbackuprestore s:shard2 r:core_node8 x:hdfsbackuprestore_shard2_replica_t4] o.a.s.s.IndexSchema [hdfsbackuprestore_shard2_replica_t4] Schema name=minimal
   [junit4]   2> 657277 INFO  (qtp152596348-3238) [n:127.0.0.1:38677_solr c:hdfsbackuprestore s:shard1 r:core_node6 x:hdfsbackuprestore_shard1_replica_t2] o.a.s.s.IndexSchema Loaded schema minimal/1.1 with uniqueid field id
   [junit4]   2> 657278 INFO  (qtp152596348-3238) [n:127.0.0.1:38677_solr c:hdfsbackuprestore s:shard1 r:core_node6 x:hdfsbackuprestore_shard1_replica_t2] o.a.s.c.CoreContainer Creating SolrCore 'hdfsbackuprestore_shard1_replica_t2' using configuration from collection hdfsbackuprestore, trusted=true
   [junit4]   2> 657278 INFO  (qtp152596348-3238) [n:127.0.0.1:38677_solr c:hdfsbackuprestore s:shard1 r:core_node6 x:hdfsbackuprestore_shard1_replica_t2] o.a.s.m.r.SolrJmxReporter JMX monitoring for 'solr.core.hdfsbackuprestore.shard1.replica_t2' (registry 'solr.core.hdfsbackuprestore.shard1.replica_t2') enabled at server: com.sun.jmx.mbeanserver.JmxMBeanServer@512ffb1b
   [junit4]   2> 657278 INFO  (qtp152596348-3238) [n:127.0.0.1:38677_solr c:hdfsbackuprestore s:shard1 r:core_node6 x:hdfsbackuprestore_shard1_replica_t2] o.a.s.c.SolrCore [[hdfsbackuprestore_shard1_replica_t2] ] Opening new SolrCore at [/home/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-master/solr/build/solr-core/test/J1/temp/solr.cloud.api.collections.TestHdfsCloudBackupRestore_77C9E397208DA122-001/tempDir-002/node1/hdfsbackuprestore_shard1_replica_t2], dataDir=[/home/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-master/solr/build/solr-core/test/J1/temp/solr.cloud.api.collections.TestHdfsCloudBackupRestore_77C9E397208DA122-001/tempDir-002/node1/./hdfsbackuprestore_shard1_replica_t2/data/]
   [junit4]   2> 657375 INFO  (qtp152596348-3241) [n:127.0.0.1:38677_solr c:hdfsbackuprestore s:shard2 r:core_node8 x:hdfsbackuprestore_shard2_replica_t4] o.a.s.s.IndexSchema Loaded schema minimal/1.1 with uniqueid field id
   [junit4]   2> 657375 INFO  (qtp152596348-3241) [n:127.0.0.1:38677_solr c:hdfsbackuprestore s:shard2 r:core_node8 x:hdfsbackuprestore_shard2_replica_t4] o.a.s.c.CoreContainer Creating SolrCore 'hdfsbackuprestore_shard2_replica_t4' using configuration from collection hdfsbackuprestore, trusted=true
   [junit4]   2> 657376 INFO  (qtp152596348-3241) [n:127.0.0.1:38677_solr c:hdfsbackuprestore s:shard2 r:core_node8 x:hdfsbackuprestore_shard2_replica_t4] o.a.s.m.r.SolrJmxReporter JMX monitoring for 'solr.core.hdfsbackuprestore.shard2.replica_t4' (registry 'solr.core.hdfsbackuprestore.shard2.replica_t4') enabled at server: com.sun.jmx.mbeanserver.JmxMBeanServer@512ffb1b
   [junit4]   2> 657376 INFO  (qtp152596348-3241) [n:127.0.0.1:38677_solr c:hdfsbackuprestore s:shard2 r:core_node8 x:hdfsbackuprestore_shard2_replica_t4] o.a.s.c.SolrCore [[hdfsbackuprestore_shard2_replica_t4] ] Opening new SolrCore at [/home/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-master/solr/build/solr-core/test/J1/temp/solr.cloud.api.collections.TestHdfsCloudBackupRestore_77C9E397208DA122-001/tempDir-002/node1/hdfsbackuprestore_shard2_replica_t4], dataDir=[/home/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-master/solr/build/solr-core/test/J1/temp/solr.cloud.api.collections.TestHdfsCloudBackupRestore_77C9E397208DA122-001/tempDir-002/node1/./hdfsbackuprestore_shard2_replica_t4/data/]
   [junit4]   2> 657809 INFO  (qtp400397695-3247) [n:127.0.0.1:33698_solr c:hdfsbackuprestore s:shard2 r:core_node7 x:hdfsbackuprestore_shard2_replica_n3] o.a.s.u.UpdateHandler Using UpdateLog implementation: org.apache.solr.update.UpdateLog
   [junit4]   2> 657809 INFO  (qtp400397695-3247) [n:127.0.0.1:33698_solr c:hdfsbackuprestore s:shard2 r:core_node7 x:hdfsbackuprestore_shard2_replica_n3] o.a.s.u.UpdateLog Initializing UpdateLog: dataDir=null defaultSyncLevel=FLUSH numRecordsToKeep=100 maxNumLogsToKeep=10 numVersionBuckets=65536
   [junit4]   2> 657886 INFO  (qtp400397695-3247) [n:127.0.0.1:33698_solr c:hdfsbackuprestore s:shard2 r:core_node7 x:hdfsbackuprestore_shard2_replica_n3] o.a.s.u.CommitTracker Hard AutoCommit: disabled
   [junit4]   2> 657886 INFO  (qtp400397695-3247) [n:127.0.0.1:33698_solr c:hdfsbackuprestore s:shard2 r:core_node7 x:hdfsbackuprestore_shard2_replica_n3] o.a.s.u.CommitTracker Soft AutoCommit: disabled
   [junit4]   2> 657889 INFO  (qtp400397695-3247) [n:127.0.0.1:33698_solr c:hdfsbackuprestore s:shard2 r:core_node7 x:hdfsbackuprestore_shard2_replica_n3] o.a.s.s.SolrIndexSearcher Opening [Searcher@7efed038[hdfsbackuprestore_shard2_replica_n3] main]
   [junit4]   2> 657939 INFO  (qtp400397695-3247) [n:127.0.0.1:33698_solr c:hdfsbackuprestore s:shard2 r:core_node7 x:hdfsbackuprestore_shard2_replica_n3] o.a.s.r.ManagedResourceStorage Configured ZooKeeperStorageIO with znodeBase: /configs/conf1
   [junit4]   2> 657939 INFO  (qtp400397695-3247) [n:127.0.0.1:33698_solr c:hdfsbackuprestore s:shard2 r:core_node7 x:hdfsbackuprestore_shard2_replica_n3] o.a.s.r.ManagedResourceStorage Loaded null at path _rest_managed.json using ZooKeeperStorageIO:path=/configs/conf1
   [junit4]   2> 657940 INFO  (qtp400397695-3247) [n:127.0.0.1:33698_solr c:hdfsbackuprestore s:shard2 r:core_node7 x:hdfsbackuprestore_shard2_replica_n3] o.a.s.h.ReplicationHandler Commits will be reserved for 10000ms.
   [junit4]   2> 657940 INFO  (qtp400397695-3247) [n:127.0.0.1:33698_solr c:hdfsbackuprestore s:shard2 r:core_node7 x:hdfsbackuprestore_shard2_replica_n3] o.a.s.u.UpdateLog Could not find max version in index or recent updates, using new clock 1612088035996860416
   [junit4]   2> 657960 INFO  (qtp400397695-3248) [n:127.0.0.1:33698_solr c:hdfsbackuprestore s:shard1 r:core_node5 x:hdfsbackuprestore_shard1_replica_n1] o.a.s.u.UpdateHandler Using UpdateLog implementation: org.apache.solr.update.UpdateLog
   [junit4]   2> 657960 INFO  (qtp400397695-3248) [n:127.0.0.1:33698_solr c:hdfsbackuprestore s:shard1 r:core_node5 x:hdfsbackuprestore_shard1_replica_n1] o.a.s.u.UpdateLog Initializing UpdateLog: dataDir=null defaultSyncLevel=FLUSH numRecordsToKeep=100 maxNumLogsToKeep=10 numVersionBuckets=65536
   [junit4]   2> 657960 INFO  (searcherExecutor-1494-thread-1-processing-n:127.0.0.1:33698_solr x:hdfsbackuprestore_shard2_replica_n3 c:hdfsbackuprestore s:shard2 r:core_node7) [n:127.0.0.1:33698_solr c:hdfsbackuprestore s:shard2 r:core_node7 x:hdfsbackuprestore_shard2_replica_n3] o.a.s.c.SolrCore [hdfsbackuprestore_shard2_replica_n3] Registered new searcher Searcher@7efed038[hdfsbackuprestore_shard2_replica_n3] main{ExitableDirectoryReader(UninvertingDirectoryReader())}
   [junit4]   2> 657961 INFO  (qtp400397695-3248) [n:127.0.0.1:33698_solr c:hdfsbackuprestore s:shard1 r:core_node5 x:hdfsbackuprestore_shard1_replica_n1] o.a.s.u.CommitTracker Hard AutoCommit: disabled
   [junit4]   2> 657961 INFO  (qtp400397695-3248) [n:127.0.0.1:33698_solr c:hdfsbackuprestore s:shard1 r:core_node5 x:hdfsbackuprestore_shard1_replica_n1] o.a.s.u.CommitTracker Soft AutoCommit: disabled
   [junit4]   2> 658085 INFO  (qtp400397695-3247) [n:127.0.0.1:33698_solr c:hdfsbackuprestore s:shard2 r:core_node7 x:hdfsbackuprestore_shard2_replica_n3] o.a.s.c.ZkShardTerms Successful update of terms at /collections/hdfsbackuprestore/terms/shard2 to Terms{values={core_node7=0}, version=0}
   [junit4]   2> 658101 INFO  (qtp400397695-3247) [n:127.0.0.1:33698_solr c:hdfsbackuprestore s:shard2 r:core_node7 x:hdfsbackuprestore_shard2_replica_n3] o.a.s.c.ShardLeaderElectionContext Waiting until we see more replicas up for shard shard2: total=2 found=1 timeoutin=9986ms
   [junit4]   2> 658121 INFO  (qtp400397695-3248) [n:127.0.0.1:33698_solr c:hdfsbackuprestore s:shard1 r:core_node5 x:hdfsbackuprestore_shard1_replica_n1] o.a.s.s.SolrIndexSearcher Opening [Searcher@39a0b995[hdfsbackuprestore_shard1_replica_n1] main]
   [junit4]   2> 658138 INFO  (qtp400397695-3248) [n:127.0.0.1:33698_solr c:hdfsbackuprestore s:shard1 r:core_node5 x:hdfsbackuprestore_shard1_replica_n1] o.a.s.r.ManagedResourceStorage Configured ZooKeeperStorageIO with znodeBase: /configs/conf1
   [junit4]   2> 658138 INFO  (qtp400397695-3248) [n:127.0.0.1:33698_solr c:hdfsbackuprestore s:shard1 r:core_node5 x:hdfsbackuprestore_shard1_replica_n1] o.a.s.r.ManagedResourceStorage Loaded null at path _rest_managed.json using ZooKeeperStorageIO:path=/configs/conf1
   [junit4]   2> 658139 INFO  (qtp400397695-3248) [n:127.0.0.1:33698_solr c:hdfsbackuprestore s:shard1 r:core_node5 x:hdfsbackuprestore_shard1_replica_n1] o.a.s.h.ReplicationHandler Commits will be reserved for 10000ms.
   [junit4]   2> 658139 INFO  (qtp400397695-3248) [n:127.0.0.1:33698_solr c:hdfsbackuprestore s:shard1 r:core_node5 x:hdfsbackuprestore_shard1_replica_n1] o.a.s.u.UpdateLog Could not find max version in index or recent updates, using new clock 1612088036205527040
   [junit4]   2> 658174 INFO  (searcherExecutor-1493-thread-1-processing-n:127.0.0.1:33698_solr x:hdfsbackuprestore_shard1_replica_n1 c:hdfsbackuprestore s:shard1 r:core_node5) [n:127.0.0.1:33698_solr c:hdfsbackuprestore s:shard1 r:core_node5 x:hdfsbackuprestore_shard1_replica_n1] o.a.s.c.SolrCore [hdfsbackuprestore_shard1_replica_n1] Registered new searcher Searcher@39a0b995[hdfsbackuprestore_shard1_replica_n1] main{ExitableDirectoryReader(UninvertingDirectoryReader())}
   [junit4]   2> 658446 INFO  (qtp152596348-3238) [n:127.0.0.1:38677_solr c:hdfsbackuprestore s:shard1 r:core_node6 x:hdfsbackuprestore_shard1_replica_t2] o.a.s.u.UpdateHandler Using UpdateLog implementation: org.apache.solr.update.UpdateLog
   [junit4]   2> 658446 INFO  (qtp152596348-3238) [n:127.0.0.1:38677_solr c:hdfsbackuprestore s:shard1 r:core_node6 x:hdfsbackuprestore_shard1_replica_t2] o.a.s.u.UpdateLog Initializing UpdateLog: dataDir=null defaultSyncLevel=FLUSH numRecordsToKeep=100 maxNumLogsToKeep=10 numVersionBuckets=65536
   [junit4]   2> 658447 INFO  (qtp152596348-3238) [n:127.0.0.1:38677_solr c:hdfsbackuprestore s:shard1 r:core_node6 x:hdfsbackuprestore_shard1_replica_t2] o.a.s.u.CommitTracker Hard AutoCommit: disabled
   [junit4]   2> 658447 INFO  (qtp152596348-3238) [n:127.0.0.1:38677_solr c:hdfsbackuprestore s:shard1 r:core_node6 x:hdfsbackuprestore_shard1_replica_t2] o.a.s.u.CommitTracker Soft AutoCommit: disabled
   [junit4]   2> 658499 INFO  (qtp152596348-3238) [n:127.0.0.1:38677_solr c:hdfsbackuprestore s:shard1 r:core_node6 x:hdfsbackuprestore_shard1_replica_t2] o.a.s.s.SolrIndexSearcher Opening [Searcher@6051fcb[hdfsbackuprestore_shard1_replica_t2] main]
   [junit4]   2> 658500 INFO  (qtp152596348-3238) [n:127.0.0.1:38677_solr c:hdfsbackuprestore s:shard1 r:core_node6 x:hdfsbackuprestore_shard1_replica_t2] o.a.s.r.ManagedResourceStorage Configured ZooKeeperStorageIO with znodeBase: /configs/conf1
   [junit4]   2> 658501 INFO  (qtp152596348-3238) [n:127.0.0.1:38677_solr c:hdfsbackuprestore s:shard1 r:core_node6 x:hdfsbackuprestore_shard1_replica_t2] o.a.s.r.ManagedResourceStorage Loaded null at path _rest_managed.json using ZooKeeperStorageIO:path=/configs/conf1
   [junit4]   2> 658502 INFO  (qtp152596348-3238) [n:127.0.0.1:38677_solr c:hdfsbackuprestore s:shard1 r:core_node6 x:hdfsbackuprestore_shard1_replica_t2] o.a.s.h.ReplicationHandler Commits will be reserved for 10000ms.
   [junit4]   2> 658502 INFO  (searcherExecutor-1495-thread-1-processing-n:127.0.0.1:38677_solr x:hdfsbackuprestore_shard1_replica_t2 c:hdfsbackuprestore s:shard1 r:core_node6) [n:127.0.0.1:38677_solr c:hdfsbackuprestore s:shard1 r:core_node6 x:hdfsbackuprestore_shard1_replica_t2] o.a.s.c.SolrCore [hdfsbackuprestore_shard1_replica_t2] Registered new searcher Searcher@6051fcb[hdfsbackuprestore_shard1_replica_t2] main{ExitableDirectoryReader(UninvertingDirectoryReader())}
   [junit4]   2> 658519 INFO  (qtp152596348-3238) [n:127.0.0.1:38677_solr c:hdfsbackuprestore s:shard1 r:core_node6 x:hdfsbackuprestore_shard1_replica_t2] o.a.s.u.UpdateLog Could not find max version in index or recent updates, using new clock 1612088036603985920
   [junit4]   2> 658519 INFO  (qtp152596348-3241) [n:127.0.0.1:38677_solr c:hdfsbackuprestore s:shard2 r:core_node8 x:hdfsbackuprestore_shard2_replica_t4] o.a.s.u.UpdateHandler Using UpdateLog implementation: org.apache.solr.update.UpdateLog
   [junit4]   2> 658519 INFO  (qtp152596348-3241) [n:127.0.0.1:38677_solr c:hdfsbackuprestore s:shard2 r:core_node8 x:hdfsbackuprestore_shard2_replica_t4] o.a.s.u.UpdateLog Initializing UpdateLog: dataDir=null defaultSyncLevel=FLUSH numRecordsToKeep=100 maxNumLogsToKeep=10 numVersionBuckets=65536
   [junit4]   2> 658521 INFO  (qtp152596348-3241) [n:127.0.0.1:38677_solr c:hdfsbackuprestore s:shard2 r:core_node8 x:hdfsbackuprestore_shard2_replica_t4] o.a.s.u.CommitTracker Hard AutoCommit: disabled
   [junit4]   2> 658521 INFO  (qtp152596348-3241) [n:127.0.0.1:38677_solr c:hdfsbackuprestore s:shard2 r:core_node8 x:hdfsbackuprestore_shard2_replica_t4] o.a.s.u.CommitTracker Soft AutoCommit: disabled
   [junit4]   2> 658548 INFO  (qtp400397695-3248) [n:127.0.0.1:33698_solr c:hdfsbackuprestore s:shard1 r:core_node5 x:hdfsbackuprestore_shard1_replica_n1] o.a.s.c.ZkShardTerms Successful update of terms at /collections/hdfsbackuprestore/terms/shard1 to Terms{values={core_node5=0}, version=0}
   [junit4]   2> 658557 INFO  (qtp152596348-3241) [n:127.0.0.1:38677_solr c:hdfsbackuprestore s:shard2 r:core_node8 x:hdfsbackuprestore_shard2_replica_t4] o.a.s.s.SolrIndexSearcher Opening [Searcher@72a2b175[hdfsbackuprestore_shard2_replica_t4] main]
   [junit4]   2> 658593 INFO  (qtp152596348-3241) [n:127.0.0.1:38677_solr c:hdfsbackuprestore s:shard2 r:core_node8 x:hdfsbackuprestore_shard2_replica_t4] o.a.s.r.ManagedResourceStorage Configured ZooKeeperStorageIO with znodeBase: /configs/conf1
   [junit4]   2> 658593 INFO  (qtp152596348-3241) [n:127.0.0.1:38677_solr c:hdfsbackuprestore s:shard2 r:core_node8 x:hdfsbackuprestore_shard2_replica_t4] o.a.s.r.ManagedResourceStorage Loaded null at path _rest_managed.json using ZooKeeperStorageIO:path=/configs/conf1
   [junit4]   2> 658594 INFO  (qtp152596348-3241) [n:127.0.0.1:38677_solr c:hdfsbackuprestore s:shard2 r:core_node8 x:hdfsbackuprestore_shard2_replica_t4] o.a.s.h.ReplicationHandler Commits will be reserved for 10000ms.
   [junit4]   2> 658594 INFO  (qtp152596348-3241) [n:127.0.0.1:38677_solr c:hdfsbackuprestore s:shard2 r:core_node8 x:hdfsbackuprestore_shard2_replica_t4] o.a.s.u.UpdateLog Could not find max version in index or recent updates, using new clock 1612088036682629120
   [junit4]   2> 658612 INFO  (qtp152596348-3238) [n:127.0.0.1:38677_solr c:hdfsbackuprestore s:shard1 r:core_node6 x:hdfsbackuprestore_shard1_replica_t2] o.a.s.c.ZkShardTerms Successful update of terms at /collections/hdfsbackuprestore/terms/shard1 to Terms{values={core_node6=0, core_node5=0}, version=1}
   [junit4]   2> 658630 INFO  (qtp152596348-3241) [n:127.0.0.1:38677_solr c:hdfsbackuprestore s:shard2 r:core_node8 x:hdfsbackuprestore_shard2_replica_t4] o.a.s.c.ZkShardTerms Successful update of terms at /collections/hdfsbackuprestore/terms/shard2 to Terms{values={core_node7=0, core_node8=0}, version=1}
   [junit4]   2> 658666 INFO  (searcherExecutor-1496-thread-1-processing-n:127.0.0.1:38677_solr x:hdfsbackuprestore_shard2_replica_t4 c:hdfsbackuprestore s:shard2 r:core_node8) [n:127.0.0.1:38677_solr c:hdfsbackuprestore s:shard2 r:core_node8 x:hdfsbackuprestore_shard2_replica_t4] o.a.s.c.SolrCore [hdfsbackuprestore_shard2_replica_t4] Registered new searcher Searcher@72a2b175[hdfsbackuprestore_shard2_replica_t4] main{ExitableDirectoryReader(UninvertingDirectoryReader())}
   [junit4]   2> 658668 INFO  (qtp400397695-3248) [n:127.0.0.1:33698_solr c:hdfsbackuprestore s:shard1 r:core_node5 x:hdfsbackuprestore_shard1_replica_n1] o.a.s.c.ShardLeaderElectionContext Enough replicas found to continue.
   [junit4]   2> 658668 INFO  (qtp400397695-3248) [n:127.0.0.1:33698_solr c:hdfsbackuprestore s:shard1 r:core_node5 x:hdfsbackuprestore_shard1_replica_n1] o.a.s.c.ShardLeaderElectionContext I may be the new leader - try and sync
   [junit4]   2> 658668 INFO  (qtp400397695-3248) [n:127.0.0.1:33698_solr c:hdfsbackuprestore s:shard1 r:core_node5 x:hdfsbackuprestore_shard1_replica_n1] o.a.s.c.SyncStrategy Sync replicas to https://127.0.0.1:33698/solr/hdfsbackuprestore_shard1_replica_n1/
   [junit4]   2> 658795 INFO  (qtp400397695-3248) [n:127.0.0.1:33698_solr c:hdfsbackuprestore s:shard1 r:core_node5 x:hdfsbackuprestore_shard1_replica_n1] o.a.s.u.PeerSync PeerSync: core=hdfsbackuprestore_shard1_replica_n1 url=https://127.0.0.1:33698/solr START replicas=[https://127.0.0.1:38677/solr/hdfsbackuprestore_shard1_replica_t2/] nUpdates=100
   [junit4]   2> 658811 INFO  (qtp400397695-3248) [n:127.0.0.1:33698_solr c:hdfsbackuprestore s:shard1 r:core_node5 x:hdfsbackuprestore_shard1_replica_n1] o.a.s.u.PeerSync PeerSync: core=hdfsbackuprestore_shard1_replica_n1 url=https://127.0.0.1:33698/solr DONE.  We have no versions.  sync failed.
   [junit4]   2> 658850 INFO  (qtp152596348-3239) [n:127.0.0.1:38677_solr c:hdfsbackuprestore s:shard1 r:core_node6 x:hdfsbackuprestore_shard1_replica_t2] o.a.s.c.S.Request [hdfsbackuprestore_shard1_replica_t2]  webapp=/solr path=/get params={distrib=false&qt=/get&fingerprint=false&getVersions=100&wt=javabin&version=2} status=0 QTime=0
   [junit4]   2> 658850 INFO  (qtp400397695-3248) [n:127.0.0.1:33698_solr c:hdfsbackuprestore s:shard1 r:core_node5 x:hdfsbackuprestore_shard1_replica_n1] o.a.s.c.SyncStrategy Leader's attempt to sync with shard failed, moving to the next candidate
   [junit4]   2> 658850 INFO  (qtp400397695-3248) [n:127.0.0.1:33698_solr c:hdfsbackuprestore s:shard1 r:core_node5 x:hdfsbackuprestore_shard1_replica_n1] o.a.s.c.ShardLeaderElectionContext We failed sync, but we have no versions - we can't sync in that case - we were active before, so become leader anyway
   [junit4]   2> 658850 INFO  (qtp400397695-3248) [n:127.0.0.1:33698_solr c:hdfsbackuprestore s:shard1 r:core_node5 x:hdfsbackuprestore_shard1_replica_n1] o.a.s.c.ShardLeaderElectionContext Found all replicas participating in election, clear LIR
   [junit4]   2> 658870 INFO  (qtp400397695-3248) [n:127.0.0.1:33698_solr c:hdfsbackuprestore s:shard1 r:core_node5 x:hdfsbackuprestore_shard1_replica_n1] o.a.s.c.ShardLeaderElectionContext I am the new leader: https://127.0.0.1:33698/solr/hdfsbackuprestore_shard1_replica_n1/ shard1
   [junit4]   2> 659088 INFO  (qtp400397695-3248) [n:127.0.0.1:33698_solr c:hdfsbackuprestore s:shard1 r:core_node5 x:hdfsbackuprestore_shard1_replica_n1] o.a.s.c.ZkController I am the leader, no recovery necessary
   [junit4]   2> 659090 INFO  (qtp400397695-3248) [n:127.0.0.1:33698_solr c:hdfsbackuprestore s:shard1 r:core_node5 x:hdfsbackuprestore_shard1_replica_n1] o.a.s.s.HttpSolrCall [admin] webapp=null path=/admin/cores params={qt=/admin/cores&collection.configName=conf1&newCollection=true&collection=hdfsbackuprestore&version=2&replicaType=NRT&property.customKey=customValue&coreNodeName=core_node5&name=hdfsbackuprestore_shard1_replica_n1&action=CREATE&numShards=2&shard=shard1&wt=javabin} status=0 QTime=3894
   [junit4]   2> 659125 INFO  (qtp400397695-3247) [n:127.0.0.1:33698_solr c:hdfsbackuprestore s:shard2 r:core_node7 x:hdfsbackuprestore_shard2_replica_n3] o.a.s.c.ShardLeaderElectionContext Enough replicas found to continue.
   [junit4]   2> 659125 INFO  (qtp400397695-3247) [n:127.0.0.1:33698_solr c:hdfsbackuprestore s:shard2 r:core_node7 x:hdfsbackuprestore_shard2_replica_n3] o.a.s.c.ShardLeaderElectionContext I may be the new leader - try and sync
   [junit4]   2> 659125 INFO  (qtp400397695-3247) [n:127.0.0.1:33698_solr c:hdfsbackuprestore s:shard2 r:core_node7 x:hdfsbackuprestore_shard2_replica_n3] o.a.s.c.SyncStrategy Sync replicas to https://127.0.0.1:33698/solr/hdfsbackuprestore_shard2_replica_n3/
   [junit4]   2> 659125 INFO  (qtp400397695-3247) [n:127.0.0.1:33698_solr c:hdfsbackuprestore s:shard2 r:core_node7 x:hdfsbackuprestore_shard2_replica_n3] o.a.s.u.PeerSync PeerSync: core=hdfsbackuprestore_shard2_replica_n3 url=https://127.0.0.1:33698/solr START replicas=[https://127.0.0.1:38677/solr/hdfsbackuprestore_shard2_replica_t4/] nUpdates=100
   [junit4]   2> 659125 INFO  (qtp400397695-3247) [n:127.0.0.1:33698_solr c:hdfsbackuprestore s:shard2 r:core_node7 x:hdfsbackuprestore_shard2_replica_n3] o.a.s.u.PeerSync PeerSync: core=hdfsbackuprestore_shard2_replica_n3 url=https://127.0.0.1:33698/solr DONE.  We have no versions.  sync failed.
   [junit4]   2> 659143 INFO  (qtp152596348-3235) [n:127.0.0.1:38677_solr c:hdfsbackuprestore s:shard2 r:core_node8 x:hdfsbackuprestore_shard2_replica_t4] o.a.s.c.S.Request [hdfsbackuprestore_shard2_replica_t4]  webapp=/solr path=/get params={distrib=false&qt=/get&fingerprint=false&getVersions=100&wt=javabin&version=2} status=0 QTime=1
   [junit4]   2> 659143 INFO  (qtp400397695-3247) [n:127.0.0.1:33698_solr c:hdfsbackuprestore s:shard2 r:core_node7 x:hdfsbackuprestore_shard2_replica_n3] o.a.s.c.SyncStrategy Leader's attempt to sync with shard failed, moving to the next candidate
   [junit4]   2> 659143 INFO  (qtp400397695-3247) [n:127.0.0.1:33698_solr c:hdfsbackuprestore s:shard2 r:core_node7 x:hdfsbackuprestore_shard2_replica_n3] o.a.s.c.ShardLeaderElectionContext We failed sync, but we have no versions - we can't sync in that case - we were active before, so become leader anyway
   [junit4]   2> 659143 INFO  (qtp400397695-3247) [n:127.0.0.1:33698_solr c:hdfsbackuprestore s:shard2 r:core_node7 x:hdfsbackuprestore_shard2_replica_n3] o.a.s.c.ShardLeaderElectionContext Found all replicas participating in election, clear LIR
   [junit4]   2> 659231 INFO  (qtp400397695-3247) [n:127.0.0.1:33698_solr c:hdfsbackuprestore s:shard2 r:core_node7 x:hdfsbackuprestore_shard2_replica_n3] o.a.s.c.ShardLeaderElectionContext I am the new leader: https://127.0.0.1:33698/solr/hdfsbackuprestore_shard2_replica_n3/ shard2
   [junit4]   2> 659360 INFO  (zkCallback-755-thread-2) [    ] o.a.s.c.c.ZkStateReader A cluster state change: [WatchedEvent state:SyncConnected type:NodeDataChanged path:/collections/hdfsbackuprestore/state.json] for collection [hdfsbackuprestore] has occurred - updating... (live nodes size: [2])
   [junit4]   2> 659396 INFO  (qtp400397695-3247) [n:127.0.0.1:33698_solr c:hdfsbackuprestore s:shard2 r:core_node7 x:hdfsbackuprestore_shard2_replica_n3] o.a.s.c.ZkController I am the leader, no recovery necessary
   [junit4]   2> 659434 INFO  (qtp400397695-3247) [n:127.0.0.1:33698_solr c:hdfsbackuprestore s:shard2 r:core_node7 x:hdfsbackuprestore_shard2_replica_n3] o.a.s.s.HttpSolrCall [admin] webapp=null path=/admin/cores params={qt=/admin/cores&collection.configName=conf1&newCollection=true&collection=hdfsbackuprestore&version=2&replicaType=NRT&property.customKey=customValue&coreNodeName=core_node7&name=hdfsbackuprestore_shard2_replica_n3&action=CREATE&numShards=2&shard=shard2&wt=javabin} status=0 QTime=4274
   [junit4]   2> 659543 INFO  (zkCallback-755-thread-2) [    ] o.a.s.c.c.ZkStateReader A cluster state change: [WatchedEvent state:SyncConnected type:NodeDataChanged path:/collections/hdfsbackuprestore/state.json] for collection [hdfsbackuprestore] has occurred - updating... (live nodes size: [2])
   [junit4]   2> 659617 INFO  (qtp152596348-3238) [n:127.0.0.1:38677_solr c:hdfsbackuprestore s:shard1 r:core_node6 x:hdfsbackuprestore_shard1_replica_t2] o.a.s.c.ZkController hdfsbackuprestore_shard1_replica_t2 starting background replication from leader
   [junit4]   2> 659617 INFO  (qtp152596348-3238) [n:127.0.0.1:38677_solr c:hdfsbackuprestore s:shard1 r:core_node6 x:hdfsbackuprestore_shard1_replica_t2] o.a.s.c.ReplicateFromLeader Will start replication from leader with poll interval: 00:00:03
   [junit4]   2> 659667 INFO  (qtp152596348-3238) [n:127.0.0.1:38677_solr c:hdfsbackuprestore s:shard1 r:core_node6 x:hdfsbackuprestore_shard1_replica_t2] o.a.s.h.ReplicationHandler Poll scheduled at an interval of 3000ms
   [junit4]   2> 659668 INFO  (qtp152596348-3238) [n:127.0.0.1:38677_solr c:hdfsbackuprestore s:shard1 r:core_node6 x:hdfsbackuprestore_shard1_replica_t2] o.a.s.h.ReplicationHandler Commits will be reserved for 10000ms.
   [junit4]   2> 659670 INFO  (qtp152596348-3241) [n:127.0.0.1:38677_solr c:hdfsbackuprestore s:shard2 r:core_node8 x:hdfsbackuprestore_shard2_replica_t4] o.a.s.c.ZkController hdfsbackuprestore_shard2_replica_t4 starting background replication from leader
   [junit4]   2> 659670 INFO  (qtp152596348-3241) [n:127.0.0.1:38677_solr c:hdfsbackuprestore s:shard2 r:core_node8 x:hdfsbackuprestore_shard2_replica_t4] o.a.s.c.ReplicateFromLeader Will start replication from leader with poll interval: 00:00:03
   [junit4]   2> 659761 INFO  (qtp152596348-3238) [n:127.0.0.1:38677_solr c:hdfsbackuprestore s:shard1 r:core_node6 x:hdfsbackuprestore_shard1_replica_t2] o.a.s.s.HttpSolrCall [admin] webapp=null path=/admin/cores params={qt=/admin/cores&collection.configName=conf1&newCollection=true&collection=hdfsbackuprestore&version=2&replicaType=TLOG&property.customKey=customValue&coreNodeName=core_node6&name=hdfsbackuprestore_shard1_replica_t2&action=CREATE&numShards=2&shard=shard1&wt=javabin} status=0 QTime=4094
   [junit4]   2> 659774 INFO  (qtp152596348-3241) [n:127.0.0.1:38677_solr c:hdfsbackuprestore s:shard2 r:core_node8 x:hdfsbackuprestore_shard2_replica_t4] o.a.s.h.ReplicationHandler Poll scheduled at an interval of 3000ms
   [junit4]   2> 659774 INFO  (qtp152596348-3241) [n:127.0.0.1:38677_solr c:hdfsbackuprestore s:shard2 r:core_node8 x:hdfsbackuprestore_shard2_replica_t4] o.a.s.h.ReplicationHandler Commits will be reserved for 10000ms.
   [junit4]   2> 659798 INFO  (zkCallback-755-thread-2) [    ] o.a.s.c.c.ZkStateReader A cluster state change: [WatchedEvent state:SyncConnected type:NodeDataChanged path:/collections/hdfsbackuprestore/state.json] for collection [hdfsbackuprestore] has occurred - updating... (live nodes size: [2])
   [junit4]   2> 659810 INFO  (zkCallback-769-thread-1) [    ] o.a.s.c.c.ZkStateReader A cluster state change: [WatchedEvent state:SyncConnected type:NodeDataChanged path:/collections/hdfsbackuprestore/state.json] for collection [hdfsbackuprestore] has occurred - updating... (live nodes size: [2])
   [junit4]   2> 659831 INFO  (qtp152596348-3241) [n:127.0.0.1:38677_solr c:hdfsbackuprestore s:shard2 r:core_node8 x:hdfsbackuprestore_shard2_replica_t4] o.a.s.s.HttpSolrCall [admin] webapp=null path=/admin/cores params={qt=/admin/cores&collection.configName=conf1&newCollection=true&collection=hdfsbackuprestore&version=2&replicaType=TLOG&property.customKey=customValue&coreNodeName=core_node8&name=hdfsbackuprestore_shard2_replica_t4&action=CREATE&numShards=2&shard=shard2&wt=javabin} status=0 QTime=4327
   [junit4]   2> 659926 INFO  (qtp400397695-3245) [n:127.0.0.1:33698_solr    ] o.a.s.h.a.CollectionsHandler Wait for new collection to be active for at most 30 seconds. Check all shard replicas
   [junit4]   2> 659946 INFO  (zkCallback-755-thread-2) [    ] o.a.s.c.c.ZkStateReader A cluster state change: [WatchedEvent state:SyncConnected type:NodeDataChanged path:/collections/hdfsbackuprestore/state.json] for collection [hdfsbackuprestore] has occurred - updating... (live nodes size: [2])
   [junit4]   2> 659947 INFO  (zkCallback-769-thread-1) [    ] o.a.s.c.c.ZkStateReader A cluster state change: [WatchedEvent state:SyncConnected type:NodeDataChanged path:/collections/hdfsbackuprestore/state.json] for collection [hdfsbackuprestore] has occurred - updating... (live nodes size: [2])
   [junit4]   2> 660107 INFO  (OverseerCollectionConfigSetProcessor-73351559434469380-127.0.0.1:33698_solr-n_0000000000) [n:127.0.0.1:33698_solr    ] o.a.s.c.OverseerTaskQueue Response ZK path: /overseer/collection-queue-work/qnr-0000000000 doesn't exist.  Requestor may have disconnected from ZooKeeper
   [junit4]   2> 660727 INFO  (indexFetcher-1514-thread-1) [    ] o.a.s.h.IndexFetcher Updated masterUrl to https://127.0.0.1:33698/solr/hdfsbackuprestore_shard1_replica_n1/
   [junit4]   2> 660783 INFO  (qtp400397695-3244) [n:127.0.0.1:33698_solr c:hdfsbackuprestore s:shard1 r:core_node5 x:hdfsbackuprestore_shard1_replica_n1] o.a.s.c.S.Request [hdfsbackuprestore_shard1_replica_n1]  webapp=/solr path=/replication params={qt=/replication&wt=javabin&version=2&command=indexversion} status=0 QTime=0
   [junit4]   2> 660817 INFO  (indexFetcher-1514-thread-1) [    ] o.a.s.h.IndexFetcher Master's generation: 1
   [junit4]   2> 660817 INFO  (indexFetcher-1514-thread-1) [    ] o.a.s.h.IndexFetcher Master's version: 0
   [junit4]   2> 660817 INFO  (indexFetcher-1514-thread-1) [    ] o.a.s.h.IndexFetcher Slave's generation: 1
   [junit4]   2> 660817 INFO  (indexFetcher-1514-thread-1) [    ] o.a.s.h.IndexFetcher Slave's version: 0
   [junit4]   2> 660817 INFO  (indexFetcher-1514-thread-1) [    ] o.a.s.h.IndexFetcher New index in Master. Deleting mine...
   [junit4]   2> 660819 INFO  (indexFetcher-1514-thread-1) [    ] o.a.s.s.SolrIndexSearcher Opening [Searcher@2896ac3d[hdfsbackuprestore_shard1_replica_t2] main]
   [junit4]   2> 660834 INFO  (searcherExecutor-1495-thread-1) [    ] o.a.s.c.SolrCore [hdfsbackuprestore_shard1_replica_t2] Registered new searcher Searcher@2896ac3d[hdfsbackuprestore_shard1_replica_t2] main{ExitableDirectoryReader(UninvertingDirectoryReader())}
   [junit4]   2> 660937 INFO  (qtp400397695-3245) [n:127.0.0.1:33698_solr    ] o.a.s.s.HttpSolrCall [admin] webapp=null path=/admin/collections params={collection.configName=conf1&version=2&pullReplicas=0&property.customKey=customValue&maxShardsPerNode=2&router.field=shard_s&autoAddReplicas=true&name=hdfsbackuprestore&nrtReplicas=1&action=CREATE&numShards=2&tlogReplicas=1&wt=javabin} status=0 QTime=7066
   [junit4]   2> 661080 INFO  (qtp400397695-3246) [n:127.0.0.1:33698_solr c:hdfsbackuprestore s:shard2 r:core_node7 x:hdfsbackuprestore_shard2_replica_n3] o.a.s.c.ZkShardTerms Successful update of terms at /collections/hdfsbackuprestore/terms/shard2 to Terms{values={core_node7=1, core_node8=1}, version=2}
   [junit4]   2> 661163 INFO  (qtp152596348-3237) [n:127.0.0.1:38677_solr c:hdfsbackuprestore s:shard2 r:core_node8 x:hdfsbackuprestore_shard2_replica_t4] o.a.s.u.p.LogUpdateProcessorFactory [hdfsbackuprestore_shard2_replica_t4]  webapp=/solr path=/update params={update.distrib=FROMLEADER&distrib.from=https://127.0.0.1:33698/solr/hdfsbackuprestore_shard2_replica_n3/&wt=javabin&version=2}{add=[0 (1612088039202357248), 1 (1612088039205502976), 2 (1612088039205502977), 3 (1612088039206551552), 4 (1612088039206551553), 5 (1612088039206551554), 6 (1612088039206551555), 7 (1612088039206551556), 8 (1612088039206551557), 9 (1612088039206551558), ... (53 adds)]} 0 71
   [junit4]   2> 661170 INFO  (qtp400397695-3246) [n:127.0.0.1:33698_solr c:hdfsbackuprestore s:shard2 r:core_node7 x:hdfsbackuprestore_shard2_replica_n3] o.a.s.u.p.LogUpdateProcessorFactory [hdfsbackuprestore_shard2_replica_n3]  webapp=/solr path=/update params={wt=javabin&version=2}{add=[0 (1612088039202357248), 1 (1612088039205502976), 2 (1612088039205502977), 3 (1612088039206551552), 4 (1612088039206551553), 5 (1612088039206551554), 6 (1612088039206551555), 7 (1612088039206551556), 8 (1612088039206551557), 9 (1612088039206551558), ... (53 adds)]} 0 173
   [junit4]   2> 661303 INFO  (qtp400397695-3248) [n:127.0.0.1:33698_solr c:hdfsbackuprestore s:shard2 r:core_node7 x:hdfsbackuprestore_shard2_replica_n3] o.a.s.u.DirectUpdateHandler2 start commit{_version_=1612088039523221504,optimize=false,openSearcher=true,waitSearcher=true,expungeDeletes=false,softCommit=false,prepareCommit=false}
   [junit4]   2> 661303 INFO  (qtp400397695-3248) [n:127.0.0.1:33698_solr c:hdfsbackuprestore s:shard2 r:core_node7 x:hdfsbackuprestore_shard2_replica_n3] o.a.s.u.SolrIndexWriter Calling setCommitData with IW:org.apache.solr.update.SolrIndexWriter@5741c304 commitCommandVersion:1612088039523221504
   [junit4]   2> 661309 INFO  (qtp152596348-3241) [n:127.0.0.1:38677_solr c:hdfsbackuprestore s:shard1 r:core_node6 x:hdfsbackuprestore_shard1_replica_t2] o.a.s.u.TestInjection Start waiting for replica in sync with leader
   [junit4]   2> 661429 INFO  (qtp152596348-3239) [n:127.0.0.1:38677_solr c:hdfsbackuprestore s:shard2 r:core_node8 x:hdfsbackuprestore_shard2_replica_t4] o.a.s.u.TestInjection Start waiting for replica in sync with leader
   [junit4]   2> 661622 INFO  (qtp400397695-3246) [n:127.0.0.1:33698_solr c:hdfsbackuprestore s:shard1 r:core_node5 x:hdfsbackuprestore_shard1_replica_n1] o.a.s.u.DirectUpdateHandler2 start commit{_version_=1612088039857717248,optimize=false,openSearcher=true,waitSearcher=true,expungeDeletes=false,softCommit=false,prepareCommit=false}
   [junit4]   2> 661732 INFO  (qtp400397695-3244) [n:127.0.0.1:33698_solr c:hdfsbackuprestore s:shard1 r:core_node5 x:hdfsbackuprestore_shard1_replica_n1] o.a.s.c.S.Request [hdfsbackuprestore_shard1_replica_n1]  webapp=/solr path=/replication params={qt=/replication&wt=javabin&version=2&command=details} status=0 QTime=273
   [junit4]   2> 661743 INFO  (qtp152596348-3241) [n:127.0.0.1:38677_solr c:hdfsbackuprestore s:shard1 r:core_node6 x:hdfsbackuprestore_shard1_replica_t2] o.a.s.u.p.LogUpdateProcessorFactory [hdfsbackuprestore_shard1_replica_t2]  webapp=/solr path=/update params={update.distrib=FROMLEADER&waitSearcher=true&openSearcher=true&commit=true&softCommit=false&distrib.from=https://127.0.0.1:33698/solr/hdfsbackuprestore_shard1_replica_n1/&commit_end_point=true&wt=javabin&version=2&expungeDeletes=false}{commit=} 0 434
   [junit4]   2> 661747 INFO  (qtp400397695-3246) [n:127.0.0.1:33698_solr c:hdfsbackuprestore s:shard1 r:core_node5 x:hdfsbackuprestore_shard1_replica_n1] o.a.s.u.DirectUpdateHandler2 No uncommitted changes. Skipping IW.commit.
   [junit4]   2> 661893 INFO  (qtp400397695-3246) [n:127.0.0.1:33698_solr c:hdfsbackuprestore s:shard1 r:core_node5 x:hdfsbackuprestore_shard1_replica_n1] o.a.s.u.DirectUpdateHandler2 end_commit_flush
   [junit4]   2> 661894 INFO  (qtp400397695-3246) [n:127.0.0.1:33698_solr c:hdfsbackuprestore s:shard1 r:core_node5 x:hdfsbackuprestore_shard1_replica_n1] o.a.s.u.p.LogUpdateProcessorFactory [hdfsbackuprestore_shard1_replica_n1]  webapp=/solr path=/update params={update.distrib=FROMLEADER&waitSearcher=true&openSearcher=true&commit=true&softCommit=false&distrib.from=https://127.0.0.1:33698/solr/hdfsbackuprestore_shard1_replica_n1/&commit_end_point=true&wt=javabin&version=2&expungeDeletes=false}{commit=} 0 272
   [junit4]   2> 661896 INFO  (qtp400397695-3319) [n:127.0.0.1:33698_solr c:hdfsbackuprestore s:shard2 r:core_node7 x:hdfsbackuprestore_shard2_replica_n3] o.a.s.c.S.Request [hdfsbackuprestore_shard2_replica_n3]  webapp=/solr path=/replication params={qt=/replication&wt=javabin&version=2&command=details} status=0 QTime=55
   [junit4]   2> 662501 INFO  (qtp400397695-3335) [n:127.0.0.1:33698_solr c:hdfsbackuprestore s:shard2 r:core_node7 x:hdfsbackuprestore_shard2_replica_n3] o.a.s.c.S.Request [hdfsbackuprestore_shard2_replica_n3]  webapp=/solr path=/replication params={qt=/replication&wt=javabin&version=2&command=details} status=0 QTime=0
   [junit4]   2> 662620 INFO  (indexFetcher-1516-thread-1) [    ] o.a.s.h.IndexFetcher Updated masterUrl to https://127.0.0.1:33698/solr/hdfsbackuprestore_shard2_replica_n3/
   [junit4]   2> 662673 INFO  (qtp400397695-3244) [n:127.0.0.1:33698_solr c:hdfsbackuprestore s:shard2 r:core_node7 x:hdfsbackuprestore_shard2_replica_n3] o.a.s.c.S.Request [hdfsbackuprestore_shard2_replica_n3]  webapp=/solr path=/replication params={qt=/replication&wt=javabin&version=2&command=indexversion} status=0 QTime=0
   [junit4]   2> 662730 INFO  (indexFetcher-1516-thread-1) [    ] o.a.s.h.IndexFetcher Master's generation: 1
   [junit4]   2> 662730 INFO  (indexFetcher-1516-thread-1) [    ] o.a.s.h.IndexFetcher Master's version: 0
   [junit4]   2> 662730 INFO  (indexFetcher-1516-thread-1) [    ] o.a.s.h.IndexFetcher Slave's generation: 1
   [junit4]   2> 662730 INFO  (indexFetcher-1516-thread-1) [    ] o.a.s.h.IndexFetcher Slave's version: 0
   [junit4]   2> 662730 INFO  (indexFetcher-1516-thread-1) [    ] o.a.s.h.IndexFetcher New index in Master. Deleting mine...
   [junit4]   2> 662812 INFO  (indexFetcher-1516-thread-1) [    ] o.a.s.s.SolrIndexSearcher Opening [Searcher@a3e25af[hdfsbackuprestore_shard2_replica_t4] main]
   [junit4]   2> 662814 INFO  (searcherExecutor-1496-thread-1) [    ] o.a.s.c.SolrCore [hdfsbackuprestore_shard2_replica_t4] Registered new searcher Searcher@a3e25af[hdfsbackuprestore_shard2_replica_t4] main{ExitableDirectoryReader(UninvertingDirectoryReader())}
   [junit4]   2> 662915 INFO  (qtp400397695-3248) [n:127.0.0.1:33698_solr c:hdfsbackuprestore s:shard2 r:core_node7 x:hdfsbackuprestore_shard2_replica_n3] o.a.s.s.SolrIndexSearcher Opening [Searcher@c2c7ae5[hdfsbackuprestore_shard2_replica_n3] main]
   [junit4]   2> 662922 INFO  (searcherExecutor-1494-thread-1-processing-n:127.0.0.1:33698_solr x:hdfsbackuprestore_shard2_replica_n3 c:hdfsbackuprestore s:shard2 r:core_node7) [n:127.0.0.1:33698_solr c:hdfsbackuprestore s:shard2 r:core_node7 x:hdfsbackuprestore_shard2_replica_n3] o.a.s.c.SolrCore [hdfsbackuprestore_shard2_replica_n3] Registered new searcher Searcher@c2c7ae5[hdfsbackuprestore_shard2_replica_n3] main{ExitableDirectoryReader(UninvertingDirectoryReader(Uninverting(_0(8.0.0):C53)))}
   [junit4]   2> 662922 INFO  (qtp400397695-3248) [n:127.0.0.1:33698_solr c:hdfsbackuprestore s:shard2 r:core_node7 x:hdfsbackuprestore_shard2_replica_n3] o.a.s.u.DirectUpdateHandler2 end_commit_flush
   [junit4]   2> 662922 INFO  (qtp400397695-3248) [n:127.0.0.1:33698_solr c:hdfsbackuprestore s:shard2 r:core_node7 x:hdfsbackuprestore_shard2_replica_n3] o.a.s.u.p.LogUpdateProcessorFactory [hdfsbackuprestore_shard2_replica_n3]  webapp=/solr path=/update params={update.distrib=FROMLEADER&waitSearcher=true&openSearcher=true&commit=true&softCommit=false&distrib.from=https://127.0.0.1:33698/solr/hdfsbackuprestore_shard1_replica_n1/&commit_end_point=true&wt=javabin&version=2&expungeDeletes=false}{commit=} 0 1619
   [junit4]   2> 663052 INFO  (qtp400397695-3244) [n:127.0.0.1:33698_solr c:hdfsbackuprestore s:shard2 r:core_node7 x:hdfsbackuprestore_shard2_replica_n3] o.a.s.c.S.Request [hdfsbackuprestore_shard2_replica_n3]  webapp=/solr path=/replication params={qt=/replication&wt=javabin&version=2&command=details} status=0 QTime=1
   [junit4]   2> 663594 INFO  (qtp400397695-3335) [n:127.0.0.1:33698_solr c:hdfsbackuprestore s:shard2 r:core_node7 x:hdfsbackuprestore_shard2_replica_n3] o.a.s.c.S.Request [hdfsbackuprestore_shard2_replica_n3]  webapp=/solr path=/replication params={qt=/replication&wt=javabin&version=2&command=details} status=0 QTime=0
   [junit4]   2> 663730 INFO  (qtp400397695-3319) [n:127.0.0.1:33698_solr c:hdfsbackuprestore s:shard1 r:core_node5 x:hdfsbackuprestore_shard1_replica_n1] o.a.s.c.S.Request [hdfsbackuprestore_shard1_replica_n1]  webapp=/solr path=/replication params={qt=/replication&wt=javabin&version=2&command=indexversion} status=0 QTime=0
   [junit4]   2> 663732 INFO  (indexFetcher-1514-thread-1) [    ] o.a.s.h.IndexFetcher Master's generation: 1
   [junit4]   2> 663732 INFO  (indexFetcher-1514-thread-1) [    ] o.a.s.h.IndexFetcher Master's version: 0
   [junit4]   2> 663732 INFO  (indexFetcher-1514-thread-1) [    ] o.a.s.h.IndexFetcher Slave's generation: 1
   [junit4]   2> 663732 INFO  (indexFetcher-1514-thread-1) [    ] o.a.s.h.IndexFetcher Slave's version: 0
   [junit4]   2> 663732 INFO  (indexFetcher-1514-thread-1) [    ] o.a.s.h.IndexFetcher New index in Master. Deleting mine...
   [junit4]   2> 663732 INFO  (indexFetcher-1514-thread-1) [    ] o.a.s.s.SolrIndexSearcher Opening [Searcher@5c3339e4[hdfsbackuprestore_shard1_replica_t2] main]
   [junit4]   2> 663764 INFO  (searcherExecutor-1495-thread-1) [    ] o.a.s.c.SolrCore [hdfsbackuprestore_shard1_replica_t2] Registered new searcher Searcher@5c3339e4[hdfsbackuprestore_shard1_replica_t2] main{ExitableDirectoryReader(UninvertingDirectoryReader())}
   [junit4]   2> 664203 INFO  (qtp400397695-3319) [n:127.0.0.1:33698_solr c:hdfsbackuprestore s:shard2 r:core_node7 x:hdfsbackuprestore_shard2_replica_n3] o.a.s.c.S.Request [hdfsbackuprestore_shard2_replica_n3]  webapp=/solr path=/replication params={qt=/replication&wt=javabin&version=2&command=details} status=0 QTime=0
   [junit4]   2> 664754 INFO  (qtp400397695-3335) [n:127.0.0.1:33698_solr c:hdfsbackuprestore s:shard2 r:core_node7 x:hdfsbackuprestore_shard2_replica_n3] o.a.s.c.S.Request [hdfsbackuprestore_shard2_replica_n3]  webapp=/solr path=/replication params={qt=/replication&wt=javabin&version=2&command=details} status=0 QTime=0
   [junit4]   2> 665320 INFO  (qtp400397695-3319) [n:127.0.0.1:33698_solr c:hdfsbackuprestore s:shard2 r:core_node7 x:hdfsbackuprestore_shard2_replica_n3] o.a.s.c.S.Request [hdfsbackuprestore_shard2_replica_n3]  webapp=/solr path=/replication params={qt=/replication&wt=javabin&version=2&command=details} status=0 QTime=0
   [junit4]   2> 665631 INFO  (qtp400397695-3244) [n:127.0.0.1:33698_solr c:hdfsbackuprestore s:shard2 r:core_node7 x:hdfsbackuprestore_shard2_replica_n3] o.a.s.c.S.Request [hdfsbackuprestore_shard2_replica_n3]  webapp=/solr path=/replication params={qt=/replication&wt=javabin&version=2&command=indexversion} status=0 QTime=0
   [junit4]   2> 665631 INFO  (indexFetcher-1516-thread-1) [    ] o.a.s.h.IndexFetcher Master's generation: 2
   [junit4]   2> 665631 INFO  (indexFetcher-1516-thread-1) [    ] o.a.s.h.IndexFetcher Master's version: 1537406959079
   [junit4]   2> 665631 INFO  (indexFetcher-1516-thread-1) [    ] o.a.s.h.IndexFetcher Slave's generation: 1
   [junit4]   2> 665631 INFO  (indexFetcher-1516-thread-1) [    ] o.a.s.h.IndexFetcher Slave's version: 0
   [junit4]   2> 665631 INFO  (indexFetcher-1516-thread-1) [    ] o.a.s.h.IndexFetcher Starting replication process
   [junit4]   2> 665667 INFO  (qtp400397695-3319) [n:127.0.0.1:33698_solr c:hdfsbackuprestore s:shard2 r:core_node7 x:hdfsbackuprestore_shard2_replica_n3] o.a.s.c.S.Request [hdfsbackuprestore_shard2_replica_n3]  webapp=/solr path=/replication params={generation=2&qt=/replication&tlogFiles=false&wt=javabin&version=2&command=filelist} status=0 QTime=35
   [junit4]   2> 665667 INFO  (indexFetcher-1516-thread-1) [    ] o.a.s.h.IndexFetcher Number of files in latest index in master: 17
   [junit4]   2> 665674 INFO  (indexFetcher-1516-thread-1) [    ] o.a.s.h.IndexFetcher Starting download (fullCopy=false) to MMapDirectory@/home/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-master/solr/build/solr-core/test/J1/temp/solr.cloud.api.collections.TestHdfsCloudBackupRestore_77C9E397208DA122-001/tempDir-002/node1/hdfsbackuprestore_shard2_replica_t4/data/index.20180920072923444 lockFactory=org.apache.lucene.store.NativeFSLockFactory@4c4b6a33
   [junit4]   2> 665688 INFO  (qtp400397695-3244) [n:127.0.0.1:33698_solr c:hdfsbackuprestore s:shard2 r:core_node7 x:hdfsbackuprestore_shard2_replica_n3] o.a.s.c.S.Request [hdfsbackuprestore_shard2_replica_n3]  webapp=/solr path=/replication params={generation=2&qt=/replication&file=_0.si&checksum=true&wt=filestream&command=filecontent} status=0 QTime=0
   [junit4]   2> 665690 INFO  (qtp400397695-3244) [n:127.0.0.1:33698_solr c:hdfsbackuprestore s:shard2 r:core_node7 x:hdfsbackuprestore_shard2_replica_n3] o.a.s.c.S.Request [hdfsbackuprestore_shard2_replica_n3]  webapp=/solr path=/replication params={generation=2&qt=/replication&file=_0_FSTOrd50_0.doc&checksum=true&wt=filestream&command=filecontent} status=0 QTime=0
   [junit4]   2> 665708 INFO  (qtp400397695-3319) [n:127.0.0.1:33698_solr c:hdfsbackuprestore s:shard2 r:core_node7 x:hdfsbackuprestore_shard2_replica_n3] o.a.s.c.S.Request [hdfsbackuprestore_shard2_replica_n3]  webapp=/solr path=/replication params={generation=2&qt=/replication&file=_0_FSTOrd50_0.pos&checksum=true&wt=filestream&command=filecontent} status=0 QTime=0
   [junit4]   2> 665710 INFO  (qtp400397695-3244) [n:127.0.0.1:33698_solr c:hdfsbackuprestore s:shard2 r:core_node7 x:hdfsbackuprestore_shard2_replica_n3] o.a.s.c.S.Request [hdfsbackuprestore_shard2_replica_n3]  webapp=/solr path=/replication params={generation=2&qt=/replication&file=_0_LuceneVarGapFixedInterval_0.tiv&checksum=true&wt=filestream&command=filecontent} status=0 QTime=0
   [junit4]   2> 665726 INFO  (qtp400397695-3319) [n:127.0.0.1:33698_solr c:hdfsbackuprestore s:shard2 r:core_node7 x:hdfsbackuprestore_shard2_replica_n3] o.a.s.c.S.Request [hdfsbackuprestore_shard2_replica_n3]  webapp=/solr path=/replication params={generation=2&qt=/replication&file=_0.nvd&checksum=true&wt=filestream&command=filecontent} status=0 QTime=0
   [junit4]   2> 665741 INFO  (qtp400397695-3244) [n:127.0.0.1:33698_solr c:hdfsbackuprestore s:shard2 r:core_node7 x:hdfsbackuprestore_shard2_replica_n3] o.a.s.c.S.Request [hdfsbackuprestore_shard2_replica_n3]  webapp=/solr path=/replication params={generation=2&qt=/replication&file=_0_LuceneVarGapFixedInterval_0.pos&checksum=true&wt=filestream&command=filecontent} status=0 QTime=0
   [junit4]   2> 665757 INFO  (qtp400397695-3319) [n:127.0.0.1:33698_solr c:hdfsbackuprestore s:shard2 r:core_node7 x:hdfsbackuprestore_shard2_replica_n3] o.a.s.c.S.Request [hdfsbackuprestore_shard2_replica_n3]  webapp=/solr path=/replication params={generation=2&qt=/replication&file=_0.fdx&checksum=true&wt=filestream&command=filecontent} status=0 QTime=0
   [junit4]   2> 665759 INFO  (qtp400397695-3244) [n:127.0.0.1:33698_solr c:hdfsbackuprestore s:shard2 r:core_node7 x:hdfsbackuprestore_shard2_replica_n3] o.a.s.c.S.Request [hdfsbackuprestore_shard2_replica_n3]  webapp=/solr path=/replication params={generation=2&qt=/replication&file=_0_LuceneVarGapFixedInterval_0.doc&checksum=true&wt=filestream&command=filecontent} status=0 QTime=0
   [junit4]   2> 665798 INFO  (qtp400397695-3319) [n:127.0.0.1:33698_solr c:hdfsbackuprestore s:shard2 r:core_node7 x:hdfsbackuprestore_shard2_replica_n3] o.a.s.c.S.Request [hdfsbackuprestore_shard2_replica_n3]  webapp=/solr path=/replication params={generation=2&qt=/replication&file=_0.fdt&checksum=true&wt=filestream&command=filecontent} status=0 QTime=0
   [junit4]   2> 665800 INFO  (qtp400397695-3244) [n:127.0.0.1:33698_solr c:hdfsbackuprestore s:shard2 r:core_node7 x:hdfsbackuprestore_shard2_replica_n3] o.a.s.c.S.Request [hdfsbackuprestore_shard2_replica_n3]  webapp=/solr path=/replication params={generation=2&qt=/replication&file=_0_Asserting_0.dvd&checksum=true&wt=filestream&command=filecontent} status=0 QTime=0
   [junit4]   2> 665814 INFO  (qtp400397695-3244) [n:127.0.0.1:33698_solr c:hdfsbackuprestore s:shard2 r:core_node7 x:hdfsbackuprestore_shard2_replica_n3] o.a.s.c.S.Request [hdfsbackuprestore_shard2_replica_n3]  webapp=/solr path=/replication params={generation=2&qt=/replication&file=_0_LuceneVarGapFixedInterval_0.tib&checksum=true&wt=filestream&command=filecontent} status=0 QTime=0
   [junit4]   2> 665816 INFO  (qtp400397695-3244) [n:127.0.0.1:33698_solr c:hdfsbackuprestore s:shard2 r:core_node7 x:hdfsbackuprestore_shard2_replica_n3] o.a.s.c.S.Request [hdfsbackuprestore_shard2_replica_n3]  webapp=/solr path=/replication params={generation=2&qt=/replication&file=_0_FSTOrd50_0.tix&checksum=true&wt=filestream&command=filecontent} status=0 QTime=0
   [junit4]   2> 665835 INFO  (qtp400397695-3244) [n:127.0.0.1:33698_solr c:hdfsbackuprestore s:shard2 r:core_node7 x:hdfsbackuprestore_shard2_replica_n3] o.a.s.c.S.Request [hdfsbackuprestore_shard2_replica_n3]  webapp=/solr path=/replication params={generation=2&qt=/replication&file=_0_Asserting_0.dvm&checksum=true&wt=filestream&command=filecontent} status=0 QTime=0
   [junit4]   2> 665837 INFO  (qtp400397695-3319) [n:127.0.0.1:33698_solr c:hdfsbackuprestore s:shard2 r:core_node7 x:hdfsbackuprestore_shard2_replica_n3] o.a.s.c.S.Request [hdfsbackuprestore_shard2_replica_n3]  webapp=/solr path=/replication params={generation=2&qt=/replication&file=_0_FSTOrd50_0.tbk&checksum=true&wt=filestream&command=filecontent} status=0 QTime=0
   [junit4]   2> 665912 INFO  (qtp400397695-3242) [n:127.0.0.1:33698_solr c:hdfsbackuprestore s:shard2 r:core_node7 x:hdfsbackuprestore_shard2_replica_n3] o.a.s.c.S.Request [hdfsbackuprestore_shard2_replica_n3]  webapp=/solr path=/replication params={generation=2&qt=/replication&file=_0.nvm&checksum=true&wt=filestream&command=filecontent} status=0 QTime=0
   [junit4]   2> 665926 INFO  (qtp400397695-3319) [n:127.0.0.1:33698_solr c:hdfsbackuprestore s:shard2 r:core_node7 x:hdfsbackuprestore_shard2_replica_n3] o.a.s.c.S.Request [hdfsbackuprestore_shard2_replica_n3]  webapp=/solr path=/replication params={generation=2&qt=/replication&file=_0.fnm&checksum=true&wt=filestream&command=filecontent} status=0 QTime=12
   [junit4]   2> 665929 INFO  (qtp400397695-3319) [n:127.0.0.1:33698_solr c:hdfsbackuprestore s:shard2 r:core_node7 x:hdfsbackuprestore_shard2_replica_n3] o.a.s.c.S.Request [hdfsbackuprestore_shard2_replica_n3]  webapp=/solr path=/replication params={generation=2&qt=/replication&file=segments_2&checksum=true&wt=filestream&command=filecontent} status=0 QTime=0
   [junit4]   2> 665930 INFO  (indexFetcher-1516-thread-1) [    ] o.a.s.h.IndexFetcher Bytes downloaded: 5398, Bytes skipped downloading: 0
   [junit4]   2> 665930 INFO  (indexFetcher-1516-thread-1) [    ] o.a.s.h.IndexFetcher Total time taken for download (fullCopy=false,bytesDownloaded=5398) : 0 secs (null bytes/sec) to MMapDirectory@/home/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-master/solr/build/solr-core/test/J1/temp/solr.cloud.api.collections.TestHdfsCloudBackupRestore_77C9E397208DA122-001/tempDir-002/node1/hdfsbackuprestore_shard2_replica_t4/data/index.20180920072923444 lockFactory=org.apache.lucene.store.NativeFSLockFactory@4c4b6a33
   [junit4]   2> 665969 INFO  (qtp400397695-3335) [n:127.0.0.1:33698_solr c:hdfsbackuprestore s:shard2 r:core_node7 x:hdfsbackuprestore_shard2_replica_n3] o.a.s.c.S.Request [hdfsbackuprestore_shard2_replica_n3]  webapp=/solr path=/replication params={qt=/replication&wt=javabin&version=2&command=details} status=0 QTime=0
   [junit4]   2> 666117 INFO  (indexFetcher-1516-thread-1) [    ] o.a.s.u.DefaultSolrCoreState New IndexWriter is ready to be used.
   [junit4]   2> 666119 INFO  (indexFetcher-1516-thread-1) [    ] o.a.s.s.SolrIndexSearcher Opening [Searcher@59ab6e30[hdfsbackuprestore_shard2_replica_t4] main]
   [junit4]   2> 666121 INFO  (searcherExecutor-1496-thread-1) [    ] o.a.s.c.SolrCore [hdfsbackuprestore_shard2_replica_t4] Registered new searcher Searcher@59ab6e30[hdfsbackuprestore_shard2_replica_t4] main{ExitableDirectoryReader(UninvertingDirectoryReader(Uninverting(_0(8.0.0):C53)))}
   [junit4]   2> 666531 INFO  (qtp400397695-3242) [n:127.0.0.1:33698_solr c:hdfsbackuprestore s:shard2 r:core_node7 x:hdfsbackuprestore_shard2_replica_n3] o.a.s.c.S.Request [hdfsbackuprestore_shard2_replica_n3]  webapp=/solr path=/replication params={qt=/replication&wt=javabin&version=2&command=details} status=0 QTime=1
   [junit4]   2> 666550 INFO  (qtp152596348-3239) [n:127.0.0.1:38677_solr c:hdfsbackuprestore s:shard2 r:core_node8 x:hdfsbackuprestore_shard2_replica_t4] o.a.s.u.TestInjection Waiting time for tlog replica to be in sync with leader: 5121
   [junit4]   2> 666550 INFO  (qtp152596348-3239) [n:127.0.0.1:38677_solr c:hdfsbackuprestore s:shard2 r:core_node8 x:hdfsbackuprestore_shard2_replica_t4] o.a.s.u.p.LogUpdateProcessorFactory [hdfsbackuprestore_shard2_replica_t4]  webapp=/solr path=/update params={update.distrib=FROMLEADER&waitSearcher=true&openSearcher=true&commit=true&softCommit=false&distrib.from=https://127.0.0.1:33698/solr/hdfsbackuprestore_shard1_replica_n1/&commit_end_point=true&wt=javabin&version=2&expungeDeletes=false}{commit=} 0 5122
   [junit4]   2> 666551 INFO  (qtp400397695-3247) [n:127.0.0.1:33698_solr c:hdfsbackuprestore s:shard1 r:core_node5 x:hdfsbackuprestore_shard1_replica_n1] o.a.s.u.p.LogUpdateProcessorFactory [hdfsbackuprestore_shard1_replica_n1]  webapp=/solr path=/update params={_stateVer_=hdfsbackuprestore:10&waitSearcher=true&commit=true&softCommit=false&wt=javabin&version=2}{commit=} 0 5368
   [junit4]   2> 666551 INFO  (TEST-TestHdfsCloudBackupRestore.test-seed#[77C9E397208DA122]) [    ] o.a.s.c.a.c.AbstractCloudBackupRestoreTestCase Indexed 53 docs to collection: hdfsbackupr

[...truncated too long message...]

ivy-fail-disallowed-ivy-version:

ivy-fail:

ivy-fail:

ivy-configure:
[ivy:configure] :: loading settings :: file = /home/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-master/lucene/top-level-ivy-settings.xml

resolve:

ivy-availability-check:
[loadresource] Do not set property disallowed.ivy.jars.list as its length is 0.

-ivy-fail-disallowed-ivy-version:

ivy-fail:

ivy-configure:
[ivy:configure] :: loading settings :: file = /home/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-master/lucene/top-level-ivy-settings.xml

resolve:

ivy-availability-check:
[loadresource] Do not set property disallowed.ivy.jars.list as its length is 0.

-ivy-fail-disallowed-ivy-version:

ivy-fail:

ivy-fail:

ivy-configure:
[ivy:configure] :: loading settings :: file = /home/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-master/lucene/top-level-ivy-settings.xml

resolve:

ivy-availability-check:
[loadresource] Do not set property disallowed.ivy.jars.list as its length is 0.

-ivy-fail-disallowed-ivy-version:

ivy-fail:

ivy-configure:
[ivy:configure] :: loading settings :: file = /home/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-master/lucene/top-level-ivy-settings.xml

resolve:

ivy-availability-check:
[loadresource] Do not set property disallowed.ivy.jars.list as its length is 0.

-ivy-fail-disallowed-ivy-version:

ivy-fail:

ivy-fail:

ivy-configure:
[ivy:configure] :: loading settings :: file = /home/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-master/lucene/top-level-ivy-settings.xml

resolve:

ivy-availability-check:
[loadresource] Do not set property disallowed.ivy.jars.list as its length is 0.

-ivy-fail-disallowed-ivy-version:

ivy-fail:

ivy-fail:

ivy-configure:
[ivy:configure] :: loading settings :: file = /home/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-master/lucene/top-level-ivy-settings.xml

resolve:

ivy-availability-check:
[loadresource] Do not set property disallowed.ivy.jars.list as its length is 0.

-ivy-fail-disallowed-ivy-version:

ivy-fail:

ivy-fail:

ivy-configure:
[ivy:configure] :: loading settings :: file = /home/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-master/lucene/top-level-ivy-settings.xml

resolve:

ivy-availability-check:
[loadresource] Do not set property disallowed.ivy.jars.list as its length is 0.

-ivy-fail-disallowed-ivy-version:

ivy-fail:

ivy-fail:

ivy-configure:
[ivy:configure] :: loading settings :: file = /home/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-master/lucene/top-level-ivy-settings.xml

resolve:

ivy-availability-check:
[loadresource] Do not set property disallowed.ivy.jars.list as its length is 0.

-ivy-fail-disallowed-ivy-version:

ivy-fail:

ivy-fail:

ivy-configure:
[ivy:configure] :: loading settings :: file = /home/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-master/lucene/top-level-ivy-settings.xml

resolve:

ivy-availability-check:
[loadresource] Do not set property disallowed.ivy.jars.list as its length is 0.

-ivy-fail-disallowed-ivy-version:

ivy-fail:

ivy-fail:

ivy-configure:
[ivy:configure] :: loading settings :: file = /home/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-master/lucene/top-level-ivy-settings.xml

resolve:

ivy-availability-check:
[loadresource] Do not set property disallowed.ivy.jars.list as its length is 0.

-ivy-fail-disallowed-ivy-version:

ivy-fail:

ivy-fail:

ivy-configure:
[ivy:configure] :: loading settings :: file = /home/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-master/lucene/top-level-ivy-settings.xml

resolve:

ivy-availability-check:
[loadresource] Do not set property disallowed.ivy.jars.list as its length is 0.

-ivy-fail-disallowed-ivy-version:

ivy-fail:

ivy-fail:

ivy-configure:
[ivy:configure] :: loading settings :: file = /home/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-master/lucene/top-level-ivy-settings.xml

resolve:

ivy-availability-check:
[loadresource] Do not set property disallowed.ivy.jars.list as its length is 0.

-ivy-fail-disallowed-ivy-version:

ivy-fail:

ivy-fail:

ivy-configure:
[ivy:configure] :: loading settings :: file = /home/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-master/lucene/top-level-ivy-settings.xml

resolve:

ivy-availability-check:
[loadresource] Do not set property disallowed.ivy.jars.list as its length is 0.

-ivy-fail-disallowed-ivy-version:

ivy-fail:

ivy-fail:

ivy-configure:
[ivy:configure] :: loading settings :: file = /home/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-master/lucene/top-level-ivy-settings.xml

resolve:

jar-checksums:
    [mkdir] Created dir: /home/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-master/solr/null2023587574
     [copy] Copying 238 files to /home/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-master/solr/null2023587574
   [delete] Deleting directory /home/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-master/solr/null2023587574

check-working-copy:
[ivy:cachepath] :: resolving dependencies :: org.eclipse.jgit#org.eclipse.jgit-caller;working
[ivy:cachepath] 	confs: [default]
[ivy:cachepath] 	found org.eclipse.jgit#org.eclipse.jgit;4.6.0.201612231935-r in public
[ivy:cachepath] 	found com.jcraft#jsch;0.1.53 in public
[ivy:cachepath] 	found com.googlecode.javaewah#JavaEWAH;1.1.6 in public
[ivy:cachepath] 	found org.apache.httpcomponents#httpclient;4.3.6 in public
[ivy:cachepath] 	found org.apache.httpcomponents#httpcore;4.3.3 in public
[ivy:cachepath] 	found commons-logging#commons-logging;1.1.3 in public
[ivy:cachepath] 	found commons-codec#commons-codec;1.6 in public
[ivy:cachepath] 	found org.slf4j#slf4j-api;1.7.2 in public
[ivy:cachepath] :: resolution report :: resolve 41ms :: artifacts dl 3ms
	---------------------------------------------------------------------
	|                  |            modules            ||   artifacts   |
	|       conf       | number| search|dwnlded|evicted|| number|dwnlded|
	---------------------------------------------------------------------
	|      default     |   8   |   0   |   0   |   0   ||   8   |   0   |
	---------------------------------------------------------------------
[wc-checker] Initializing working copy...
[wc-checker] SLF4J: Failed to load class "org.slf4j.impl.StaticLoggerBinder".
[wc-checker] SLF4J: Defaulting to no-operation (NOP) logger implementation
[wc-checker] SLF4J: See http://www.slf4j.org/codes.html#StaticLoggerBinder for further details.
[wc-checker] Checking working copy status...

-jenkins-base:

BUILD SUCCESSFUL
Total time: 206 minutes 23 seconds
Archiving artifacts
java.lang.InterruptedException: no matches found within 10000
	at hudson.FilePath$34.hasMatch(FilePath.java:2678)
	at hudson.FilePath$34.invoke(FilePath.java:2557)
	at hudson.FilePath$34.invoke(FilePath.java:2547)
	at hudson.FilePath$FileCallableWrapper.call(FilePath.java:2918)
Also:   hudson.remoting.Channel$CallSiteStackTrace: Remote call to lucene2
		at hudson.remoting.Channel.attachCallSiteStackTrace(Channel.java:1741)
		at hudson.remoting.UserRequest$ExceptionResponse.retrieve(UserRequest.java:357)
		at hudson.remoting.Channel.call(Channel.java:955)
		at hudson.FilePath.act(FilePath.java:1036)
		at hudson.FilePath.act(FilePath.java:1025)
		at hudson.FilePath.validateAntFileMask(FilePath.java:2547)
		at hudson.tasks.ArtifactArchiver.perform(ArtifactArchiver.java:243)
		at hudson.tasks.BuildStepCompatibilityLayer.perform(BuildStepCompatibilityLayer.java:81)
		at hudson.tasks.BuildStepMonitor$1.perform(BuildStepMonitor.java:20)
		at hudson.model.AbstractBuild$AbstractBuildExecution.perform(AbstractBuild.java:744)
		at hudson.model.AbstractBuild$AbstractBuildExecution.performAllBuildSteps(AbstractBuild.java:690)
		at hudson.model.Build$BuildExecution.post2(Build.java:186)
		at hudson.model.AbstractBuild$AbstractBuildExecution.post(AbstractBuild.java:635)
		at hudson.model.Run.execute(Run.java:1819)
		at hudson.model.FreeStyleBuild.run(FreeStyleBuild.java:43)
		at hudson.model.ResourceController.execute(ResourceController.java:97)
		at hudson.model.Executor.run(Executor.java:429)
Caused: hudson.FilePath$TunneledInterruptedException
	at hudson.FilePath$FileCallableWrapper.call(FilePath.java:2920)
	at hudson.remoting.UserRequest.perform(UserRequest.java:212)
	at hudson.remoting.UserRequest.perform(UserRequest.java:54)
	at hudson.remoting.Request$2.run(Request.java:369)
	at hudson.remoting.InterceptingExecutorService$1.call(InterceptingExecutorService.java:72)
	at java.util.concurrent.FutureTask.run(FutureTask.java:266)
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142)
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617)
	at java.lang.Thread.run(Thread.java:748)
Caused: java.lang.InterruptedException: java.lang.InterruptedException: no matches found within 10000
	at hudson.FilePath.act(FilePath.java:1038)
	at hudson.FilePath.act(FilePath.java:1025)
	at hudson.FilePath.validateAntFileMask(FilePath.java:2547)
	at hudson.tasks.ArtifactArchiver.perform(ArtifactArchiver.java:243)
	at hudson.tasks.BuildStepCompatibilityLayer.perform(BuildStepCompatibilityLayer.java:81)
	at hudson.tasks.BuildStepMonitor$1.perform(BuildStepMonitor.java:20)
	at hudson.model.AbstractBuild$AbstractBuildExecution.perform(AbstractBuild.java:744)
	at hudson.model.AbstractBuild$AbstractBuildExecution.performAllBuildSteps(AbstractBuild.java:690)
	at hudson.model.Build$BuildExecution.post2(Build.java:186)
	at hudson.model.AbstractBuild$AbstractBuildExecution.post(AbstractBuild.java:635)
	at hudson.model.Run.execute(Run.java:1819)
	at hudson.model.FreeStyleBuild.run(FreeStyleBuild.java:43)
	at hudson.model.ResourceController.execute(ResourceController.java:97)
	at hudson.model.Executor.run(Executor.java:429)
No artifacts found that match the file pattern "**/*.events,heapdumps/**,**/hs_err_pid*". Configuration error?
Recording test results
Build step 'Publish JUnit test result report' changed build result to UNSTABLE
Email was triggered for: Unstable (Test Failures)
Sending email for trigger: Unstable (Test Failures)