Build: https://jenkins.thetaphi.de/job/Lucene-Solr-master-Linux/23199/
Java: 64bit/jdk1.8.0_172 -XX:-UseCompressedOops -XX:+UseParallelGC

1 test failed.
FAILED:  org.apache.solr.cloud.api.collections.TestHdfsCloudBackupRestore.test

Error Message:
Error from server at http://127.0.0.1:41447/solr: create the collection time out:180s

Stack Trace:
org.apache.solr.client.solrj.impl.HttpSolrClient$RemoteSolrException: Error from server at http://127.0.0.1:41447/solr: create the collection time out:180s
        at __randomizedtesting.SeedInfo.seed([F18D3CB4E3862CCD:79D9036E4D7A4135]:0)
        at org.apache.solr.client.solrj.impl.HttpSolrClient.executeMethod(HttpSolrClient.java:643)
        at org.apache.solr.client.solrj.impl.HttpSolrClient.request(HttpSolrClient.java:255)
        at org.apache.solr.client.solrj.impl.HttpSolrClient.request(HttpSolrClient.java:244)
        at org.apache.solr.client.solrj.impl.LBHttpSolrClient.doRequest(LBHttpSolrClient.java:483)
        at org.apache.solr.client.solrj.impl.LBHttpSolrClient.request(LBHttpSolrClient.java:413)
        at org.apache.solr.client.solrj.impl.CloudSolrClient.sendRequest(CloudSolrClient.java:1107)
        at org.apache.solr.client.solrj.impl.CloudSolrClient.requestWithRetryOnStaleState(CloudSolrClient.java:884)
        at org.apache.solr.client.solrj.impl.CloudSolrClient.request(CloudSolrClient.java:817)
        at org.apache.solr.client.solrj.SolrRequest.process(SolrRequest.java:194)
        at org.apache.solr.client.solrj.SolrRequest.process(SolrRequest.java:211)
        at org.apache.solr.cloud.api.collections.AbstractCloudBackupRestoreTestCase.test(AbstractCloudBackupRestoreTestCase.java:125)
        at org.apache.solr.cloud.api.collections.TestHdfsCloudBackupRestore.test(TestHdfsCloudBackupRestore.java:213)
        at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
        at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)
        at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
        at java.lang.reflect.Method.invoke(Method.java:498)
        at com.carrotsearch.randomizedtesting.RandomizedRunner.invoke(RandomizedRunner.java:1742)
        at com.carrotsearch.randomizedtesting.RandomizedRunner$8.evaluate(RandomizedRunner.java:935)
        at com.carrotsearch.randomizedtesting.RandomizedRunner$9.evaluate(RandomizedRunner.java:971)
        at com.carrotsearch.randomizedtesting.RandomizedRunner$10.evaluate(RandomizedRunner.java:985)
        at com.carrotsearch.randomizedtesting.rules.SystemPropertiesRestoreRule$1.evaluate(SystemPropertiesRestoreRule.java:57)
        at org.apache.lucene.util.TestRuleSetupTeardownChained$1.evaluate(TestRuleSetupTeardownChained.java:49)
        at org.apache.lucene.util.AbstractBeforeAfterRule$1.evaluate(AbstractBeforeAfterRule.java:45)
        at org.apache.lucene.util.TestRuleThreadAndTestName$1.evaluate(TestRuleThreadAndTestName.java:48)
        at org.apache.lucene.util.TestRuleIgnoreAfterMaxFailures$1.evaluate(TestRuleIgnoreAfterMaxFailures.java:64)
        at org.apache.lucene.util.TestRuleMarkFailure$1.evaluate(TestRuleMarkFailure.java:47)
        at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
        at com.carrotsearch.randomizedtesting.ThreadLeakControl$StatementRunner.run(ThreadLeakControl.java:368)
        at com.carrotsearch.randomizedtesting.ThreadLeakControl.forkTimeoutingTask(ThreadLeakControl.java:817)
        at com.carrotsearch.randomizedtesting.ThreadLeakControl$3.evaluate(ThreadLeakControl.java:468)
        at com.carrotsearch.randomizedtesting.RandomizedRunner.runSingleTest(RandomizedRunner.java:944)
        at com.carrotsearch.randomizedtesting.RandomizedRunner$5.evaluate(RandomizedRunner.java:830)
        at com.carrotsearch.randomizedtesting.RandomizedRunner$6.evaluate(RandomizedRunner.java:880)
        at com.carrotsearch.randomizedtesting.RandomizedRunner$7.evaluate(RandomizedRunner.java:891)
        at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
        at com.carrotsearch.randomizedtesting.rules.SystemPropertiesRestoreRule$1.evaluate(SystemPropertiesRestoreRule.java:57)
        at org.apache.lucene.util.AbstractBeforeAfterRule$1.evaluate(AbstractBeforeAfterRule.java:45)
        at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
        at org.apache.lucene.util.TestRuleStoreClassName$1.evaluate(TestRuleStoreClassName.java:41)
        at com.carrotsearch.randomizedtesting.rules.NoShadowingOrOverridesOnMethodsRule$1.evaluate(NoShadowingOrOverridesOnMethodsRule.java:40)
        at com.carrotsearch.randomizedtesting.rules.NoShadowingOrOverridesOnMethodsRule$1.evaluate(NoShadowingOrOverridesOnMethodsRule.java:40)
        at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
        at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
        at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
        at org.apache.lucene.util.TestRuleAssertionsRequired$1.evaluate(TestRuleAssertionsRequired.java:53)
        at org.apache.lucene.util.TestRuleMarkFailure$1.evaluate(TestRuleMarkFailure.java:47)
        at org.apache.lucene.util.TestRuleIgnoreAfterMaxFailures$1.evaluate(TestRuleIgnoreAfterMaxFailures.java:64)
        at org.apache.lucene.util.TestRuleIgnoreTestSuites$1.evaluate(TestRuleIgnoreTestSuites.java:54)
        at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
        at com.carrotsearch.randomizedtesting.ThreadLeakControl$StatementRunner.run(ThreadLeakControl.java:368)
        at java.lang.Thread.run(Thread.java:748)
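
The timeout above is raised server-side: CollectionsHandler submits the
CREATE to the Overseer and waits up to 180s for it to complete before
failing the request with the "create the collection time out:180s"
message. For reference, a minimal SolrJ sketch of the call that times
out, reconstructed from the "Invoked Collection Action :create"
parameters in the build log below (the helper class and the "client"
CloudSolrClient are illustrative, not copied from the test source):

    import java.io.IOException;
    import org.apache.solr.client.solrj.SolrServerException;
    import org.apache.solr.client.solrj.impl.CloudSolrClient;
    import org.apache.solr.client.solrj.request.CollectionAdminRequest;

    class CreateBackupRestoreCollection {
      // numShards=2, nrtReplicas=2, tlogReplicas=1, pullReplicas=1, as logged.
      static void create(CloudSolrClient client) throws SolrServerException, IOException {
        CollectionAdminRequest.Create req = CollectionAdminRequest
            .createCollection("hdfsbackuprestore", "conf1", 2, 2, 1, 1);
        req.setMaxShardsPerNode(4);
        req.setAutoAddReplicas(true);
        req.withProperty("customKey", "customValue"); // property.customKey=customValue
        // process() blocks until the Overseer reports completion or the
        // server-side 180s collection-operation timeout expires.
        req.process(client);
      }
    }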




Build Log:
[...truncated 13106 lines...]
   [junit4] Suite: 
org.apache.solr.cloud.api.collections.TestHdfsCloudBackupRestore
   [junit4]   2> 328290 INFO  
(SUITE-TestHdfsCloudBackupRestore-seed#[F18D3CB4E3862CCD]-worker) [    ] 
o.a.s.SolrTestCaseJ4 SecureRandom sanity checks: 
test.solr.allowed.securerandom=null & java.security.egd=file:/dev/./urandom
   [junit4]   2> Creating dataDir: 
/home/jenkins/workspace/Lucene-Solr-master-Linux/solr/build/solr-core/test/J1/temp/solr.cloud.api.collections.TestHdfsCloudBackupRestore_F18D3CB4E3862CCD-001/init-core-data-001
   [junit4]   2> 328290 WARN  
(SUITE-TestHdfsCloudBackupRestore-seed#[F18D3CB4E3862CCD]-worker) [    ] 
o.a.s.SolrTestCaseJ4 startTrackingSearchers: numOpens=49 numCloses=49
   [junit4]   2> 328290 INFO  
(SUITE-TestHdfsCloudBackupRestore-seed#[F18D3CB4E3862CCD]-worker) [    ] 
o.a.s.SolrTestCaseJ4 Using PointFields (NUMERIC_POINTS_SYSPROP=true) 
w/NUMERIC_DOCVALUES_SYSPROP=false
   [junit4]   2> 328291 INFO  
(SUITE-TestHdfsCloudBackupRestore-seed#[F18D3CB4E3862CCD]-worker) [    ] 
o.a.s.SolrTestCaseJ4 Randomized ssl (false) and clientAuth (true) via: 
@org.apache.solr.util.RandomizeSSL(reason=, value=NaN, ssl=NaN, clientAuth=NaN)
   [junit4]   2> 328777 WARN  
(SUITE-TestHdfsCloudBackupRestore-seed#[F18D3CB4E3862CCD]-worker) [    ] 
o.a.h.u.NativeCodeLoader Unable to load native-hadoop library for your 
platform... using builtin-java classes where applicable
   [junit4]   1> Formatting using clusterid: testClusterID
   [junit4]   2> 329348 WARN  
(SUITE-TestHdfsCloudBackupRestore-seed#[F18D3CB4E3862CCD]-worker) [    ] 
o.a.h.m.i.MetricsConfig Cannot locate configuration: tried 
hadoop-metrics2-namenode.properties,hadoop-metrics2.properties
   [junit4]   2> 329473 INFO  
(SUITE-TestHdfsCloudBackupRestore-seed#[F18D3CB4E3862CCD]-worker) [    ] 
o.m.log Logging to org.apache.logging.slf4j.Log4jLogger@2299c455 via 
org.mortbay.log.Slf4jLog
   [junit4]   2> 329494 WARN  
(SUITE-TestHdfsCloudBackupRestore-seed#[F18D3CB4E3862CCD]-worker) [    ] 
o.a.h.h.HttpRequestLog Jetty request log can only be enabled using Log4j
   [junit4]   2> 329744 INFO  
(SUITE-TestHdfsCloudBackupRestore-seed#[F18D3CB4E3862CCD]-worker) [    ] 
o.m.log jetty-6.1.26
   [junit4]   2> 329775 INFO  
(SUITE-TestHdfsCloudBackupRestore-seed#[F18D3CB4E3862CCD]-worker) [    ] 
o.m.log Extract 
jar:file:/home/jenkins/.ivy2/cache/org.apache.hadoop/hadoop-hdfs/tests/hadoop-hdfs-2.7.4-tests.jar!/webapps/hdfs
 to ./temp/Jetty_localhost_localdomain_37619_hdfs____.thbiuk/webapp
   [junit4]   2> 330624 INFO  
(SUITE-TestHdfsCloudBackupRestore-seed#[F18D3CB4E3862CCD]-worker) [    ] 
o.m.log Started 
HttpServer2$SelectChannelConnectorWithSafeStartup@localhost.localdomain:37619
   [junit4]   2> 331228 WARN  
(SUITE-TestHdfsCloudBackupRestore-seed#[F18D3CB4E3862CCD]-worker) [    ] 
o.a.h.h.HttpRequestLog Jetty request log can only be enabled using Log4j
   [junit4]   2> 331232 INFO  
(SUITE-TestHdfsCloudBackupRestore-seed#[F18D3CB4E3862CCD]-worker) [    ] 
o.m.log jetty-6.1.26
   [junit4]   2> 331240 INFO  
(SUITE-TestHdfsCloudBackupRestore-seed#[F18D3CB4E3862CCD]-worker) [    ] 
o.m.log Extract 
jar:file:/home/jenkins/.ivy2/cache/org.apache.hadoop/hadoop-hdfs/tests/hadoop-hdfs-2.7.4-tests.jar!/webapps/datanode
 to ./temp/Jetty_localhost_37449_datanode____pas7r0/webapp
   [junit4]   2> 331671 INFO  
(SUITE-TestHdfsCloudBackupRestore-seed#[F18D3CB4E3862CCD]-worker) [    ] 
o.m.log Started 
HttpServer2$SelectChannelConnectorWithSafeStartup@localhost:37449
   [junit4]   2> 331951 WARN  
(SUITE-TestHdfsCloudBackupRestore-seed#[F18D3CB4E3862CCD]-worker) [    ] 
o.a.h.h.HttpRequestLog Jetty request log can only be enabled using Log4j
   [junit4]   2> 331952 INFO  
(SUITE-TestHdfsCloudBackupRestore-seed#[F18D3CB4E3862CCD]-worker) [    ] 
o.m.log jetty-6.1.26
   [junit4]   2> 331967 INFO  
(SUITE-TestHdfsCloudBackupRestore-seed#[F18D3CB4E3862CCD]-worker) [    ] 
o.m.log Extract 
jar:file:/home/jenkins/.ivy2/cache/org.apache.hadoop/hadoop-hdfs/tests/hadoop-hdfs-2.7.4-tests.jar!/webapps/datanode
 to ./temp/Jetty_localhost_35821_datanode____.fmxqdg/webapp
   [junit4]   2> 332446 INFO  
(SUITE-TestHdfsCloudBackupRestore-seed#[F18D3CB4E3862CCD]-worker) [    ] 
o.m.log Started 
HttpServer2$SelectChannelConnectorWithSafeStartup@localhost:35821
   [junit4]   2> 332455 ERROR (DataNode: 
[[[DISK]file:/home/jenkins/workspace/Lucene-Solr-master-Linux/solr/build/solr-core/test/J1/temp/solr.cloud.api.collections.TestHdfsCloudBackupRestore_F18D3CB4E3862CCD-001/tempDir-001/hdfsBaseDir/data/data1/,
 
[DISK]file:/home/jenkins/workspace/Lucene-Solr-master-Linux/solr/build/solr-core/test/J1/temp/solr.cloud.api.collections.TestHdfsCloudBackupRestore_F18D3CB4E3862CCD-001/tempDir-001/hdfsBaseDir/data/data2/]]
  heartbeating to localhost.localdomain/127.0.0.1:36973) [    ] 
o.a.h.h.s.d.DirectoryScanner 
dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value below 1 
ms/sec. Assuming default value of 1000
   [junit4]   2> 332591 ERROR (DataNode: 
[[[DISK]file:/home/jenkins/workspace/Lucene-Solr-master-Linux/solr/build/solr-core/test/J1/temp/solr.cloud.api.collections.TestHdfsCloudBackupRestore_F18D3CB4E3862CCD-001/tempDir-001/hdfsBaseDir/data/data3/,
 
[DISK]file:/home/jenkins/workspace/Lucene-Solr-master-Linux/solr/build/solr-core/test/J1/temp/solr.cloud.api.collections.TestHdfsCloudBackupRestore_F18D3CB4E3862CCD-001/tempDir-001/hdfsBaseDir/data/data4/]]
  heartbeating to localhost.localdomain/127.0.0.1:36973) [    ] 
o.a.h.h.s.d.DirectoryScanner 
dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value below 1 
ms/sec. Assuming default value of 1000
   [junit4]   2> 332603 INFO  (Block report processor) [    ] BlockStateChange 
BLOCK* processReport 0x525a9e0c2bf61: from storage 
DS-bd6afecd-9783-4707-9591-c59721a04db8 node 
DatanodeRegistration(127.0.0.1:33075, 
datanodeUuid=3911fce7-b800-46eb-9baa-0878a25ada6b, infoPort=34171, 
infoSecurePort=0, ipcPort=46799, 
storageInfo=lv=-56;cid=testClusterID;nsid=1987491932;c=0), blocks: 0, 
hasStaleStorage: true, processing time: 3 msecs
   [junit4]   2> 332603 INFO  (Block report processor) [    ] BlockStateChange 
BLOCK* processReport 0x525a9dfcf1b0e: from storage 
DS-9d376446-eb27-4306-8fae-fc94ee9416f1 node 
DatanodeRegistration(127.0.0.1:37157, 
datanodeUuid=04d0cc2c-a871-4d95-a867-31dd01c33b49, infoPort=42455, 
infoSecurePort=0, ipcPort=46727, 
storageInfo=lv=-56;cid=testClusterID;nsid=1987491932;c=0), blocks: 0, 
hasStaleStorage: true, processing time: 0 msecs
   [junit4]   2> 332603 INFO  (Block report processor) [    ] BlockStateChange 
BLOCK* processReport 0x525a9e0c2bf61: from storage 
DS-6ee130d7-5b8c-470d-84fd-e8d7a14cc4dc node 
DatanodeRegistration(127.0.0.1:33075, 
datanodeUuid=3911fce7-b800-46eb-9baa-0878a25ada6b, infoPort=34171, 
infoSecurePort=0, ipcPort=46799, 
storageInfo=lv=-56;cid=testClusterID;nsid=1987491932;c=0), blocks: 0, 
hasStaleStorage: false, processing time: 0 msecs
   [junit4]   2> 332603 INFO  (Block report processor) [    ] BlockStateChange 
BLOCK* processReport 0x525a9dfcf1b0e: from storage 
DS-df65547f-6091-49c1-a714-c18e106a4778 node 
DatanodeRegistration(127.0.0.1:37157, 
datanodeUuid=04d0cc2c-a871-4d95-a867-31dd01c33b49, infoPort=42455, 
infoSecurePort=0, ipcPort=46727, 
storageInfo=lv=-56;cid=testClusterID;nsid=1987491932;c=0), blocks: 0, 
hasStaleStorage: false, processing time: 0 msecs
   [junit4]   2> 333023 INFO  
(SUITE-TestHdfsCloudBackupRestore-seed#[F18D3CB4E3862CCD]-worker) [    ] 
o.a.s.c.MiniSolrCloudCluster Starting cluster of 2 servers in 
/home/jenkins/workspace/Lucene-Solr-master-Linux/solr/build/solr-core/test/J1/temp/solr.cloud.api.collections.TestHdfsCloudBackupRestore_F18D3CB4E3862CCD-001/tempDir-002
   [junit4]   2> 333023 INFO  
(SUITE-TestHdfsCloudBackupRestore-seed#[F18D3CB4E3862CCD]-worker) [    ] 
o.a.s.c.ZkTestServer STARTING ZK TEST SERVER
   [junit4]   2> 333024 INFO  (Thread-706) [    ] o.a.s.c.ZkTestServer client 
port:0.0.0.0/0.0.0.0:0
   [junit4]   2> 333024 INFO  (Thread-706) [    ] o.a.s.c.ZkTestServer Starting 
server
   [junit4]   2> 333025 ERROR (Thread-706) [    ] o.a.z.s.ZooKeeperServer 
ZKShutdownHandler is not registered, so ZooKeeper server won't take any action 
on ERROR or SHUTDOWN server state changes
   [junit4]   2> 333124 INFO  
(SUITE-TestHdfsCloudBackupRestore-seed#[F18D3CB4E3862CCD]-worker) [    ] 
o.a.s.c.ZkTestServer start zk server on port:34181
   [junit4]   2> 333126 INFO  (zkConnectionManagerCallback-980-thread-1) [    ] 
o.a.s.c.c.ConnectionManager zkClient has connected
   [junit4]   2> 333130 INFO  (jetty-launcher-977-thread-1) [    ] 
o.e.j.s.Server jetty-9.4.11.v20180605; built: 2018-06-05T18:24:03.829Z; git: 
d5fc0523cfa96bfebfbda19606cad384d772f04c; jvm 1.8.0_172-b11
   [junit4]   2> 333130 INFO  (jetty-launcher-977-thread-2) [    ] 
o.e.j.s.Server jetty-9.4.11.v20180605; built: 2018-06-05T18:24:03.829Z; git: 
d5fc0523cfa96bfebfbda19606cad384d772f04c; jvm 1.8.0_172-b11
   [junit4]   2> 333133 INFO  (jetty-launcher-977-thread-1) [    ] 
o.e.j.s.session DefaultSessionIdManager workerName=node0
   [junit4]   2> 333133 INFO  (jetty-launcher-977-thread-2) [    ] 
o.e.j.s.session DefaultSessionIdManager workerName=node0
   [junit4]   2> 333133 INFO  (jetty-launcher-977-thread-1) [    ] 
o.e.j.s.session No SessionScavenger set, using defaults
   [junit4]   2> 333133 INFO  (jetty-launcher-977-thread-2) [    ] 
o.e.j.s.session No SessionScavenger set, using defaults
   [junit4]   2> 333133 INFO  (jetty-launcher-977-thread-2) [    ] 
o.e.j.s.session node0 Scavenging every 660000ms
   [junit4]   2> 333133 INFO  (jetty-launcher-977-thread-1) [    ] 
o.e.j.s.session node0 Scavenging every 660000ms
   [junit4]   2> 333133 INFO  (jetty-launcher-977-thread-2) [    ] 
o.e.j.s.h.ContextHandler Started 
o.e.j.s.ServletContextHandler@6d93ca32{/solr,null,AVAILABLE}
   [junit4]   2> 333133 INFO  (jetty-launcher-977-thread-1) [    ] 
o.e.j.s.h.ContextHandler Started 
o.e.j.s.ServletContextHandler@7e40af3{/solr,null,AVAILABLE}
   [junit4]   2> 333133 INFO  (jetty-launcher-977-thread-2) [    ] 
o.e.j.s.AbstractConnector Started 
ServerConnector@47c74a9b{HTTP/1.1,[http/1.1]}{127.0.0.1:44383}
   [junit4]   2> 333133 INFO  (jetty-launcher-977-thread-1) [    ] 
o.e.j.s.AbstractConnector Started 
ServerConnector@51eddab0{HTTP/1.1,[http/1.1]}{127.0.0.1:41447}
   [junit4]   2> 333133 INFO  (jetty-launcher-977-thread-2) [    ] 
o.e.j.s.Server Started @333178ms
   [junit4]   2> 333133 INFO  (jetty-launcher-977-thread-1) [    ] 
o.e.j.s.Server Started @333178ms
   [junit4]   2> 333133 INFO  (jetty-launcher-977-thread-2) [    ] 
o.a.s.c.s.e.JettySolrRunner Jetty properties: {hostContext=/solr, 
hostPort=44383}
   [junit4]   2> 333133 INFO  (jetty-launcher-977-thread-1) [    ] 
o.a.s.c.s.e.JettySolrRunner Jetty properties: {hostContext=/solr, 
hostPort=41447}
   [junit4]   2> 333133 ERROR (jetty-launcher-977-thread-2) [    ] 
o.a.s.u.StartupLoggingUtils Missing Java Option solr.log.dir. Logging may be 
missing or incomplete.
   [junit4]   2> 333133 ERROR (jetty-launcher-977-thread-1) [    ] 
o.a.s.u.StartupLoggingUtils Missing Java Option solr.log.dir. Logging may be 
missing or incomplete.
   [junit4]   2> 333133 INFO  (jetty-launcher-977-thread-2) [    ] 
o.a.s.s.SolrDispatchFilter Using logger factory 
org.apache.logging.slf4j.Log4jLoggerFactory
   [junit4]   2> 333133 INFO  (jetty-launcher-977-thread-1) [    ] 
o.a.s.s.SolrDispatchFilter Using logger factory 
org.apache.logging.slf4j.Log4jLoggerFactory
   [junit4]   2> 333133 INFO  (jetty-launcher-977-thread-2) [    ] 
o.a.s.s.SolrDispatchFilter  ___      _       Welcome to Apache Solr™ version 
8.0.0
   [junit4]   2> 333133 INFO  (jetty-launcher-977-thread-1) [    ] 
o.a.s.s.SolrDispatchFilter  ___      _       Welcome to Apache Solr™ version 
8.0.0
   [junit4]   2> 333133 INFO  (jetty-launcher-977-thread-2) [    ] 
o.a.s.s.SolrDispatchFilter / __| ___| |_ _   Starting in cloud mode on port null
   [junit4]   2> 333134 INFO  (jetty-launcher-977-thread-1) [    ] 
o.a.s.s.SolrDispatchFilter / __| ___| |_ _   Starting in cloud mode on port null
   [junit4]   2> 333134 INFO  (jetty-launcher-977-thread-2) [    ] 
o.a.s.s.SolrDispatchFilter \__ \/ _ \ | '_|  Install dir: null
   [junit4]   2> 333134 INFO  (jetty-launcher-977-thread-1) [    ] 
o.a.s.s.SolrDispatchFilter \__ \/ _ \ | '_|  Install dir: null
   [junit4]   2> 333134 INFO  (jetty-launcher-977-thread-2) [    ] 
o.a.s.s.SolrDispatchFilter |___/\___/_|_|    Start time: 
2018-11-13T02:02:14.623Z
   [junit4]   2> 333134 INFO  (jetty-launcher-977-thread-1) [    ] 
o.a.s.s.SolrDispatchFilter |___/\___/_|_|    Start time: 
2018-11-13T02:02:14.623Z
   [junit4]   2> 333135 INFO  (zkConnectionManagerCallback-982-thread-1) [    ] 
o.a.s.c.c.ConnectionManager zkClient has connected
   [junit4]   2> 333135 INFO  (zkConnectionManagerCallback-984-thread-1) [    ] 
o.a.s.c.c.ConnectionManager zkClient has connected
   [junit4]   2> 333135 INFO  (jetty-launcher-977-thread-2) [    ] 
o.a.s.s.SolrDispatchFilter solr.xml found in ZooKeeper. Loading...
   [junit4]   2> 333135 INFO  (jetty-launcher-977-thread-1) [    ] 
o.a.s.s.SolrDispatchFilter solr.xml found in ZooKeeper. Loading...
   [junit4]   2> 333139 INFO  (jetty-launcher-977-thread-1) [    ] 
o.a.s.c.SolrXmlConfig MBean server found: 
com.sun.jmx.mbeanserver.JmxMBeanServer@576643ef, but no JMX reporters were 
configured - adding default JMX reporter.
   [junit4]   2> 333139 INFO  (jetty-launcher-977-thread-2) [    ] 
o.a.s.c.SolrXmlConfig MBean server found: 
com.sun.jmx.mbeanserver.JmxMBeanServer@576643ef, but no JMX reporters were 
configured - adding default JMX reporter.
   [junit4]   2> 333218 INFO  (jetty-launcher-977-thread-2) [    ] 
o.a.s.c.ZkContainer Zookeeper client=127.0.0.1:34181/solr
   [junit4]   2> 333223 INFO  (zkConnectionManagerCallback-988-thread-1) [    ] 
o.a.s.c.c.ConnectionManager zkClient has connected
   [junit4]   2> 333229 INFO  (zkConnectionManagerCallback-990-thread-1) [    ] 
o.a.s.c.c.ConnectionManager zkClient has connected
   [junit4]   2> 333255 INFO  (jetty-launcher-977-thread-1) [    ] 
o.a.s.c.ZkContainer Zookeeper client=127.0.0.1:34181/solr
   [junit4]   2> 333256 INFO  (zkConnectionManagerCallback-996-thread-1) [    ] 
o.a.s.c.c.ConnectionManager zkClient has connected
   [junit4]   2> 333257 WARN  (NIOServerCxn.Factory:0.0.0.0/0.0.0.0:0) [    ] 
o.a.z.s.NIOServerCnxn Unable to read additional data from client sessionid 
0x100565abbad0005, likely client has closed socket
   [junit4]   2> 333259 INFO  (zkConnectionManagerCallback-998-thread-1) [    ] 
o.a.s.c.c.ConnectionManager zkClient has connected
   [junit4]   2> 333265 INFO  (jetty-launcher-977-thread-1) 
[n:127.0.0.1:41447_solr    ] o.a.s.c.OverseerElectionContext I am going to be 
the leader 127.0.0.1:41447_solr
   [junit4]   2> 333265 INFO  (jetty-launcher-977-thread-1) 
[n:127.0.0.1:41447_solr    ] o.a.s.c.Overseer Overseer 
(id=72152541733650438-127.0.0.1:41447_solr-n_0000000000) starting
   [junit4]   2> 333269 INFO  (zkConnectionManagerCallback-1005-thread-1) [    
] o.a.s.c.c.ConnectionManager zkClient has connected
   [junit4]   2> 333270 INFO  (jetty-launcher-977-thread-1) 
[n:127.0.0.1:41447_solr    ] o.a.s.c.s.i.ZkClientClusterStateProvider Cluster 
at 127.0.0.1:34181/solr ready
   [junit4]   2> 333270 INFO  
(OverseerStateUpdate-72152541733650438-127.0.0.1:41447_solr-n_0000000000) 
[n:127.0.0.1:41447_solr    ] o.a.s.c.Overseer Starting to work on the main 
queue : 127.0.0.1:41447_solr
   [junit4]   2> 333272 INFO  (jetty-launcher-977-thread-1) 
[n:127.0.0.1:41447_solr    ] o.a.s.c.ZkController Register node as live in 
ZooKeeper:/live_nodes/127.0.0.1:41447_solr
   [junit4]   2> 333273 INFO  (zkCallback-1004-thread-1) [    ] 
o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (0) -> (1)
   [junit4]   2> 333273 INFO  (jetty-launcher-977-thread-1) 
[n:127.0.0.1:41447_solr    ] o.a.s.c.b.r.BackupRepositoryFactory Added backup 
repository with configuration params {type = repository,name = hdfs,class = 
org.apache.solr.core.backup.repository.HdfsBackupRepository,attributes = 
{name=hdfs, 
class=org.apache.solr.core.backup.repository.HdfsBackupRepository},args = 
{location=/backup,solr.hdfs.home=hdfs://localhost.localdomain:36973/solr,solr.hdfs.confdir=}}
   [junit4]   2> 333273 INFO  (jetty-launcher-977-thread-1) 
[n:127.0.0.1:41447_solr    ] o.a.s.c.b.r.BackupRepositoryFactory Default 
configuration for backup repository is with configuration params {type = 
repository,name = hdfs,class = 
org.apache.solr.core.backup.repository.HdfsBackupRepository,attributes = 
{name=hdfs, 
class=org.apache.solr.core.backup.repository.HdfsBackupRepository},args = 
{location=/backup,solr.hdfs.home=hdfs://localhost.localdomain:36973/solr,solr.hdfs.confdir=}}
   [junit4]   2> 333275 INFO  (jetty-launcher-977-thread-2) 
[n:127.0.0.1:44383_solr    ] o.a.s.c.TransientSolrCoreCacheDefault Allocating 
transient cache for 2147483647 transient cores
   [junit4]   2> 333275 INFO  (jetty-launcher-977-thread-2) 
[n:127.0.0.1:44383_solr    ] o.a.s.c.ZkController Register node as live in 
ZooKeeper:/live_nodes/127.0.0.1:44383_solr
   [junit4]   2> 333276 INFO  (jetty-launcher-977-thread-2) 
[n:127.0.0.1:44383_solr    ] o.a.s.c.b.r.BackupRepositoryFactory Added backup 
repository with configuration params {type = repository,name = hdfs,class = 
org.apache.solr.core.backup.repository.HdfsBackupRepository,attributes = 
{name=hdfs, 
class=org.apache.solr.core.backup.repository.HdfsBackupRepository},args = 
{location=/backup,solr.hdfs.home=hdfs://localhost.localdomain:36973/solr,solr.hdfs.confdir=}}
   [junit4]   2> 333276 INFO  (jetty-launcher-977-thread-2) 
[n:127.0.0.1:44383_solr    ] o.a.s.c.b.r.BackupRepositoryFactory Default 
configuration for backup repository is with configuration params {type = 
repository,name = hdfs,class = 
org.apache.solr.core.backup.repository.HdfsBackupRepository,attributes = 
{name=hdfs, 
class=org.apache.solr.core.backup.repository.HdfsBackupRepository},args = 
{location=/backup,solr.hdfs.home=hdfs://localhost.localdomain:36973/solr,solr.hdfs.confdir=}}
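
For reference, the BackupRepositoryFactory params logged above come from
a <backup> repository section in the test's solr.xml. A hedged sketch of
the cluster bootstrap, assuming the SolrCloudTestCase/MiniSolrCloudCluster
builder API; the XML is reconstructed from the logged args (the test's
real solr.xml also carries the standard MiniSolrCloudCluster settings),
and configsetPath is a placeholder java.nio.file.Path:

    // Inside a @BeforeClass method of a SolrCloudTestCase subclass:
    String solrXml =
        "<solr>\n" +
        "  <backup>\n" +
        "    <repository name=\"hdfs\"\n" +
        "                class=\"org.apache.solr.core.backup.repository.HdfsBackupRepository\">\n" +
        "      <str name=\"location\">/backup</str>\n" +
        "      <str name=\"solr.hdfs.home\">hdfs://localhost.localdomain:36973/solr</str>\n" +
        "      <str name=\"solr.hdfs.confdir\"></str>\n" +
        "    </repository>\n" +
        "  </backup>\n" +
        "</solr>";
    configureCluster(2)                    // "Starting cluster of 2 servers" above
        .addConfig("conf1", configsetPath) // placeholder configset path
        .withSolrXml(solrXml)
        .configure();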
   [junit4]   2> 333276 INFO  (zkCallback-997-thread-1) [    ] 
o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (0) -> (2)
   [junit4]   2> 333279 INFO  (zkCallback-1004-thread-1) [    ] 
o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (1) -> (2)
   [junit4]   2> 333279 INFO  (zkCallback-989-thread-1) [    ] 
o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (0) -> (2)
   [junit4]   2> 333291 INFO  (jetty-launcher-977-thread-1) 
[n:127.0.0.1:41447_solr    ] o.a.s.h.a.MetricsHistoryHandler No .system 
collection, keeping metrics history in memory.
   [junit4]   2> 333324 INFO  (zkConnectionManagerCallback-1011-thread-1) [    
] o.a.s.c.c.ConnectionManager zkClient has connected
   [junit4]   2> 333325 INFO  (jetty-launcher-977-thread-2) 
[n:127.0.0.1:44383_solr    ] o.a.s.c.c.ZkStateReader Updated live nodes from 
ZooKeeper... (0) -> (2)
   [junit4]   2> 333325 INFO  (jetty-launcher-977-thread-2) 
[n:127.0.0.1:44383_solr    ] o.a.s.c.s.i.ZkClientClusterStateProvider Cluster 
at 127.0.0.1:34181/solr ready
   [junit4]   2> 333326 INFO  (jetty-launcher-977-thread-2) 
[n:127.0.0.1:44383_solr    ] o.a.s.h.a.MetricsHistoryHandler No .system 
collection, keeping metrics history in memory.
   [junit4]   2> 333339 INFO  (jetty-launcher-977-thread-1) 
[n:127.0.0.1:41447_solr    ] o.a.s.m.r.SolrJmxReporter JMX monitoring for 
'solr.node' (registry 'solr.node') enabled at server: 
com.sun.jmx.mbeanserver.JmxMBeanServer@576643ef
   [junit4]   2> 333341 INFO  (jetty-launcher-977-thread-2) 
[n:127.0.0.1:44383_solr    ] o.a.s.m.r.SolrJmxReporter JMX monitoring for 
'solr.node' (registry 'solr.node') enabled at server: 
com.sun.jmx.mbeanserver.JmxMBeanServer@576643ef
   [junit4]   2> 333347 INFO  (jetty-launcher-977-thread-1) 
[n:127.0.0.1:41447_solr    ] o.a.s.m.r.SolrJmxReporter JMX monitoring for 
'solr.jvm' (registry 'solr.jvm') enabled at server: 
com.sun.jmx.mbeanserver.JmxMBeanServer@576643ef
   [junit4]   2> 333347 INFO  (jetty-launcher-977-thread-1) 
[n:127.0.0.1:41447_solr    ] o.a.s.m.r.SolrJmxReporter JMX monitoring for 
'solr.jetty' (registry 'solr.jetty') enabled at server: 
com.sun.jmx.mbeanserver.JmxMBeanServer@576643ef
   [junit4]   2> 333348 INFO  (jetty-launcher-977-thread-1) 
[n:127.0.0.1:41447_solr    ] o.a.s.c.CorePropertiesLocator Found 0 core 
definitions underneath 
/home/jenkins/workspace/Lucene-Solr-master-Linux/solr/build/solr-core/test/J1/temp/solr.cloud.api.collections.TestHdfsCloudBackupRestore_F18D3CB4E3862CCD-001/tempDir-002/node1/.
   [junit4]   2> 333348 INFO  (jetty-launcher-977-thread-2) 
[n:127.0.0.1:44383_solr    ] o.a.s.m.r.SolrJmxReporter JMX monitoring for 
'solr.jvm' (registry 'solr.jvm') enabled at server: 
com.sun.jmx.mbeanserver.JmxMBeanServer@576643ef
   [junit4]   2> 333349 INFO  (jetty-launcher-977-thread-2) 
[n:127.0.0.1:44383_solr    ] o.a.s.m.r.SolrJmxReporter JMX monitoring for 
'solr.jetty' (registry 'solr.jetty') enabled at server: 
com.sun.jmx.mbeanserver.JmxMBeanServer@576643ef
   [junit4]   2> 333350 INFO  (jetty-launcher-977-thread-2) 
[n:127.0.0.1:44383_solr    ] o.a.s.c.CorePropertiesLocator Found 0 core 
definitions underneath 
/home/jenkins/workspace/Lucene-Solr-master-Linux/solr/build/solr-core/test/J1/temp/solr.cloud.api.collections.TestHdfsCloudBackupRestore_F18D3CB4E3862CCD-001/tempDir-002/node2/.
   [junit4]   2> 333387 INFO  (zkConnectionManagerCallback-1014-thread-1) [    
] o.a.s.c.c.ConnectionManager zkClient has connected
   [junit4]   2> 333390 INFO  (zkConnectionManagerCallback-1019-thread-1) [    
] o.a.s.c.c.ConnectionManager zkClient has connected
   [junit4]   2> 333391 INFO  
(SUITE-TestHdfsCloudBackupRestore-seed#[F18D3CB4E3862CCD]-worker) [    ] 
o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (0) -> (2)
   [junit4]   2> 333391 INFO  
(SUITE-TestHdfsCloudBackupRestore-seed#[F18D3CB4E3862CCD]-worker) [    ] 
o.a.s.c.s.i.ZkClientClusterStateProvider Cluster at 127.0.0.1:34181/solr ready
   [junit4]   2> 333432 INFO  
(TEST-TestHdfsCloudBackupRestore.test-seed#[F18D3CB4E3862CCD]) [    ] 
o.a.s.SolrTestCaseJ4 ###Starting test
   [junit4]   2> 333433 INFO  (qtp583999274-3762) [n:127.0.0.1:41447_solr    ] 
o.a.s.h.a.CollectionsHandler Invoked Collection Action :create with params 
pullReplicas=1&property.customKey=customValue&collection.configName=conf1&maxShardsPerNode=4&autoAddReplicas=true&name=hdfsbackuprestore&nrtReplicas=2&action=CREATE&numShards=2&tlogReplicas=1&wt=javabin&version=2
 and sendToOCPQueue=true
   [junit4]   2> 333434 INFO  
(OverseerThreadFactory-1583-thread-1-processing-n:127.0.0.1:41447_solr) 
[n:127.0.0.1:41447_solr    ] o.a.s.c.a.c.CreateCollectionCmd Create collection 
hdfsbackuprestore
   [junit4]   2> 333536 WARN  
(OverseerThreadFactory-1583-thread-1-processing-n:127.0.0.1:41447_solr) 
[n:127.0.0.1:41447_solr    ] o.a.s.c.a.c.CreateCollectionCmd Specified number 
of replicas of 4 on collection hdfsbackuprestore is higher than the number of 
Solr instances currently live or live and part of your createNodeSet(2). It's 
unusual to run two replica of the same slice on the same Solr-instance.
   [junit4]   2> 333539 INFO  (qtp583999274-3753) [n:127.0.0.1:41447_solr    ] 
o.a.s.s.HttpSolrCall [admin] webapp=null path=/admin/metrics 
params={prefix=CONTAINER.fs.usableSpace,CONTAINER.fs.totalSpace,CORE.coreName&wt=javabin&version=2&group=solr.node,solr.core}
 status=0 QTime=0
   [junit4]   2> 333540 INFO  (qtp547918623-3763) [n:127.0.0.1:44383_solr    ] 
o.a.s.s.HttpSolrCall [admin] webapp=null path=/admin/metrics 
params={prefix=CONTAINER.fs.usableSpace,CONTAINER.fs.totalSpace,CORE.coreName&wt=javabin&version=2&group=solr.node,solr.core}
 status=0 QTime=0
   [junit4]   2> 333545 INFO  
(OverseerStateUpdate-72152541733650438-127.0.0.1:41447_solr-n_0000000000) 
[n:127.0.0.1:41447_solr    ] o.a.s.c.o.SliceMutator createReplica() {
   [junit4]   2>   "operation":"ADDREPLICA",
   [junit4]   2>   "collection":"hdfsbackuprestore",
   [junit4]   2>   "shard":"shard1",
   [junit4]   2>   "core":"hdfsbackuprestore_shard1_replica_n1",
   [junit4]   2>   "state":"down",
   [junit4]   2>   "base_url":"http://127.0.0.1:44383/solr";,
   [junit4]   2>   "type":"NRT",
   [junit4]   2>   "waitForFinalState":"false"} 
   [junit4]   2> 333546 INFO  
(OverseerStateUpdate-72152541733650438-127.0.0.1:41447_solr-n_0000000000) 
[n:127.0.0.1:41447_solr    ] o.a.s.c.o.SliceMutator createReplica() {
   [junit4]   2>   "operation":"ADDREPLICA",
   [junit4]   2>   "collection":"hdfsbackuprestore",
   [junit4]   2>   "shard":"shard1",
   [junit4]   2>   "core":"hdfsbackuprestore_shard1_replica_n2",
   [junit4]   2>   "state":"down",
   [junit4]   2>   "base_url":"http://127.0.0.1:41447/solr";,
   [junit4]   2>   "type":"NRT",
   [junit4]   2>   "waitForFinalState":"false"} 
   [junit4]   2> 333547 INFO  
(OverseerStateUpdate-72152541733650438-127.0.0.1:41447_solr-n_0000000000) 
[n:127.0.0.1:41447_solr    ] o.a.s.c.o.SliceMutator createReplica() {
   [junit4]   2>   "operation":"ADDREPLICA",
   [junit4]   2>   "collection":"hdfsbackuprestore",
   [junit4]   2>   "shard":"shard1",
   [junit4]   2>   "core":"hdfsbackuprestore_shard1_replica_t4",
   [junit4]   2>   "state":"down",
   [junit4]   2>   "base_url":"http://127.0.0.1:41447/solr";,
   [junit4]   2>   "type":"TLOG",
   [junit4]   2>   "waitForFinalState":"false"} 
   [junit4]   2> 333548 INFO  
(OverseerStateUpdate-72152541733650438-127.0.0.1:41447_solr-n_0000000000) 
[n:127.0.0.1:41447_solr    ] o.a.s.c.o.SliceMutator createReplica() {
   [junit4]   2>   "operation":"ADDREPLICA",
   [junit4]   2>   "collection":"hdfsbackuprestore",
   [junit4]   2>   "shard":"shard1",
   [junit4]   2>   "core":"hdfsbackuprestore_shard1_replica_p6",
   [junit4]   2>   "state":"down",
   [junit4]   2>   "base_url":"http://127.0.0.1:44383/solr";,
   [junit4]   2>   "type":"PULL",
   [junit4]   2>   "waitForFinalState":"false"} 
   [junit4]   2> 333549 INFO  
(OverseerStateUpdate-72152541733650438-127.0.0.1:41447_solr-n_0000000000) 
[n:127.0.0.1:41447_solr    ] o.a.s.c.o.SliceMutator createReplica() {
   [junit4]   2>   "operation":"ADDREPLICA",
   [junit4]   2>   "collection":"hdfsbackuprestore",
   [junit4]   2>   "shard":"shard2",
   [junit4]   2>   "core":"hdfsbackuprestore_shard2_replica_n8",
   [junit4]   2>   "state":"down",
   [junit4]   2>   "base_url":"http://127.0.0.1:44383/solr";,
   [junit4]   2>   "type":"NRT",
   [junit4]   2>   "waitForFinalState":"false"} 
   [junit4]   2> 333549 INFO  
(OverseerStateUpdate-72152541733650438-127.0.0.1:41447_solr-n_0000000000) 
[n:127.0.0.1:41447_solr    ] o.a.s.c.o.SliceMutator createReplica() {
   [junit4]   2>   "operation":"ADDREPLICA",
   [junit4]   2>   "collection":"hdfsbackuprestore",
   [junit4]   2>   "shard":"shard2",
   [junit4]   2>   "core":"hdfsbackuprestore_shard2_replica_n10",
   [junit4]   2>   "state":"down",
   [junit4]   2>   "base_url":"http://127.0.0.1:41447/solr";,
   [junit4]   2>   "type":"NRT",
   [junit4]   2>   "waitForFinalState":"false"} 
   [junit4]   2> 333550 INFO  
(OverseerStateUpdate-72152541733650438-127.0.0.1:41447_solr-n_0000000000) 
[n:127.0.0.1:41447_solr    ] o.a.s.c.o.SliceMutator createReplica() {
   [junit4]   2>   "operation":"ADDREPLICA",
   [junit4]   2>   "collection":"hdfsbackuprestore",
   [junit4]   2>   "shard":"shard2",
   [junit4]   2>   "core":"hdfsbackuprestore_shard2_replica_t12",
   [junit4]   2>   "state":"down",
   [junit4]   2>   "base_url":"http://127.0.0.1:41447/solr";,
   [junit4]   2>   "type":"TLOG",
   [junit4]   2>   "waitForFinalState":"false"} 
   [junit4]   2> 333551 INFO  
(OverseerStateUpdate-72152541733650438-127.0.0.1:41447_solr-n_0000000000) 
[n:127.0.0.1:41447_solr    ] o.a.s.c.o.SliceMutator createReplica() {
   [junit4]   2>   "operation":"ADDREPLICA",
   [junit4]   2>   "collection":"hdfsbackuprestore",
   [junit4]   2>   "shard":"shard2",
   [junit4]   2>   "core":"hdfsbackuprestore_shard2_replica_p14",
   [junit4]   2>   "state":"down",
   [junit4]   2>   "base_url":"http://127.0.0.1:44383/solr";,
   [junit4]   2>   "type":"PULL",
   [junit4]   2>   "waitForFinalState":"false"} 
   [junit4]   2> 333754 INFO  (qtp583999274-3754) [n:127.0.0.1:41447_solr    
x:hdfsbackuprestore_shard1_replica_n2] o.a.s.h.a.CoreAdminOperation core create 
command 
qt=/admin/cores&collection.configName=conf1&newCollection=true&collection=hdfsbackuprestore&version=2&replicaType=NRT&property.customKey=customValue&coreNodeName=core_node5&name=hdfsbackuprestore_shard1_replica_n2&action=CREATE&numShards=2&shard=shard1&wt=javabin
   [junit4]   2> 333754 INFO  (qtp583999274-3748) [n:127.0.0.1:41447_solr    
x:hdfsbackuprestore_shard1_replica_t4] o.a.s.h.a.CoreAdminOperation core create 
command 
qt=/admin/cores&collection.configName=conf1&newCollection=true&collection=hdfsbackuprestore&version=2&replicaType=TLOG&property.customKey=customValue&coreNodeName=core_node7&name=hdfsbackuprestore_shard1_replica_t4&action=CREATE&numShards=2&shard=shard1&wt=javabin
   [junit4]   2> 333754 INFO  (qtp547918623-3749) [n:127.0.0.1:44383_solr    
x:hdfsbackuprestore_shard2_replica_n8] o.a.s.h.a.CoreAdminOperation core create 
command 
qt=/admin/cores&collection.configName=conf1&newCollection=true&collection=hdfsbackuprestore&version=2&replicaType=NRT&property.customKey=customValue&coreNodeName=core_node11&name=hdfsbackuprestore_shard2_replica_n8&action=CREATE&numShards=2&shard=shard2&wt=javabin
   [junit4]   2> 333754 INFO  (qtp547918623-3752) [n:127.0.0.1:44383_solr    
x:hdfsbackuprestore_shard1_replica_n1] o.a.s.h.a.CoreAdminOperation core create 
command 
qt=/admin/cores&collection.configName=conf1&newCollection=true&collection=hdfsbackuprestore&version=2&replicaType=NRT&property.customKey=customValue&coreNodeName=core_node3&name=hdfsbackuprestore_shard1_replica_n1&action=CREATE&numShards=2&shard=shard1&wt=javabin
   [junit4]   2> 333755 INFO  (qtp583999274-3748) [n:127.0.0.1:41447_solr    
x:hdfsbackuprestore_shard1_replica_t4] o.a.s.c.TransientSolrCoreCacheDefault 
Allocating transient cache for 2147483647 transient cores
   [junit4]   2> 333755 INFO  (qtp547918623-3755) [n:127.0.0.1:44383_solr    
x:hdfsbackuprestore_shard1_replica_p6] o.a.s.h.a.CoreAdminOperation core create 
command 
qt=/admin/cores&collection.configName=conf1&newCollection=true&collection=hdfsbackuprestore&version=2&replicaType=PULL&property.customKey=customValue&coreNodeName=core_node9&name=hdfsbackuprestore_shard1_replica_p6&action=CREATE&numShards=2&shard=shard1&wt=javabin
   [junit4]   2> 333756 INFO  (qtp583999274-3751) [n:127.0.0.1:41447_solr    
x:hdfsbackuprestore_shard2_replica_n10] o.a.s.h.a.CoreAdminOperation core 
create command 
qt=/admin/cores&collection.configName=conf1&newCollection=true&collection=hdfsbackuprestore&version=2&replicaType=NRT&property.customKey=customValue&coreNodeName=core_node13&name=hdfsbackuprestore_shard2_replica_n10&action=CREATE&numShards=2&shard=shard2&wt=javabin
   [junit4]   2> 333756 INFO  (qtp583999274-3840) [n:127.0.0.1:41447_solr    
x:hdfsbackuprestore_shard2_replica_t12] o.a.s.h.a.CoreAdminOperation core 
create command 
qt=/admin/cores&collection.configName=conf1&newCollection=true&collection=hdfsbackuprestore&version=2&replicaType=TLOG&property.customKey=customValue&coreNodeName=core_node15&name=hdfsbackuprestore_shard2_replica_t12&action=CREATE&numShards=2&shard=shard2&wt=javabin
   [junit4]   2> 333757 INFO  (qtp547918623-3750) [n:127.0.0.1:44383_solr    
x:hdfsbackuprestore_shard2_replica_p14] o.a.s.h.a.CoreAdminOperation core 
create command 
qt=/admin/cores&collection.configName=conf1&newCollection=true&collection=hdfsbackuprestore&version=2&replicaType=PULL&property.customKey=customValue&coreNodeName=core_node16&name=hdfsbackuprestore_shard2_replica_p14&action=CREATE&numShards=2&shard=shard2&wt=javabin
   [junit4]   2> 334776 INFO  (qtp547918623-3749) [n:127.0.0.1:44383_solr 
c:hdfsbackuprestore s:shard2 r:core_node11 
x:hdfsbackuprestore_shard2_replica_n8] o.a.s.c.SolrConfig Using Lucene 
MatchVersion: 8.0.0
   [junit4]   2> 334777 INFO  (qtp547918623-3752) [n:127.0.0.1:44383_solr 
c:hdfsbackuprestore s:shard1 r:core_node3 
x:hdfsbackuprestore_shard1_replica_n1] o.a.s.c.SolrConfig Using Lucene 
MatchVersion: 8.0.0
   [junit4]   2> 334777 INFO  (qtp547918623-3755) [n:127.0.0.1:44383_solr 
c:hdfsbackuprestore s:shard1 r:core_node9 
x:hdfsbackuprestore_shard1_replica_p6] o.a.s.c.SolrConfig Using Lucene 
MatchVersion: 8.0.0
   [junit4]   2> 334778 INFO  (qtp547918623-3750) [n:127.0.0.1:44383_solr 
c:hdfsbackuprestore s:shard2 r:core_node16 
x:hdfsbackuprestore_shard2_replica_p14] o.a.s.c.SolrConfig Using Lucene 
MatchVersion: 8.0.0
   [junit4]   2> 334794 INFO  (qtp547918623-3749) [n:127.0.0.1:44383_solr 
c:hdfsbackuprestore s:shard2 r:core_node11 
x:hdfsbackuprestore_shard2_replica_n8] o.a.s.s.IndexSchema 
[hdfsbackuprestore_shard2_replica_n8] Schema name=minimal
   [junit4]   2> 334794 INFO  (qtp547918623-3752) [n:127.0.0.1:44383_solr 
c:hdfsbackuprestore s:shard1 r:core_node3 
x:hdfsbackuprestore_shard1_replica_n1] o.a.s.s.IndexSchema 
[hdfsbackuprestore_shard1_replica_n1] Schema name=minimal
   [junit4]   2> 334794 INFO  (qtp547918623-3750) [n:127.0.0.1:44383_solr 
c:hdfsbackuprestore s:shard2 r:core_node16 
x:hdfsbackuprestore_shard2_replica_p14] o.a.s.s.IndexSchema 
[hdfsbackuprestore_shard2_replica_p14] Schema name=minimal
   [junit4]   2> 334794 INFO  (qtp547918623-3755) [n:127.0.0.1:44383_solr 
c:hdfsbackuprestore s:shard1 r:core_node9 
x:hdfsbackuprestore_shard1_replica_p6] o.a.s.s.IndexSchema 
[hdfsbackuprestore_shard1_replica_p6] Schema name=minimal
   [junit4]   2> 334796 INFO  (qtp547918623-3752) [n:127.0.0.1:44383_solr 
c:hdfsbackuprestore s:shard1 r:core_node3 
x:hdfsbackuprestore_shard1_replica_n1] o.a.s.s.IndexSchema Loaded schema 
minimal/1.1 with uniqueid field id
   [junit4]   2> 334796 INFO  (qtp547918623-3749) [n:127.0.0.1:44383_solr 
c:hdfsbackuprestore s:shard2 r:core_node11 
x:hdfsbackuprestore_shard2_replica_n8] o.a.s.s.IndexSchema Loaded schema 
minimal/1.1 with uniqueid field id
   [junit4]   2> 334796 INFO  (qtp547918623-3749) [n:127.0.0.1:44383_solr 
c:hdfsbackuprestore s:shard2 r:core_node11 
x:hdfsbackuprestore_shard2_replica_n8] o.a.s.c.CoreContainer Creating SolrCore 
'hdfsbackuprestore_shard2_replica_n8' using configuration from collection 
hdfsbackuprestore, trusted=true
   [junit4]   2> 334796 INFO  (qtp547918623-3752) [n:127.0.0.1:44383_solr 
c:hdfsbackuprestore s:shard1 r:core_node3 
x:hdfsbackuprestore_shard1_replica_n1] o.a.s.c.CoreContainer Creating SolrCore 
'hdfsbackuprestore_shard1_replica_n1' using configuration from collection 
hdfsbackuprestore, trusted=true
   [junit4]   2> 334797 INFO  (qtp547918623-3755) [n:127.0.0.1:44383_solr 
c:hdfsbackuprestore s:shard1 r:core_node9 
x:hdfsbackuprestore_shard1_replica_p6] o.a.s.s.IndexSchema Loaded schema 
minimal/1.1 with uniqueid field id
   [junit4]   2> 334797 INFO  (qtp547918623-3750) [n:127.0.0.1:44383_solr 
c:hdfsbackuprestore s:shard2 r:core_node16 
x:hdfsbackuprestore_shard2_replica_p14] o.a.s.s.IndexSchema Loaded schema 
minimal/1.1 with uniqueid field id
   [junit4]   2> 334797 INFO  (qtp547918623-3755) [n:127.0.0.1:44383_solr 
c:hdfsbackuprestore s:shard1 r:core_node9 
x:hdfsbackuprestore_shard1_replica_p6] o.a.s.c.CoreContainer Creating SolrCore 
'hdfsbackuprestore_shard1_replica_p6' using configuration from collection 
hdfsbackuprestore, trusted=true
   [junit4]   2> 334797 INFO  (qtp547918623-3750) [n:127.0.0.1:44383_solr 
c:hdfsbackuprestore s:shard2 r:core_node16 
x:hdfsbackuprestore_shard2_replica_p14] o.a.s.c.CoreContainer Creating SolrCore 
'hdfsbackuprestore_shard2_replica_p14' using configuration from collection 
hdfsbackuprestore, trusted=true
   [junit4]   2> 334797 INFO  (qtp547918623-3752) [n:127.0.0.1:44383_solr 
c:hdfsbackuprestore s:shard1 r:core_node3 
x:hdfsbackuprestore_shard1_replica_n1] o.a.s.m.r.SolrJmxReporter JMX monitoring 
for 'solr.core.hdfsbackuprestore.shard1.replica_n1' (registry 
'solr.core.hdfsbackuprestore.shard1.replica_n1') enabled at server: 
com.sun.jmx.mbeanserver.JmxMBeanServer@576643ef
   [junit4]   2> 334797 INFO  (qtp547918623-3749) [n:127.0.0.1:44383_solr 
c:hdfsbackuprestore s:shard2 r:core_node11 
x:hdfsbackuprestore_shard2_replica_n8] o.a.s.m.r.SolrJmxReporter JMX monitoring 
for 'solr.core.hdfsbackuprestore.shard2.replica_n8' (registry 
'solr.core.hdfsbackuprestore.shard2.replica_n8') enabled at server: 
com.sun.jmx.mbeanserver.JmxMBeanServer@576643ef
   [junit4]   2> 334797 INFO  (qtp547918623-3750) [n:127.0.0.1:44383_solr 
c:hdfsbackuprestore s:shard2 r:core_node16 
x:hdfsbackuprestore_shard2_replica_p14] o.a.s.m.r.SolrJmxReporter JMX 
monitoring for 'solr.core.hdfsbackuprestore.shard2.replica_p14' (registry 
'solr.core.hdfsbackuprestore.shard2.replica_p14') enabled at server: 
com.sun.jmx.mbeanserver.JmxMBeanServer@576643ef
   [junit4]   2> 334797 INFO  (qtp547918623-3755) [n:127.0.0.1:44383_solr 
c:hdfsbackuprestore s:shard1 r:core_node9 
x:hdfsbackuprestore_shard1_replica_p6] o.a.s.m.r.SolrJmxReporter JMX monitoring 
for 'solr.core.hdfsbackuprestore.shard1.replica_p6' (registry 
'solr.core.hdfsbackuprestore.shard1.replica_p6') enabled at server: 
com.sun.jmx.mbeanserver.JmxMBeanServer@576643ef
   [junit4]   2> 334809 INFO  (qtp547918623-3755) [n:127.0.0.1:44383_solr 
c:hdfsbackuprestore s:shard1 r:core_node9 
x:hdfsbackuprestore_shard1_replica_p6] o.a.s.c.SolrCore 
[[hdfsbackuprestore_shard1_replica_p6] ] Opening new SolrCore at 
[/home/jenkins/workspace/Lucene-Solr-master-Linux/solr/build/solr-core/test/J1/temp/solr.cloud.api.collections.TestHdfsCloudBackupRestore_F18D3CB4E3862CCD-001/tempDir-002/node2/hdfsbackuprestore_shard1_replica_p6],
 
dataDir=[/home/jenkins/workspace/Lucene-Solr-master-Linux/solr/build/solr-core/test/J1/temp/solr.cloud.api.collections.TestHdfsCloudBackupRestore_F18D3CB4E3862CCD-001/tempDir-002/node2/./hdfsbackuprestore_shard1_replica_p6/data/]
   [junit4]   2> 334809 INFO  (qtp547918623-3750) [n:127.0.0.1:44383_solr 
c:hdfsbackuprestore s:shard2 r:core_node16 
x:hdfsbackuprestore_shard2_replica_p14] o.a.s.c.SolrCore 
[[hdfsbackuprestore_shard2_replica_p14] ] Opening new SolrCore at 
[/home/jenkins/workspace/Lucene-Solr-master-Linux/solr/build/solr-core/test/J1/temp/solr.cloud.api.collections.TestHdfsCloudBackupRestore_F18D3CB4E3862CCD-001/tempDir-002/node2/hdfsbackuprestore_shard2_replica_p14],
 
dataDir=[/home/jenkins/workspace/Lucene-Solr-master-Linux/solr/build/solr-core/test/J1/temp/solr.cloud.api.collections.TestHdfsCloudBackupRestore_F18D3CB4E3862CCD-001/tempDir-002/node2/./hdfsbackuprestore_shard2_replica_p14/data/]
   [junit4]   2> 334809 INFO  (qtp547918623-3752) [n:127.0.0.1:44383_solr 
c:hdfsbackuprestore s:shard1 r:core_node3 
x:hdfsbackuprestore_shard1_replica_n1] o.a.s.c.SolrCore 
[[hdfsbackuprestore_shard1_replica_n1] ] Opening new SolrCore at 
[/home/jenkins/workspace/Lucene-Solr-master-Linux/solr/build/solr-core/test/J1/temp/solr.cloud.api.collections.TestHdfsCloudBackupRestore_F18D3CB4E3862CCD-001/tempDir-002/node2/hdfsbackuprestore_shard1_replica_n1],
 
dataDir=[/home/jenkins/workspace/Lucene-Solr-master-Linux/solr/build/solr-core/test/J1/temp/solr.cloud.api.collections.TestHdfsCloudBackupRestore_F18D3CB4E3862CCD-001/tempDir-002/node2/./hdfsbackuprestore_shard1_replica_n1/data/]
   [junit4]   2> 334809 INFO  (qtp547918623-3749) [n:127.0.0.1:44383_solr 
c:hdfsbackuprestore s:shard2 r:core_node11 
x:hdfsbackuprestore_shard2_replica_n8] o.a.s.c.SolrCore 
[[hdfsbackuprestore_shard2_replica_n8] ] Opening new SolrCore at 
[/home/jenkins/workspace/Lucene-Solr-master-Linux/solr/build/solr-core/test/J1/temp/solr.cloud.api.collections.TestHdfsCloudBackupRestore_F18D3CB4E3862CCD-001/tempDir-002/node2/hdfsbackuprestore_shard2_replica_n8],
 
dataDir=[/home/jenkins/workspace/Lucene-Solr-master-Linux/solr/build/solr-core/test/J1/temp/solr.cloud.api.collections.TestHdfsCloudBackupRestore_F18D3CB4E3862CCD-001/tempDir-002/node2/./hdfsbackuprestore_shard2_replica_n8/data/]
   [junit4]   2> 334901 INFO  (qtp547918623-3752) [n:127.0.0.1:44383_solr 
c:hdfsbackuprestore s:shard1 r:core_node3 
x:hdfsbackuprestore_shard1_replica_n1] o.a.s.u.UpdateHandler Using UpdateLog 
implementation: org.apache.solr.update.UpdateLog
   [junit4]   2> 334901 INFO  (qtp547918623-3752) [n:127.0.0.1:44383_solr 
c:hdfsbackuprestore s:shard1 r:core_node3 
x:hdfsbackuprestore_shard1_replica_n1] o.a.s.u.UpdateLog Initializing 
UpdateLog: dataDir=null defaultSyncLevel=FLUSH numRecordsToKeep=100 
maxNumLogsToKeep=10 numVersionBuckets=65536
   [junit4]   2> 334902 INFO  (qtp547918623-3752) [n:127.0.0.1:44383_solr 
c:hdfsbackuprestore s:shard1 r:core_node3 
x:hdfsbackuprestore_shard1_replica_n1] o.a.s.u.CommitTracker Hard AutoCommit: 
disabled
   [junit4]   2> 334902 INFO  (qtp547918623-3752) [n:127.0.0.1:44383_solr 
c:hdfsbackuprestore s:shard1 r:core_node3 
x:hdfsbackuprestore_shard1_replica_n1] o.a.s.u.CommitTracker Soft AutoCommit: 
disabled
   [junit4]   2> 334904 INFO  (qtp547918623-3752) [n:127.0.0.1:44383_solr 
c:hdfsbackuprestore s:shard1 r:core_node3 
x:hdfsbackuprestore_shard1_replica_n1] o.a.s.s.SolrIndexSearcher Opening 
[Searcher@58a408b8[hdfsbackuprestore_shard1_replica_n1] main]
   [junit4]   2> 334905 INFO  (qtp547918623-3755) [n:127.0.0.1:44383_solr 
c:hdfsbackuprestore s:shard1 r:core_node9 
x:hdfsbackuprestore_shard1_replica_p6] o.a.s.u.CommitTracker Hard AutoCommit: 
disabled
   [junit4]   2> 334905 INFO  (qtp547918623-3755) [n:127.0.0.1:44383_solr 
c:hdfsbackuprestore s:shard1 r:core_node9 
x:hdfsbackuprestore_shard1_replica_p6] o.a.s.u.CommitTracker Soft AutoCommit: 
disabled
   [junit4]   2> 334907 INFO  (qtp547918623-3755) [n:127.0.0.1:44383_solr 
c:hdfsbackuprestore s:shard1 r:core_node9 
x:hdfsbackuprestore_shard1_replica_p6] o.a.s.s.SolrIndexSearcher Opening 
[Searcher@7278d33e[hdfsbackuprestore_shard1_replica_p6] main]
   [junit4]   2> 334907 INFO  (qtp547918623-3750) [n:127.0.0.1:44383_solr 
c:hdfsbackuprestore s:shard2 r:core_node16 
x:hdfsbackuprestore_shard2_replica_p14] o.a.s.u.CommitTracker Hard AutoCommit: 
disabled
   [junit4]   2> 334907 INFO  (qtp547918623-3750) [n:127.0.0.1:44383_solr 
c:hdfsbackuprestore s:shard2 r:core_node16 
x:hdfsbackuprestore_shard2_replica_p14] o.a.s.u.CommitTracker Soft AutoCommit: 
disabled
   [junit4]   2> 334908 INFO  (qtp547918623-3750) [n:127.0.0.1:44383_solr 
c:hdfsbackuprestore s:shard2 r:core_node16 
x:hdfsbackuprestore_shard2_replica_p14] o.a.s.s.SolrIndexSearcher Opening 
[Searcher@35481fd1[hdfsbackuprestore_shard2_replica_p14] main]
   [junit4]   2> 334911 INFO  (qtp547918623-3752) [n:127.0.0.1:44383_solr 
c:hdfsbackuprestore s:shard1 r:core_node3 
x:hdfsbackuprestore_shard1_replica_n1] o.a.s.r.ManagedResourceStorage 
Configured ZooKeeperStorageIO with znodeBase: /configs/conf1
   [junit4]   2> 334912 INFO  (qtp547918623-3755) [n:127.0.0.1:44383_solr 
c:hdfsbackuprestore s:shard1 r:core_node9 
x:hdfsbackuprestore_shard1_replica_p6] o.a.s.r.ManagedResourceStorage 
Configured ZooKeeperStorageIO with znodeBase: /configs/conf1
   [junit4]   2> 334912 INFO  (qtp547918623-3752) [n:127.0.0.1:44383_solr 
c:hdfsbackuprestore s:shard1 r:core_node3 
x:hdfsbackuprestore_shard1_replica_n1] o.a.s.r.ManagedResourceStorage Loaded 
null at path _rest_managed.json using ZooKeeperStorageIO:path=/configs/conf1
   [junit4]   2> 334912 INFO  (qtp547918623-3750) [n:127.0.0.1:44383_solr 
c:hdfsbackuprestore s:shard2 r:core_node16 
x:hdfsbackuprestore_shard2_replica_p14] o.a.s.r.ManagedResourceStorage 
Configured ZooKeeperStorageIO with znodeBase: /configs/conf1
   [junit4]   2> 334912 INFO  (qtp547918623-3755) [n:127.0.0.1:44383_solr 
c:hdfsbackuprestore s:shard1 r:core_node9 
x:hdfsbackuprestore_shard1_replica_p6] o.a.s.r.ManagedResourceStorage Loaded 
null at path _rest_managed.json using ZooKeeperStorageIO:path=/configs/conf1
   [junit4]   2> 334912 INFO  (qtp547918623-3750) [n:127.0.0.1:44383_solr 
c:hdfsbackuprestore s:shard2 r:core_node16 
x:hdfsbackuprestore_shard2_replica_p14] o.a.s.r.ManagedResourceStorage Loaded 
null at path _rest_managed.json using ZooKeeperStorageIO:path=/configs/conf1
   [junit4]   2> 334912 INFO  (qtp547918623-3752) [n:127.0.0.1:44383_solr 
c:hdfsbackuprestore s:shard1 r:core_node3 
x:hdfsbackuprestore_shard1_replica_n1] o.a.s.h.ReplicationHandler Commits will 
be reserved for 10000ms.
   [junit4]   2> 334912 INFO  (qtp547918623-3755) [n:127.0.0.1:44383_solr 
c:hdfsbackuprestore s:shard1 r:core_node9 
x:hdfsbackuprestore_shard1_replica_p6] o.a.s.h.ReplicationHandler Commits will 
be reserved for 10000ms.
   [junit4]   2> 334912 INFO  (qtp547918623-3750) [n:127.0.0.1:44383_solr 
c:hdfsbackuprestore s:shard2 r:core_node16 
x:hdfsbackuprestore_shard2_replica_p14] o.a.s.h.ReplicationHandler Commits will 
be reserved for 10000ms.
   [junit4]   2> 334915 INFO  (qtp547918623-3752) [n:127.0.0.1:44383_solr 
c:hdfsbackuprestore s:shard1 r:core_node3 
x:hdfsbackuprestore_shard1_replica_n1] o.a.s.u.UpdateLog Could not find max 
version in index or recent updates, using new clock 1616982349084360704
   [junit4]   2> 334917 INFO  (qtp547918623-3749) [n:127.0.0.1:44383_solr 
c:hdfsbackuprestore s:shard2 r:core_node11 
x:hdfsbackuprestore_shard2_replica_n8] o.a.s.u.UpdateHandler Using UpdateLog 
implementation: org.apache.solr.update.UpdateLog
   [junit4]   2> 334917 INFO  (qtp547918623-3749) [n:127.0.0.1:44383_solr 
c:hdfsbackuprestore s:shard2 r:core_node11 
x:hdfsbackuprestore_shard2_replica_n8] o.a.s.u.UpdateLog Initializing 
UpdateLog: dataDir=null defaultSyncLevel=FLUSH numRecordsToKeep=100 
maxNumLogsToKeep=10 numVersionBuckets=65536
   [junit4]   2> 334917 INFO  
(searcherExecutor-1593-thread-1-processing-n:127.0.0.1:44383_solr 
x:hdfsbackuprestore_shard1_replica_n1 c:hdfsbackuprestore s:shard1 
r:core_node3) [n:127.0.0.1:44383_solr c:hdfsbackuprestore s:shard1 r:core_node3 
x:hdfsbackuprestore_shard1_replica_n1] o.a.s.c.SolrCore 
[hdfsbackuprestore_shard1_replica_n1] Registered new searcher 
Searcher@58a408b8[hdfsbackuprestore_shard1_replica_n1] 
main{ExitableDirectoryReader(UninvertingDirectoryReader())}
   [junit4]   2> 334918 INFO  (qtp547918623-3749) [n:127.0.0.1:44383_solr 
c:hdfsbackuprestore s:shard2 r:core_node11 
x:hdfsbackuprestore_shard2_replica_n8] o.a.s.u.CommitTracker Hard AutoCommit: 
disabled
   [junit4]   2> 334918 INFO  (qtp547918623-3749) [n:127.0.0.1:44383_solr 
c:hdfsbackuprestore s:shard2 r:core_node11 
x:hdfsbackuprestore_shard2_replica_n8] o.a.s.u.CommitTracker Soft AutoCommit: 
disabled
   [junit4]   2> 334918 INFO  
(searcherExecutor-1594-thread-1-processing-n:127.0.0.1:44383_solr 
x:hdfsbackuprestore_shard1_replica_p6 c:hdfsbackuprestore s:shard1 
r:core_node9) [n:127.0.0.1:44383_solr c:hdfsbackuprestore s:shard1 r:core_node9 
x:hdfsbackuprestore_shard1_replica_p6] o.a.s.c.SolrCore 
[hdfsbackuprestore_shard1_replica_p6] Registered new searcher 
Searcher@7278d33e[hdfsbackuprestore_shard1_replica_p6] 
main{ExitableDirectoryReader(UninvertingDirectoryReader())}
   [junit4]   2> 334920 INFO  
(searcherExecutor-1595-thread-1-processing-n:127.0.0.1:44383_solr 
x:hdfsbackuprestore_shard2_replica_p14 c:hdfsbackuprestore s:shard2 
r:core_node16) [n:127.0.0.1:44383_solr c:hdfsbackuprestore s:shard2 
r:core_node16 x:hdfsbackuprestore_shard2_replica_p14] o.a.s.c.SolrCore 
[hdfsbackuprestore_shard2_replica_p14] Registered new searcher 
Searcher@35481fd1[hdfsbackuprestore_shard2_replica_p14] 
main{ExitableDirectoryReader(UninvertingDirectoryReader())}
   [junit4]   2> 334921 INFO  (qtp547918623-3755) [n:127.0.0.1:44383_solr 
c:hdfsbackuprestore s:shard1 r:core_node9 
x:hdfsbackuprestore_shard1_replica_p6] o.a.s.c.ZkController 
hdfsbackuprestore_shard1_replica_p6 starting background replication from leader
   [junit4]   2> 334921 INFO  (qtp547918623-3755) [n:127.0.0.1:44383_solr 
c:hdfsbackuprestore s:shard1 r:core_node9 
x:hdfsbackuprestore_shard1_replica_p6] o.a.s.c.ReplicateFromLeader Will start 
replication from leader with poll interval: 00:00:01
   [junit4]   2> 334921 INFO  (qtp547918623-3749) [n:127.0.0.1:44383_solr 
c:hdfsbackuprestore s:shard2 r:core_node11 
x:hdfsbackuprestore_shard2_replica_n8] o.a.s.s.SolrIndexSearcher Opening 
[Searcher@2e4a6a9f[hdfsbackuprestore_shard2_replica_n8] main]
   [junit4]   2> 334921 INFO  (qtp547918623-3752) [n:127.0.0.1:44383_solr 
c:hdfsbackuprestore s:shard1 r:core_node3 
x:hdfsbackuprestore_shard1_replica_n1] o.a.s.c.ZkShardTerms Successful update 
of terms at /collections/hdfsbackuprestore/terms/shard1 to 
Terms{values={core_node3=0}, version=0}
   [junit4]   2> 334922 INFO  (qtp547918623-3749) [n:127.0.0.1:44383_solr 
c:hdfsbackuprestore s:shard2 r:core_node11 
x:hdfsbackuprestore_shard2_replica_n8] o.a.s.r.ManagedResourceStorage 
Configured ZooKeeperStorageIO with znodeBase: /configs/conf1
   [junit4]   2> 334922 INFO  (qtp547918623-3749) [n:127.0.0.1:44383_solr 
c:hdfsbackuprestore s:shard2 r:core_node11 
x:hdfsbackuprestore_shard2_replica_n8] o.a.s.r.ManagedResourceStorage Loaded 
null at path _rest_managed.json using ZooKeeperStorageIO:path=/configs/conf1
   [junit4]   2> 334922 INFO  (qtp547918623-3749) [n:127.0.0.1:44383_solr 
c:hdfsbackuprestore s:shard2 r:core_node11 
x:hdfsbackuprestore_shard2_replica_n8] o.a.s.h.ReplicationHandler Commits will 
be reserved for 10000ms.
   [junit4]   2> 334923 INFO  (qtp547918623-3750) [n:127.0.0.1:44383_solr 
c:hdfsbackuprestore s:shard2 r:core_node16 
x:hdfsbackuprestore_shard2_replica_p14] o.a.s.c.ZkController 
hdfsbackuprestore_shard2_replica_p14 starting background replication from leader
   [junit4]   2> 334923 INFO  (qtp547918623-3750) [n:127.0.0.1:44383_solr 
c:hdfsbackuprestore s:shard2 r:core_node16 
x:hdfsbackuprestore_shard2_replica_p14] o.a.s.c.ReplicateFromLeader Will start 
replication from leader with poll interval: 00:00:01
   [junit4]   2> 334923 INFO  (qtp547918623-3752) [n:127.0.0.1:44383_solr 
c:hdfsbackuprestore s:shard1 r:core_node3 
x:hdfsbackuprestore_shard1_replica_n1] o.a.s.c.ShardLeaderElectionContext 
Waiting until we see more replicas up for shard shard1: total=3 found=1 
timeoutin=9999ms
   [junit4]   2> 334923 INFO  (qtp547918623-3755) [n:127.0.0.1:44383_solr 
c:hdfsbackuprestore s:shard1 r:core_node9 
x:hdfsbackuprestore_shard1_replica_p6] o.a.s.h.ReplicationHandler Poll 
scheduled at an interval of 1000ms
   [junit4]   2> 334923 INFO  (qtp547918623-3755) [n:127.0.0.1:44383_solr 
c:hdfsbackuprestore s:shard1 r:core_node9 
x:hdfsbackuprestore_shard1_replica_p6] o.a.s.h.ReplicationHandler Commits will 
be reserved for 10000ms.
   [junit4]   2> 334924 INFO  
(searcherExecutor-1592-thread-1-processing-n:127.0.0.1:44383_solr 
x:hdfsbackuprestore_shard2_replica_n8 c:hdfsbackuprestore s:shard2 
r:core_node11) [n:127.0.0.1:44383_solr c:hdfsbackuprestore s:shard2 
r:core_node11 x:hdfsbackuprestore_shard2_replica_n8] o.a.s.c.SolrCore 
[hdfsbackuprestore_shard2_replica_n8] Registered new searcher 
Searcher@2e4a6a9f[hdfsbackuprestore_shard2_replica_n8] 
main{ExitableDirectoryReader(UninvertingDirectoryReader())}
   [junit4]   2> 334924 INFO  (qtp547918623-3749) [n:127.0.0.1:44383_solr 
c:hdfsbackuprestore s:shard2 r:core_node11 
x:hdfsbackuprestore_shard2_replica_n8] o.a.s.u.UpdateLog Could not find max 
version in index or recent updates, using new clock 1616982349093797888
   [junit4]   2> 334924 INFO  (qtp547918623-3750) [n:127.0.0.1:44383_solr 
c:hdfsbackuprestore s:shard2 r:core_node16 
x:hdfsbackuprestore_shard2_replica_p14] o.a.s.h.ReplicationHandler Poll 
scheduled at an interval of 1000ms
   [junit4]   2> 334924 INFO  (qtp547918623-3750) [n:127.0.0.1:44383_solr 
c:hdfsbackuprestore s:shard2 r:core_node16 
x:hdfsbackuprestore_shard2_replica_p14] o.a.s.h.ReplicationHandler Commits will 
be reserved for 10000ms.
   [junit4]   2> 334927 INFO  (qtp547918623-3749) [n:127.0.0.1:44383_solr 
c:hdfsbackuprestore s:shard2 r:core_node11 
x:hdfsbackuprestore_shard2_replica_n8] o.a.s.c.ZkShardTerms Successful update 
of terms at /collections/hdfsbackuprestore/terms/shard2 to 
Terms{values={core_node11=0}, version=0}
   [junit4]   2> 334929 INFO  (qtp547918623-3749) [n:127.0.0.1:44383_solr 
c:hdfsbackuprestore s:shard2 r:core_node11 
x:hdfsbackuprestore_shard2_replica_n8] o.a.s.c.ShardLeaderElectionContext 
Waiting until we see more replicas up for shard shard2: total=3 found=1 
timeoutin=9999ms
   [junit4]   2> 338997 ERROR (indexFetcher-1613-thread-1) [    ] 
o.a.s.h.ReplicationHandler Index fetch failed 
:org.apache.solr.common.SolrException: No registered leader was found after 
waiting for 4000ms , collection: hdfsbackuprestore slice: shard2 saw 
state=DocCollection(hdfsbackuprestore//collections/hdfsbackuprestore/state.json/3)={
   [junit4]   2>   "pullReplicas":"1",
   [junit4]   2>   "replicationFactor":"2",
   [junit4]   2>   "shards":{
   [junit4]   2>     "shard1":{
   [junit4]   2>       "range":"80000000-ffffffff",
   [junit4]   2>       "state":"active",
   [junit4]   2>       "replicas":{
   [junit4]   2>         "core_node3":{
   [junit4]   2>           "core":"hdfsbackuprestore_shard1_replica_n1",
   [junit4]   2>           "base_url":"http://127.0.0.1:44383/solr";,
   [junit4]   2>           "node_name":"127.0.0.1:44383_solr",
   [junit4]   2>           "state":"down",
   [junit4]   2>           "type":"NRT",
   [junit4]   2>           "force_set_state":"false"},
   [junit4]   2>         "core_node5":{
   [junit4]   2>           "core":"hdfsbackuprestore_shard1_replica_n2",
   [junit4]   2>           "base_url":"http://127.0.0.1:41447/solr";,
   [junit4]   2>           "node_name":"127.0.0.1:41447_solr",
   [junit4]   2>           "state":"down",
   [junit4]   2>           "type":"NRT",
   [junit4]   2>           "force_set_state":"false"},
   [junit4]   2>         "core_node7":{
   [junit4]   2>           "core":"hdfsbackuprestore_shard1_replica_t4",
   [junit4]   2>           "base_url":"http://127.0.0.1:41447/solr";,
   [junit4]   2>           "node_name":"127.0.0.1:41447_solr",
   [junit4]   2>           "state":"down",
   [junit4]   2>           "type":"TLOG",
   [junit4]   2>           "force_set_state":"false"},
   [junit4]   2>         "core_node9":{
   [junit4]   2>           "core":"hdfsbackuprestore_shard1_replica_p6",
   [junit4]   2>           "base_url":"http://127.0.0.1:44383/solr";,
   [junit4]   2>           "node_name":"127.0.0.1:44383_solr",
   [junit4]   2>           "state":"down",
   [junit4]   2>           "type":"PULL",
   [junit4]   2>           "force_set_state":"false"}}},
   [junit4]   2>     "shard2":{
   [junit4]   2>       "range":"0-7fffffff",
   [junit4]   2>       "state":"active",
   [junit4]   2>       "replicas":{
   [junit4]   2>         "core_node11":{
   [junit4]   2>           "core":"hdfsbackuprestore_shard2_replica_n8",
   [junit4]   2>           "base_url":"http://127.0.0.1:44383/solr";,
   [junit4]   2>           "node_name":"127.0.0.1:44383_solr",
   [junit4]   2>           "state":"down",
   [junit4]   2>           "type":"NRT",
   [junit4]   2>           "force_set_state":"false"},
   [junit4]   2>         "core_node13":{
   [junit4]   2>           "core":"hdfsbackuprestore_shard2_replica_n10",
   [junit4]   2>           "base_url":"http://127.0.0.1:41447/solr";,
   [junit4]   2>           "node_name":"127.0.0.1:41447_solr",
   [junit4]   2>           "state":"down",
   [junit4]   2>           "type":"NRT",
   [junit4]   2>           "force_set_state":"false"},
   [junit4]   2>         "core_node15":{
   [junit4]   2>           "core":"hdfsbackuprestore_shard2_replica_t12",
   [junit4]   2>           "base_url":"http://127.0.0.1:41447/solr";,
   [junit4]   2>           "node_name":"127.0.0.1:41447_solr",
   [junit4]   2>           "state":"down",
   [junit4]   2>           "type":"TLOG",
   [junit4]   2>           "force_set_state":"false"},
   [junit4]   2>         "core_node16":{
   [junit4]   2>           "core":"hdfsbackuprestore_shard2_replica_p14",
   [junit4]   2>           "base_url":"http://127.0.0.1:44383/solr";,
   [junit4]   2>           "node_name":"127.0.0.1:44383_solr",
   [junit4]   2>           "state":"down",
   [junit4]   2>           "type":"PULL",
   [junit4]   2>           "force_set_state":"false"}}}},
   [junit4]   2>   "router":{"name":"compositeId"},
   [junit4]   2>   "maxShardsPerNode":"4",
   [junit4]   2>   "autoAddReplicas":"true",
   [junit4]   2>   "nrtReplicas":"2",
   [junit4]   2>   "tlogReplicas":"1"} with live_nodes=[127.0.0.1:41447_solr, 
127.0.0.1:44383_solr]
   [junit4]   2>        at 
org.apache.solr.common.cloud.ZkStateReader.getLeaderRetry(ZkStateReader.java:866)
   [junit4]   2>        at 
org.apache.solr.common.cloud.ZkStateReader.getLeaderRetry(ZkStateReader.java:850)
   [junit4]   2>        at 
org.apache.solr.handler.IndexFetcher.getLeaderReplica(IndexFetcher.java:689)
   [junit4]   2>        at 
org.apache.solr.handler.IndexFetcher.fetchLatestIndex(IndexFetcher.java:382)
   [junit4]   2>        at 
org.apache.solr.handler.IndexFetcher.fetchLatestIndex(IndexFetcher.java:347)
   [junit4]   2>        at 
org.apache.solr.handler.ReplicationHandler.doFetch(ReplicationHandler.java:421)
   [junit4]   2>        at 
org.apache.solr.handler.ReplicationHandler.lambda$setupPolling$13(ReplicationHandler.java:1156)
   [junit4]   2>        at 
java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:511)
   [junit4]   2>        at 
java.util.concurrent.FutureTask.runAndReset(FutureTask.java:308)
   [junit4]   2>        at 
java.util.concurrent.ScheduledThreadPoolExecutor$ScheduledFutureTask.access$301(ScheduledThreadPoolExecutor.java:180)
   [junit4]   2>        at 
java.util.concurrent.ScheduledThreadPoolExecutor$ScheduledFutureTask.run(ScheduledThreadPoolExecutor.java:294)
   [junit4]   2>        at 
java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
   [junit4]   2>        at 
java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
   [junit4]   2>        at java.lang.Thread.run(Thread.java:748)
   [junit4]   2> 
   [junit4]   2> 338997 INFO  (indexFetcher-1613-thread-1) [    ] 
o.a.s.h.IndexFetcher Last replication failed, so I'll force replication
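The "No registered leader was found" errors above come from the background 
index fetchers (the ReplicateFromLeader pollers started earlier for the PULL 
replicas): before every poll the fetcher resolves the shard leader through 
ZkStateReader, and at this point no replica has registered as leader yet. A 
minimal sketch of that lookup, assuming a CloudSolrClient already pointed at 
the test cluster's ZooKeeper; the ZK address, class name and main() wrapper 
are illustrative, not the test's actual code:

    import java.util.Collections;
    import java.util.Optional;
    import org.apache.solr.client.solrj.impl.CloudSolrClient;
    import org.apache.solr.common.cloud.Replica;
    import org.apache.solr.common.cloud.ZkStateReader;

    public class LeaderLookupSketch {
      public static void main(String[] args) throws Exception {
        // Hypothetical ZK address; the test actually uses a randomized port.
        try (CloudSolrClient client = new CloudSolrClient.Builder(
            Collections.singletonList("127.0.0.1:2181"), Optional.empty()).build()) {
          client.connect();
          ZkStateReader reader = client.getZkStateReader();
          // Same call as IndexFetcher.getLeaderReplica(): wait up to 4000ms
          // for a registered leader, else throw SolrException as logged above.
          Replica leader = reader.getLeaderRetry("hdfsbackuprestore", "shard2", 4000);
          System.out.println("leader: " + leader.getCoreUrl());
        }
      }
    }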
   [junit4]   2> 339989 ERROR (indexFetcher-1611-thread-1) [    ] 
o.a.s.h.ReplicationHandler Index fetch failed 
:org.apache.solr.common.SolrException: No registered leader was found after 
waiting for 4000ms , collection: hdfsbackuprestore slice: shard1 saw 
state=DocCollection(hdfsbackuprestore//collections/hdfsbackuprestore/state.json/3)={
   [junit4]   2>   "pullReplicas":"1",
   [junit4]   2>   "replicationFactor":"2",
   [junit4]   2>   "shards":{
   [junit4]   2>     "shard1":{
   [junit4]   2>       "range":"80000000-ffffffff",
   [junit4]   2>       "state":"active",
   [junit4]   2>       "replicas":{
   [junit4]   2>         "core_node3":{
   [junit4]   2>           "core":"hdfsbackuprestore_shard1_replica_n1",
   [junit4]   2>           "base_url":"http://127.0.0.1:44383/solr";,
   [junit4]   2>           "node_name":"127.0.0.1:44383_solr",
   [junit4]   2>           "state":"down",
   [junit4]   2>           "type":"NRT",
   [junit4]   2>           "force_set_state":"false"},
   [junit4]   2>         "core_node5":{
   [junit4]   2>           "core":"hdfsbackuprestore_shard1_replica_n2",
   [junit4]   2>           "base_url":"http://127.0.0.1:41447/solr";,
   [junit4]   2>           "node_name":"127.0.0.1:41447_solr",
   [junit4]   2>           "state":"down",
   [junit4]   2>           "type":"NRT",
   [junit4]   2>           "force_set_state":"false"},
   [junit4]   2>         "core_node7":{
   [junit4]   2>           "core":"hdfsbackuprestore_shard1_replica_t4",
   [junit4]   2>           "base_url":"http://127.0.0.1:41447/solr";,
   [junit4]   2>           "node_name":"127.0.0.1:41447_solr",
   [junit4]   2>           "state":"down",
   [junit4]   2>           "type":"TLOG",
   [junit4]   2>           "force_set_state":"false"},
   [junit4]   2>         "core_node9":{
   [junit4]   2>           "core":"hdfsbackuprestore_shard1_replica_p6",
   [junit4]   2>           "base_url":"http://127.0.0.1:44383/solr";,
   [junit4]   2>           "node_name":"127.0.0.1:44383_solr",
   [junit4]   2>           "state":"down",
   [junit4]   2>           "type":"PULL",
   [junit4]   2>           "force_set_state":"false"}}},
   [junit4]   2>     "shard2":{
   [junit4]   2>       "range":"0-7fffffff",
   [junit4]   2>       "state":"active",
   [junit4]   2>       "replicas":{
   [junit4]   2>         "core_node11":{
   [junit4]   2>           "core":"hdfsbackuprestore_shard2_replica_n8",
   [junit4]   2>           "base_url":"http://127.0.0.1:44383/solr";,
   [junit4]   2>           "node_name":"127.0.0.1:44383_solr",
   [junit4]   2>           "state":"down",
   [junit4]   2>           "type":"NRT",
   [junit4]   2>           "force_set_state":"false"},
   [junit4]   2>         "core_node13":{
   [junit4]   2>           "core":"hdfsbackuprestore_shard2_replica_n10",
   [junit4]   2>           "base_url":"http://127.0.0.1:41447/solr";,
   [junit4]   2>           "node_name":"127.0.0.1:41447_solr",
   [junit4]   2>           "state":"down",
   [junit4]   2>           "type":"NRT",
   [junit4]   2>           "force_set_state":"false"},
   [junit4]   2>         "core_node15":{
   [junit4]   2>           "core":"hdfsbackuprestore_shard2_replica_t12",
   [junit4]   2>           "base_url":"http://127.0.0.1:41447/solr";,
   [junit4]   2>           "node_name":"127.0.0.1:41447_solr",
   [junit4]   2>           "state":"down",
   [junit4]   2>           "type":"TLOG",
   [junit4]   2>           "force_set_state":"false"},
   [junit4]   2>         "core_node16":{
   [junit4]   2>           "core":"hdfsbackuprestore_shard2_replica_p14",
   [junit4]   2>           "base_url":"http://127.0.0.1:44383/solr";,
   [junit4]   2>           "node_name":"127.0.0.1:44383_solr",
   [junit4]   2>           "state":"down",
   [junit4]   2>           "type":"PULL",
   [junit4]   2>           "force_set_state":"false"}}}},
   [junit4]   2>   "router":{"name":"compositeId"},
   [junit4]   2>   "maxShardsPerNode":"4",
   [junit4]   2>   "autoAddReplicas":"true",
   [junit4]   2>   "nrtReplicas":"2",
   [junit4]   2>   "tlogReplicas":"1"} with live_nodes=[127.0.0.1:41447_solr, 
127.0.0.1:44383_solr]
   [junit4]   2>        at 
org.apache.solr.common.cloud.ZkStateReader.getLeaderRetry(ZkStateReader.java:866)
   [junit4]   2>        at 
org.apache.solr.common.cloud.ZkStateReader.getLeaderRetry(ZkStateReader.java:850)
   [junit4]   2>        at 
org.apache.solr.handler.IndexFetcher.getLeaderReplica(IndexFetcher.java:689)
   [junit4]   2>        at 
org.apache.solr.handler.IndexFetcher.fetchLatestIndex(IndexFetcher.java:382)
   [junit4]   2>        at 
org.apache.solr.handler.IndexFetcher.fetchLatestIndex(IndexFetcher.java:347)
   [junit4]   2>        at 
org.apache.solr.handler.ReplicationHandler.doFetch(ReplicationHandler.java:421)
   [junit4]   2>        at 
org.apache.solr.handler.ReplicationHandler.lambda$setupPolling$13(ReplicationHandler.java:1156)
   [junit4]   2>        at 
java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:511)
   [junit4]   2>        at 
java.util.concurrent.FutureTask.runAndReset(FutureTask.java:308)
   [junit4]   2>        at 
java.util.concurrent.ScheduledThreadPoolExecutor$ScheduledFutureTask.access$301(ScheduledThreadPoolExecutor.java:180)
   [junit4]   2>        at 
java.util.concurrent.ScheduledThreadPoolExecutor$ScheduledFutureTask.run(ScheduledThreadPoolExecutor.java:294)
   [junit4]   2>        at 
java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
   [junit4]   2>        at 
java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
   [junit4]   2>        at java.lang.Thread.run(Thread.java:748)
   [junit4]   2> 
   [junit4]   2> 339989 INFO  (indexFetcher-1611-thread-1) [    ] 
o.a.s.h.IndexFetcher Last replication failed, so I'll force replication
   [junit4]   2> 343089 ERROR (indexFetcher-1613-thread-1) [    ] 
o.a.s.h.ReplicationHandler Index fetch failed 
:org.apache.solr.common.SolrException: No registered leader was found after 
waiting for 4000ms , collection: hdfsbackuprestore slice: shard2 saw 
state=DocCollection(hdfsbackuprestore//collections/hdfsbackuprestore/state.json/3)={
   [junit4]   2>   "pullReplicas":"1",
   [junit4]   2>   "replicationFactor":"2",
   [junit4]   2>   "shards":{
   [junit4]   2>     "shard1":{
   [junit4]   2>       "range":"80000000-ffffffff",
   [junit4]   2>       "state":"active",
   [junit4]   2>       "replicas":{
   [junit4]   2>         "core_node3":{
   [junit4]   2>           "core":"hdfsbackuprestore_shard1_replica_n1",
   [junit4]   2>           "base_url":"http://127.0.0.1:44383/solr";,
   [junit4]   2>           "node_name":"127.0.0.1:44383_solr",
   [junit4]   2>           "state":"down",
   [junit4]   2>           "type":"NRT",
   [junit4]   2>           "force_set_state":"false"},
   [junit4]   2>         "core_node5":{
   [junit4]   2>           "core":"hdfsbackuprestore_shard1_replica_n2",
   [junit4]   2>           "base_url":"http://127.0.0.1:41447/solr";,
   [junit4]   2>           "node_name":"127.0.0.1:41447_solr",
   [junit4]   2>           "state":"down",
   [junit4]   2>           "type":"NRT",
   [junit4]   2>           "force_set_state":"false"},
   [junit4]   2>         "core_node7":{
   [junit4]   2>           "core":"hdfsbackuprestore_shard1_replica_t4",
   [junit4]   2>           "base_url":"http://127.0.0.1:41447/solr";,
   [junit4]   2>           "node_name":"127.0.0.1:41447_solr",
   [junit4]   2>           "state":"down",
   [junit4]   2>           "type":"TLOG",
   [junit4]   2>           "force_set_state":"false"},
   [junit4]   2>         "core_node9":{
   [junit4]   2>           "core":"hdfsbackuprestore_shard1_replica_p6",
   [junit4]   2>           "base_url":"http://127.0.0.1:44383/solr";,
   [junit4]   2>           "node_name":"127.0.0.1:44383_solr",
   [junit4]   2>           "state":"down",
   [junit4]   2>           "type":"PULL",
   [junit4]   2>           "force_set_state":"false"}}},
   [junit4]   2>     "shard2":{
   [junit4]   2>       "range":"0-7fffffff",
   [junit4]   2>       "state":"active",
   [junit4]   2>       "replicas":{
   [junit4]   2>         "core_node11":{
   [junit4]   2>           "core":"hdfsbackuprestore_shard2_replica_n8",
   [junit4]   2>           "base_url":"http://127.0.0.1:44383/solr";,
   [junit4]   2>           "node_name":"127.0.0.1:44383_solr",
   [junit4]   2>           "state":"down",
   [junit4]   2>           "type":"NRT",
   [junit4]   2>           "force_set_state":"false"},
   [junit4]   2>         "core_node13":{
   [junit4]   2>           "core":"hdfsbackuprestore_shard2_replica_n10",
   [junit4]   2>           "base_url":"http://127.0.0.1:41447/solr";,
   [junit4]   2>           "node_name":"127.0.0.1:41447_solr",
   [junit4]   2>           "state":"down",
   [junit4]   2>           "type":"NRT",
   [junit4]   2>           "force_set_state":"false"},
   [junit4]   2>         "core_node15":{
   [junit4]   2>           "core":"hdfsbackuprestore_shard2_replica_t12",
   [junit4]   2>           "base_url":"http://127.0.0.1:41447/solr";,
   [junit4]   2>           "node_name":"127.0.0.1:41447_solr",
   [junit4]   2>           "state":"down",
   [junit4]   2>           "type":"TLOG",
   [junit4]   2>           "force_set_state":"false"},
   [junit4]   2>         "core_node16":{
   [junit4]   2>           "core":"hdfsbackuprestore_shard2_replica_p14",
   [junit4]   2>           "base_url":"http://127.0.0.1:44383/solr";,
   [junit4]   2>           "node_name":"127.0.0.1:44383_solr",
   [junit4]   2>           "state":"down",
   [junit4]   2>           "type":"PULL",
   [junit4]   2>           "force_set_state":"false"}}}},
   [junit4]   2>   "router":{"name":"compositeId"},
   [junit4]   2>   "maxShardsPerNode":"4",
   [junit4]   2>   "autoAddReplicas":"true",
   [junit4]   2>   "nrtReplicas":"2",
   [junit4]   2>   "tlogReplicas":"1"} with live_nodes=[127.0.0.1:41447_solr, 
127.0.0.1:44383_solr]
   [junit4]   2>        at 
org.apache.solr.common.cloud.ZkStateReader.getLeaderRetry(ZkStateReader.java:866)
   [junit4]   2>        at 
org.apache.solr.common.cloud.ZkStateReader.getLeaderRetry(ZkStateReader.java:850)
   [junit4]   2>        at 
org.apache.solr.handler.IndexFetcher.getLeaderReplica(IndexFetcher.java:689)
   [junit4]   2>        at 
org.apache.solr.handler.IndexFetcher.fetchLatestIndex(IndexFetcher.java:382)
   [junit4]   2>        at 
org.apache.solr.handler.IndexFetcher.fetchLatestIndex(IndexFetcher.java:347)
   [junit4]   2>        at 
org.apache.solr.handler.ReplicationHandler.doFetch(ReplicationHandler.java:421)
   [junit4]   2>        at 
org.apache.solr.handler.ReplicationHandler.lambda$setupPolling$13(ReplicationHandler.java:1156)
   [junit4]   2>        at 
java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:511)
   [junit4]   2>        at 
java.util.concurrent.FutureTask.runAndReset(FutureTask.java:308)
   [junit4]   2>        at 
java.util.concurrent.ScheduledThreadPoolExecutor$ScheduledFutureTask.access$301(ScheduledThreadPoolExecutor.java:180)
   [junit4]   2>        at 
java.util.concurrent.ScheduledThreadPoolExecutor$ScheduledFutureTask.run(ScheduledThreadPoolExecutor.java:294)
   [junit4]   2>        at 
java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
   [junit4]   2>        at 
java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
   [junit4]   2>        at java.lang.Thread.run(Thread.java:748)
   [junit4]   2> 
   [junit4]   2> 343089 INFO  (indexFetcher-1613-thread-1) [    ] 
o.a.s.h.IndexFetcher Last replication failed, so I'll force replication
   [junit4]   2> 344060 ERROR (indexFetcher-1611-thread-1) [    ] 
o.a.s.h.ReplicationHandler Index fetch failed 
:org.apache.solr.common.SolrException: No registered leader was found after 
waiting for 4000ms , collection: hdfsbackuprestore slice: shard1 saw 
state=DocCollection(hdfsbackuprestore//collections/hdfsbackuprestore/state.json/3)={
   [junit4]   2>   "pullReplicas":"1",
   [junit4]   2>   "replicationFactor":"2",
   [junit4]   2>   "shards":{
   [junit4]   2>     "shard1":{
   [junit4]   2>       "range":"80000000-ffffffff",
   [junit4]   2>       "state":"active",
   [junit4]   2>       "replicas":{
   [junit4]   2>         "core_node3":{
   [junit4]   2>           "core":"hdfsbackuprestore_shard1_replica_n1",
   [junit4]   2>           "base_url":"http://127.0.0.1:44383/solr";,
   [junit4]   2>           "node_name":"127.0.0.1:44383_solr",
   [junit4]   2>           "state":"down",
   [junit4]   2>           "type":"NRT",
   [junit4]   2>           "force_set_state":"false"},
   [junit4]   2>         "core_node5":{
   [junit4]   2>           "core":"hdfsbackuprestore_shard1_replica_n2",
   [junit4]   2>           "base_url":"http://127.0.0.1:41447/solr";,
   [junit4]   2>           "node_name":"127.0.0.1:41447_solr",
   [junit4]   2>           "state":"down",
   [junit4]   2>           "type":"NRT",
   [junit4]   2>           "force_set_state":"false"},
   [junit4]   2>         "core_node7":{
   [junit4]   2>           "core":"hdfsbackuprestore_shard1_replica_t4",
   [junit4]   2>           "base_url":"http://127.0.0.1:41447/solr";,
   [junit4]   2>           "node_name":"127.0.0.1:41447_solr",
   [junit4]   2>           "state":"down",
   [junit4]   2>           "type":"TLOG",
   [junit4]   2>           "force_set_state":"false"},
   [junit4]   2>         "core_node9":{
   [junit4]   2>           "core":"hdfsbackuprestore_shard1_replica_p6",
   [junit4]   2>           "base_url":"http://127.0.0.1:44383/solr";,
   [junit4]   2>           "node_name":"127.0.0.1:44383_solr",
   [junit4]   2>           "state":"down",
   [junit4]   2>           "type":"PULL",
   [junit4]   2>           "force_set_state":"false"}}},
   [junit4]   2>     "shard2":{
   [junit4]   2>       "range":"0-7fffffff",
   [junit4]   2>       "state":"active",
   [junit4]   2>       "replicas":{
   [junit4]   2>         "core_node11":{
   [junit4]   2>           "core":"hdfsbackuprestore_shard2_replica_n8",
   [junit4]   2>           "base_url":"http://127.0.0.1:44383/solr";,
   [junit4]   2>           "node_name":"127.0.0.1:44383_solr",
   [junit4]   2>           "state":"down",
   [junit4]   2>           "type":"NRT",
   [junit4]   2>           "force_set_state":"false"},
   [junit4]   2>         "core_node13":{
   [junit4]   2>           "core":"hdfsbackuprestore_shard2_replica_n10",
   [junit4]   2>           "base_url":"http://127.0.0.1:41447/solr";,
   [junit4]   2>           "node_name":"127.0.0.1:41447_solr",
   [junit4]   2>           "state":"down",
   [junit4]   2>           "type":"NRT",
   [junit4]   2>           "force_set_state":"false"},
   [junit4]   2>         "core_node15":{
   [junit4]   2>           "core":"hdfsbackuprestore_shard2_replica_t12",
   [junit4]   2>           "base_url":"http://127.0.0.1:41447/solr";,
   [junit4]   2>           "node_name":"127.0.0.1:41447_solr",
   [junit4]   2>           "state":"down",
   [junit4]   2>           "type":"TLOG",
   [junit4]   2>           "force_set_state":"false"},
   [junit4]   2>         "core_node16":{
   [junit4]   2>           "core":"hdfsbackuprestore_shard2_replica_p14",
   [junit4]   2>           "base_url":"http://127.0.0.1:44383/solr";,
   [junit4]   2>           "node_name":"127.0.0.1:44383_solr",
   [junit4]   2>           "state":"down",
   [junit4]   2>           "type":"PULL",
   [junit4]   2>           "force_set_state":"false"}}}},
   [junit4]   2>   "router":{"name":"compositeId"},
   [junit4]   2>   "maxShardsPerNode":"4",
   [junit4]   2>   "autoAddReplicas":"true",
   [junit4]   2>   "nrtReplicas":"2",
   [junit4]   2>   "tlogReplicas":"1"} with live_nodes=[127.0.0.1:41447_solr, 
127.0.0.1:44383_solr]
   [junit4]   2>        at 
org.apache.solr.common.cloud.ZkStateReader.getLeaderRetry(ZkStateReader.java:866)
   [junit4]   2>        at 
org.apache.solr.common.cloud.ZkStateReader.getLeaderRetry(ZkStateReader.java:850)
   [junit4]   2>        at 
org.apache.solr.handler.IndexFetcher.getLeaderReplica(IndexFetcher.java:689)
   [junit4]   2>        at 
org.apache.solr.handler.IndexFetcher.fetchLatestIndex(IndexFetcher.java:382)
   [junit4]   2>        at 
org.apache.solr.handler.IndexFetcher.fetchLatestIndex(IndexFetcher.java:347)
   [junit4]   2>        at 
org.apache.solr.handler.ReplicationHandler.doFetch(ReplicationHandler.java:421)
   [junit4]   2>        at 
org.apache.solr.handler.ReplicationHandler.lambda$setupPolling$13(ReplicationHandler.java:1156)
   [junit4]   2>        at 
java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:511)
   [junit4]   2>        at 
java.util.concurrent.FutureTask.runAndReset(FutureTask.java:308)
   [junit4]   2>        at 
java.util.concurrent.ScheduledThreadPoolExecutor$ScheduledFutureTask.access$301(ScheduledThreadPoolExecutor.java:180)
   [junit4]   2>        at 
java.util.concurrent.ScheduledThreadPoolExecutor$ScheduledFutureTask.run(ScheduledThreadPoolExecutor.java:294)
   [junit4]   2>        at 
java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
   [junit4]   2>        at 
java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
   [junit4]   2>        at java.lang.Thread.run(Thread.java:748)
   [junit4]   2> 
   [junit4]   2> 344061 INFO  (indexFetcher-1611-thread-1) [    ] 
o.a.s.h.IndexFetcher Last replication failed, so I'll force replication
   [junit4]   2> 344938 INFO  (qtp547918623-3752) [n:127.0.0.1:44383_solr 
c:hdfsbackuprestore s:shard1 r:core_node3 
x:hdfsbackuprestore_shard1_replica_n1] o.a.s.c.ShardLeaderElectionContext Was 
waiting for replicas to come up, but they are taking too long - assuming they 
won't come back till later
   [junit4]   2> 344938 INFO  (qtp547918623-3752) [n:127.0.0.1:44383_solr 
c:hdfsbackuprestore s:shard1 r:core_node3 
x:hdfsbackuprestore_shard1_replica_n1] o.a.s.c.ShardLeaderElectionContext I may 
be the new leader - try and sync
   [junit4]   2> 344938 INFO  (qtp547918623-3752) [n:127.0.0.1:44383_solr 
c:hdfsbackuprestore s:shard1 r:core_node3 
x:hdfsbackuprestore_shard1_replica_n1] o.a.s.c.SyncStrategy Sync replicas to 
http://127.0.0.1:44383/solr/hdfsbackuprestore_shard1_replica_n1/
   [junit4]   2> 344939 INFO  (qtp547918623-3752) [n:127.0.0.1:44383_solr 
c:hdfsbackuprestore s:shard1 r:core_node3 
x:hdfsbackuprestore_shard1_replica_n1] o.a.s.u.PeerSync PeerSync: 
core=hdfsbackuprestore_shard1_replica_n1 url=http://127.0.0.1:44383/solr START 
replicas=[http://127.0.0.1:41447/solr/hdfsbackuprestore_shard1_replica_n2/, 
http://127.0.0.1:41447/solr/hdfsbackuprestore_shard1_replica_t4/] nUpdates=100
   [junit4]   2> 344939 INFO  (qtp547918623-3752) [n:127.0.0.1:44383_solr 
c:hdfsbackuprestore s:shard1 r:core_node3 
x:hdfsbackuprestore_shard1_replica_n1] o.a.s.u.PeerSync PeerSync: 
core=hdfsbackuprestore_shard1_replica_n1 url=http://127.0.0.1:44383/solr DONE.  
We have no versions.  sync failed.
   [junit4]   2> 344941 INFO  (qtp547918623-3752) [n:127.0.0.1:44383_solr 
c:hdfsbackuprestore s:shard1 r:core_node3 
x:hdfsbackuprestore_shard1_replica_n1] o.a.s.c.SyncStrategy Leader's attempt to 
sync with shard failed, moving to the next candidate
   [junit4]   2> 344941 INFO  (qtp547918623-3752) [n:127.0.0.1:44383_solr 
c:hdfsbackuprestore s:shard1 r:core_node3 
x:hdfsbackuprestore_shard1_replica_n1] o.a.s.c.ShardLeaderElectionContext We 
failed sync, but we have no versions - we can't sync in that case - we were 
active before, so become leader anyway
   [junit4]   2> 344942 INFO  (qtp547918623-3749) [n:127.0.0.1:44383_solr 
c:hdfsbackuprestore s:shard2 r:core_node11 
x:hdfsbackuprestore_shard2_replica_n8] o.a.s.c.ShardLeaderElectionContext Was 
waiting for replicas to come up, but they are taking too long - assuming they 
won't come back till later
   [junit4]   2> 344942 INFO  (qtp547918623-3749) [n:127.0.0.1:44383_solr 
c:hdfsbackuprestore s:shard2 r:core_node11 
x:hdfsbackuprestore_shard2_replica_n8] o.a.s.c.ShardLeaderElectionContext I may 
be the new leader - try and sync
   [junit4]   2> 344942 INFO  (qtp547918623-3749) [n:127.0.0.1:44383_solr 
c:hdfsbackuprestore s:shard2 r:core_node11 
x:hdfsbackuprestore_shard2_replica_n8] o.a.s.c.SyncStrategy Sync replicas to 
http://127.0.0.1:44383/solr/hdfsbackuprestore_shard2_replica_n8/
   [junit4]   2> 344943 INFO  (qtp547918623-3749) [n:127.0.0.1:44383_solr 
c:hdfsbackuprestore s:shard2 r:core_node11 
x:hdfsbackuprestore_shard2_replica_n8] o.a.s.u.PeerSync PeerSync: 
core=hdfsbackuprestore_shard2_replica_n8 url=http://127.0.0.1:44383/solr START 
replicas=[http://127.0.0.1:41447/solr/hdfsbackuprestore_shard2_replica_n10/, 
http://127.0.0.1:41447/solr/hdfsbackuprestore_shard2_replica_t12/] nUpdates=100
   [junit4]   2> 344943 INFO  (qtp547918623-3749) [n:127.0.0.1:44383_solr 
c:hdfsbackuprestore s:shard2 r:core_node11 
x:hdfsbackuprestore_shard2_replica_n8] o.a.s.u.PeerSync PeerSync: 
core=hdfsbackuprestore_shard2_replica_n8 url=http://127.0.0.1:44383/solr DONE.  
We have no versions.  sync failed.
   [junit4]   2> 344943 INFO  (qtp547918623-3752) [n:127.0.0.1:44383_solr 
c:hdfsbackuprestore s:shard1 r:core_node3 
x:hdfsbackuprestore_shard1_replica_n1] o.a.s.c.ShardLeaderElectionContext I am 
the new leader: 
http://127.0.0.1:44383/solr/hdfsbackuprestore_shard1_replica_n1/ shard1
   [junit4]   2> 344945 INFO  (qtp547918623-3749) [n:127.0.0.1:44383_solr 
c:hdfsbackuprestore s:shard2 r:core_node11 
x:hdfsbackuprestore_shard2_replica_n8] o.a.s.c.SyncStrategy Leader's attempt to 
sync with shard failed, moving to the next candidate
   [junit4]   2> 344945 INFO  (qtp547918623-3749) [n:127.0.0.1:44383_solr 
c:hdfsbackuprestore s:shard2 r:core_node11 
x:hdfsbackuprestore_shard2_replica_n8] o.a.s.c.ShardLeaderElectionContext We 
failed sync, but we have no versions - we can't sync in that case - we were 
active before, so become leader anyway
   [junit4]   2> 344947 INFO  (qtp547918623-3749) [n:127.0.0.1:44383_solr 
c:hdfsbackuprestore s:shard2 r:core_node11 
x:hdfsbackuprestore_shard2_replica_n8] o.a.s.c.ShardLeaderElectionContext I am 
the new leader: 
http://127.0.0.1:44383/solr/hdfsbackuprestore_shard2_replica_n8/ shard2
   [junit4]   2> 344969 INFO  (indexFetcher-1611-thread-1) [    ] 
o.a.s.h.IndexFetcher Replica core_node3 is leader but its state is down, 
skipping replication
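The message above is the flip side of the same race: core_node3 has just won 
the shard1 election, but its state in state.json is still "down" until 
ZkController publishes ACTIVE, so the fetcher skips the poll. Roughly the 
precondition being applied, as a sketch (class and method names here are 
illustrative, not Solr's actual code):

    import org.apache.solr.common.cloud.DocCollection;
    import org.apache.solr.common.cloud.Replica;

    public class LeaderStateCheckSketch {
      // A replica can hold the leader election node while its published
      // state is still DOWN; replication polling only proceeds once the
      // elected leader has also published state ACTIVE.
      static boolean leaderIsPollable(DocCollection coll, String shard) {
        Replica leader = coll.getLeader(shard);
        return leader != null && leader.getState() == Replica.State.ACTIVE;
      }
    }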
   [junit4]   2> 344995 INFO  (qtp547918623-3752) [n:127.0.0.1:44383_solr 
c:hdfsbackuprestore s:shard1 r:core_node3 
x:hdfsbackuprestore_shard1_replica_n1] o.a.s.c.ZkController I am the leader, no 
recovery necessary
   [junit4]   2> 345003 INFO  (qtp547918623-3752) [n:127.0.0.1:44383_solr 
c:hdfsbackuprestore s:shard1 r:core_node3 
x:hdfsbackuprestore_shard1_replica_n1] o.a.s.s.HttpSolrCall [admin] webapp=null 
path=/admin/cores 
params={qt=/admin/cores&collection.configName=conf1&newCollection=true&collection=hdfsbackuprestore&version=2&replicaType=NRT&property.customKey=customValue&coreNodeName=core_node3&name=hdfsbackuprestore_shard1_replica_n1&action=CREATE&numShards=2&shard=shard1&wt=javabin}
 status=0 QTime=11248
   [junit4]   2> 345010 INFO  (indexFetcher-1611-thread-1) [    ] 
o.a.s.h.IndexFetcher Last replication failed, so I'll force replication
   [junit4]   2> 345010 INFO  (indexFetcher-1611-thread-1) [    ] 
o.a.s.h.IndexFetcher Replica core_node3 is leader but its state is down, 
skipping replication
   [junit4]   2> 345015 INFO  (indexFetcher-1611-thread-1) [    ] 
o.a.s.h.IndexFetcher Last replication failed, so I'll force replication
   [junit4]   2> 345015 INFO  (indexFetcher-1611-thread-1) [    ] 
o.a.s.h.IndexFetcher Replica core_node3 is leader but its state is down, 
skipping replication
   [junit4]   2> 345021 INFO  (indexFetcher-1611-thread-1) [    ] 
o.a.s.h.IndexFetcher Last replication failed, so I'll force replication
   [junit4]   2> 345021 INFO  (indexFetcher-1611-thread-1) [    ] 
o.a.s.h.IndexFetcher Replica core_node3 is leader but its state is down, 
skipping replication
   [junit4]   2> 345027 INFO  (indexFetcher-1611-thread-1) [    ] 
o.a.s.h.IndexFetcher Last replication failed, so I'll force replication
   [junit4]   2> 345027 INFO  (indexFetcher-1611-thread-1) [    ] 
o.a.s.h.IndexFetcher Replica core_node3 is leader but its state is down, 
skipping replication
   [junit4]   2> 345032 INFO  (indexFetcher-1611-thread-1) [    ] 
o.a.s.h.IndexFetcher Last replication failed, so I'll force replication
   [junit4]   2> 345032 INFO  (indexFetcher-1611-thread-1) [    ] 
o.a.s.h.IndexFetcher Replica core_node3 is leader but its state is down, 
skipping replication
   [junit4]   2> 345037 INFO  (indexFetcher-1611-thread-1) [    ] 
o.a.s.h.IndexFetcher Last replication failed, so I'll force replication
   [junit4]   2> 345037 INFO  (indexFetcher-1611-thread-1) [    ] 
o.a.s.h.IndexFetcher Replica core_node3 is leader but its state is down, 
skipping replication
   [junit4]   2> 345042 INFO  (indexFetcher-1611-thread-1) [    ] 
o.a.s.h.IndexFetcher Last replication failed, so I'll force replication
   [junit4]   2> 345042 INFO  (indexFetcher-1611-thread-1) [    ] 
o.a.s.h.IndexFetcher Replica core_node3 is leader but its state is down, 
skipping replication
   [junit4]   2> 345098 INFO  (zkCallback-989-thread-3) [    ] 
o.a.s.c.c.ZkStateReader A cluster state change: [WatchedEvent 
state:SyncConnected type:NodeDataChanged 
path:/collections/hdfsbackuprestore/state.json] for collection 
[hdfsbackuprestore] has occurred - updating... (live nodes size: [2])
   [junit4]   2> 345106 INFO  (indexFetcher-1613-thread-1) [    ] 
o.a.s.h.IndexFetcher Replica core_node11 is leader but its state is down, 
skipping replication
   [junit4]   2> 345112 INFO  (indexFetcher-1613-thread-1) [    ] 
o.a.s.h.IndexFetcher Last replication failed, so I'll force replication
   [junit4]   2> 345112 INFO  (indexFetcher-1613-thread-1) [    ] 
o.a.s.h.IndexFetcher Replica core_node11 is leader but its state is down, 
skipping replication
   [junit4]   2> 345118 INFO  (indexFetcher-1613-thread-1) [    ] 
o.a.s.h.IndexFetcher Last replication failed, so I'll force replication
   [junit4]   2> 345118 INFO  (indexFetcher-1613-thread-1) [    ] 
o.a.s.h.IndexFetcher Replica core_node11 is leader but its state is down, 
skipping replication
   [junit4]   2> 345124 INFO  (indexFetcher-1613-thread-1) [    ] 
o.a.s.h.IndexFetcher Last replication failed, so I'll force replication
   [junit4]   2> 345124 INFO  (indexFetcher-1613-thread-1) [    ] 
o.a.s.h.IndexFetcher Replica core_node11 is leader but its state is down, 
skipping replication
   [junit4]   2> 345129 INFO  (indexFetcher-1613-thread-1) [    ] 
o.a.s.h.IndexFetcher Last replication failed, so I'll force replication
   [junit4]   2> 345129 INFO  (indexFetcher-1613-thread-1) [    ] 
o.a.s.h.IndexFetcher Replica core_node11 is leader but its state is down, 
skipping replication
   [junit4]   2> 345147 INFO  (qtp547918623-3749) [n:127.0.0.1:44383_solr 
c:hdfsbackuprestore s:shard2 r:core_node11 
x:hdfsbackuprestore_shard2_replica_n8] o.a.s.c.ZkController I am the leader, no 
recovery necessary
   [junit4]   2> 345186 INFO  (indexFetcher-1613-thread-1) [    ] 
o.a.s.h.IndexFetcher Last replication failed, so I'll force replication
   [junit4]

[...truncated too long message...]

ource files to 
/home/jenkins/workspace/Lucene-Solr-master-Linux/solr/build/solr-core/classes/java

common-solr.compile-core:

compile-core:

compile-test-framework:

-check-git-state:

-git-cleanroot:

-copy-git-state:

git-autoclean:

ivy-availability-check:
[loadresource] Do not set property disallowed.ivy.jars.list as its length is 0.

-ivy-fail-disallowed-ivy-version:

ivy-fail:

ivy-configure:
[ivy:configure] :: loading settings :: file = 
/home/jenkins/workspace/Lucene-Solr-master-Linux/lucene/top-level-ivy-settings.xml

resolve:

init:

compile-lucene-core:

compile-codecs:

-check-git-state:

-git-cleanroot:

-copy-git-state:

git-autoclean:

ivy-availability-check:
[loadresource] Do not set property disallowed.ivy.jars.list as its length is 0.

-ivy-fail-disallowed-ivy-version:

ivy-fail:

ivy-configure:
[ivy:configure] :: loading settings :: file = 
/home/jenkins/workspace/Lucene-Solr-master-Linux/lucene/top-level-ivy-settings.xml

resolve:

common.init:

compile-lucene-core:

init:

-clover.disable:

-clover.load:

-clover.classpath:

-clover.setup:

clover:

compile-core:

-clover.disable:

-clover.load:

-clover.classpath:

-clover.setup:

clover:

common.compile-core:

compile-core:

common.compile-test:
    [mkdir] Created dir: 
/home/jenkins/workspace/Lucene-Solr-master-Linux/solr/build/solr-core/classes/test
    [javac] Compiling 934 source files to 
/home/jenkins/workspace/Lucene-Solr-master-Linux/solr/build/solr-core/classes/test
    [javac] Note: Some input files use or override a deprecated API.
    [javac] Note: Recompile with -Xlint:deprecation for details.
    [javac] Note: Some input files use unchecked or unsafe operations.
    [javac] Note: Recompile with -Xlint:unchecked for details.
    [javac] Creating empty 
/home/jenkins/workspace/Lucene-Solr-master-Linux/solr/build/solr-core/classes/test/org/apache/solr/cloud/autoscaling/sim/package-info.class
     [copy] Copying 1 file to 
/home/jenkins/workspace/Lucene-Solr-master-Linux/solr/build/solr-core/classes/test

common-solr.compile-test:

compile-test:

BUILD SUCCESSFUL
Total time: 49 seconds
[repro] ant test-nocompile -Dtests.dups=5 -Dtests.maxfailures=5 
-Dtests.class="*.TestHdfsCloudBackupRestore" -Dtests.showOutput=onerror 
"-Dargs=-XX:-UseCompressedOops -XX:+UseParallelGC" 
-Dtests.seed=F18D3CB4E3862CCD -Dtests.multiplier=3 -Dtests.slow=true 
-Dtests.locale=vi -Dtests.timezone=SystemV/EST5EDT -Dtests.asserts=true 
-Dtests.file.encoding=UTF-8
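Note that this [repro] line reruns only the failing suite, five times, under 
the original seed F18D3CB4E3862CCD, locale vi and timezone SystemV/EST5EDT. 
As the summary further down shows (0/5 failed), all five runs passed, which 
suggests a timing-dependent flake in collection creation rather than 
something reproducible from the seed.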
Buildfile: /home/jenkins/workspace/Lucene-Solr-master-Linux/solr/core/build.xml

-clover.disable:

ivy-configure:
[ivy:configure] :: Apache Ivy 2.4.0 - 20141213170938 :: 
http://ant.apache.org/ivy/ ::
[ivy:configure] :: loading settings :: file = 
/home/jenkins/workspace/Lucene-Solr-master-Linux/lucene/top-level-ivy-settings.xml

install-junit4-taskdef:

ivy-availability-check:
[loadresource] Do not set property disallowed.ivy.jars.list as its length is 0.

-ivy-fail-disallowed-ivy-version:

ivy-fail:

resolve-groovy:
[ivy:cachepath] :: resolving dependencies :: 
org.codehaus.groovy#groovy-all-caller;working
[ivy:cachepath]         confs: [default]
[ivy:cachepath]         found org.codehaus.groovy#groovy-all;2.4.15 in public
[ivy:cachepath] :: resolution report :: resolve 13ms :: artifacts dl 0ms
        ---------------------------------------------------------------------
        |                  |            modules            ||   artifacts   |
        |       conf       | number| search|dwnlded|evicted|| number|dwnlded|
        ---------------------------------------------------------------------
        |      default     |   1   |   0   |   0   |   0   ||   1   |   0   |
        ---------------------------------------------------------------------

-init-totals:

-test:
    [mkdir] Created dir: 
/home/jenkins/workspace/Lucene-Solr-master-Linux/solr/build/solr-core/test
[junit4:pickseed] Seed property 'tests.seed' already defined: F18D3CB4E3862CCD
    [mkdir] Created dir: 
/home/jenkins/workspace/Lucene-Solr-master-Linux/solr/build/solr-core/test/temp
   [junit4] <JUnit4> says שלום! Master seed: F18D3CB4E3862CCD
   [junit4] Executing 5 suites with 3 JVMs.
   [junit4] 
   [junit4] Started J1 PID(8...@serv1.sd-datasolutions.de).
   [junit4] Started J2 PID(8...@serv1.sd-datasolutions.de).
   [junit4] Started J0 PID(8...@serv1.sd-datasolutions.de).
   [junit4] Suite: 
org.apache.solr.cloud.api.collections.TestHdfsCloudBackupRestore
   [junit4] OK      12.9s J1 | TestHdfsCloudBackupRestore.test
   [junit4] Completed [1/5] on J1 in 33.23s, 1 test
   [junit4] 
   [junit4] Suite: 
org.apache.solr.cloud.api.collections.TestHdfsCloudBackupRestore
   [junit4] OK      13.0s J0 | TestHdfsCloudBackupRestore.test
   [junit4] Completed [2/5] on J0 in 33.57s, 1 test
   [junit4] 
   [junit4] Duplicate suite name used with XML reports: 
org.apache.solr.cloud.api.collections.TestHdfsCloudBackupRestore. This may 
confuse tools that process XML reports. Set 'ignoreDuplicateSuites' to true to 
skip this message.
   [junit4] Suite: 
org.apache.solr.cloud.api.collections.TestHdfsCloudBackupRestore
   [junit4] OK      13.5s J2 | TestHdfsCloudBackupRestore.test
   [junit4] Completed [3/5] on J2 in 34.15s, 1 test
   [junit4] 
   [junit4] Suite: 
org.apache.solr.cloud.api.collections.TestHdfsCloudBackupRestore
   [junit4] OK      9.94s J1 | TestHdfsCloudBackupRestore.test
   [junit4] Completed [4/5] on J1 in 23.00s, 1 test
   [junit4] 
   [junit4] Suite: 
org.apache.solr.cloud.api.collections.TestHdfsCloudBackupRestore
   [junit4] OK      12.0s J0 | TestHdfsCloudBackupRestore.test
   [junit4] Completed [5/5] on J0 in 24.94s, 1 test
   [junit4] 
   [junit4] JVM J0:     0.42 ..    59.83 =    59.41s
   [junit4] JVM J1:     0.42 ..    57.62 =    57.20s
   [junit4] JVM J2:     0.42 ..    35.28 =    34.86s
   [junit4] Execution time total: 59 seconds
   [junit4] Tests summary: 5 suites, 5 tests
   [junit4] Could not remove temporary path: 
/home/jenkins/workspace/Lucene-Solr-master-Linux/solr/build/solr-core/test/J1 
(java.nio.file.DirectoryNotEmptyException: Remaining files: 
[/home/jenkins/workspace/Lucene-Solr-master-Linux/solr/build/solr-core/test/J1/temp])
   [junit4] Could not remove temporary path: 
/home/jenkins/workspace/Lucene-Solr-master-Linux/solr/build/solr-core/test/J2 
(java.nio.file.DirectoryNotEmptyException: Remaining files: 
[/home/jenkins/workspace/Lucene-Solr-master-Linux/solr/build/solr-core/test/J2/temp])
   [junit4] Could not remove temporary path: 
/home/jenkins/workspace/Lucene-Solr-master-Linux/solr/build/solr-core/test/J0 
(java.nio.file.DirectoryNotEmptyException: Remaining files: 
[/home/jenkins/workspace/Lucene-Solr-master-Linux/solr/build/solr-core/test/J0/temp])
     [echo] 5 slowest tests:
[junit4:tophints] 440.92s | org.apache.solr.cloud.api.collections.ShardSplitTest
[junit4:tophints] 436.73s | org.apache.solr.cloud.cdcr.CdcrOpsAndBoundariesTest
[junit4:tophints] 320.79s | org.apache.solr.cloud.cdcr.CdcrWithNodesRestartsTest
[junit4:tophints] 153.71s | 
org.apache.solr.cloud.autoscaling.IndexSizeTriggerTest
[junit4:tophints] 116.42s | org.apache.solr.update.SoftAutoCommitTest

-check-totals:

test-nocompile:

BUILD SUCCESSFUL
Total time: 1 minute 1 second
[repro] Failures:
[repro]   0/5 failed: 
org.apache.solr.cloud.api.collections.TestHdfsCloudBackupRestore
[repro] Exiting with code 0
+ mv lucene/build lucene/build.repro
+ mv solr/build solr/build.repro
+ mv lucene/build.orig lucene/build
+ mv solr/build.orig solr/build
Archiving artifacts
Setting 
ANT_1_8_2_HOME=/var/lib/jenkins/tools/hudson.tasks.Ant_AntInstallation/ANT_1.8.2
java.lang.InterruptedException: no matches found within 10000
        at hudson.FilePath$ValidateAntFileMask.hasMatch(FilePath.java:2845)
        at hudson.FilePath$ValidateAntFileMask.invoke(FilePath.java:2724)
        at hudson.FilePath$ValidateAntFileMask.invoke(FilePath.java:2705)
        at hudson.FilePath.act(FilePath.java:1076)
        at hudson.FilePath.act(FilePath.java:1059)
        at hudson.FilePath.validateAntFileMask(FilePath.java:2703)
        at hudson.tasks.ArtifactArchiver.perform(ArtifactArchiver.java:243)
        at 
hudson.tasks.BuildStepCompatibilityLayer.perform(BuildStepCompatibilityLayer.java:81)
        at hudson.tasks.BuildStepMonitor$1.perform(BuildStepMonitor.java:20)
        at 
hudson.model.AbstractBuild$AbstractBuildExecution.perform(AbstractBuild.java:744)
        at 
hudson.model.AbstractBuild$AbstractBuildExecution.performAllBuildSteps(AbstractBuild.java:690)
        at hudson.model.Build$BuildExecution.post2(Build.java:186)
        at 
hudson.model.AbstractBuild$AbstractBuildExecution.post(AbstractBuild.java:635)
        at hudson.model.Run.execute(Run.java:1835)
        at hudson.model.FreeStyleBuild.run(FreeStyleBuild.java:43)
        at hudson.model.ResourceController.execute(ResourceController.java:97)
        at hudson.model.Executor.run(Executor.java:429)
No artifacts found that match the file pattern 
"**/*.events,heapdumps/**,**/*_pid*.log". Configuration error?
[WARNINGS] Parsing warnings in console log with parser Java Compiler (javac)
Setting 
ANT_1_8_2_HOME=/var/lib/jenkins/tools/hudson.tasks.Ant_AntInstallation/ANT_1.8.2
Setting 
ANT_1_8_2_HOME=/var/lib/jenkins/tools/hudson.tasks.Ant_AntInstallation/ANT_1.8.2
<Git Blamer> Using GitBlamer to create author and commit information for all 
warnings.
<Git Blamer> GIT_COMMIT=e81dd4e870d2a9b27e1f4366e92daa6dba054da8, 
workspace=/var/lib/jenkins/workspace/Lucene-Solr-master-Linux
[WARNINGS] Computing warning deltas based on reference build #23198
Recording test results
Setting 
ANT_1_8_2_HOME=/var/lib/jenkins/tools/hudson.tasks.Ant_AntInstallation/ANT_1.8.2
Build step 'Publish JUnit test result report' changed build result to UNSTABLE
Email was triggered for: Unstable (Test Failures)
Sending email for trigger: Unstable (Test Failures)
Setting 
ANT_1_8_2_HOME=/var/lib/jenkins/tools/hudson.tasks.Ant_AntInstallation/ANT_1.8.2
Setting 
ANT_1_8_2_HOME=/var/lib/jenkins/tools/hudson.tasks.Ant_AntInstallation/ANT_1.8.2
Setting 
ANT_1_8_2_HOME=/var/lib/jenkins/tools/hudson.tasks.Ant_AntInstallation/ANT_1.8.2
Setting 
ANT_1_8_2_HOME=/var/lib/jenkins/tools/hudson.tasks.Ant_AntInstallation/ANT_1.8.2
---------------------------------------------------------------------
To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org
For additional commands, e-mail: dev-h...@lucene.apache.org
