Build: https://builds.apache.org/job/Lucene-Solr-BadApples-Tests-7.x/191/

2 tests failed.
FAILED:  org.apache.solr.cloud.api.collections.TestHdfsCloudBackupRestore.test

Error Message:
Node 127.0.0.1:37546_solr has 3 replicas. Expected num replicas : 2. state: 
DocCollection(hdfsbackuprestore_restored//collections/hdfsbackuprestore_restored/state.json/11)={
  "pullReplicas":0,
  "replicationFactor":1,
  "shards":{
    "shard2":{
      "range":"0-7fffffff",
      "state":"active",
      "replicas":{"core_node62":{
          "core":"hdfsbackuprestore_restored_shard2_replica_n61",
          "base_url":"https://127.0.0.1:37546/solr",
          "node_name":"127.0.0.1:37546_solr",
          "state":"active",
          "type":"NRT",
          "force_set_state":"false",
          "leader":"true"}},
      "stateTimestamp":"1540007765196772570"},
    "shard1_1":{
      "range":"c0000000-ffffffff",
      "state":"active",
      "replicas":{"core_node64":{
          "core":"hdfsbackuprestore_restored_shard1_1_replica_n63",
          "base_url":"https://127.0.0.1:37546/solr",
          "node_name":"127.0.0.1:37546_solr",
          "state":"active",
          "type":"NRT",
          "force_set_state":"false",
          "leader":"true"}},
      "stateTimestamp":"1540007765196804523"},
    "shard1_0":{
      "range":"80000000-bfffffff",
      "state":"active",
      "replicas":{"core_node66":{
          "core":"hdfsbackuprestore_restored_shard1_0_replica_n65",
          "base_url":"https://127.0.0.1:37546/solr",
          "node_name":"127.0.0.1:37546_solr",
          "state":"active",
          "type":"NRT",
          "force_set_state":"false",
          "leader":"true"}},
      "stateTimestamp":"1540007765196829740"}},
  "router":{
    "name":"compositeId",
    "field":"shard_s"},
  "maxShardsPerNode":"-1",
  "autoAddReplicas":"false",
  "nrtReplicas":1,
  "tlogReplicas":0}

Stack Trace:
java.lang.AssertionError: Node 127.0.0.1:37546_solr has 3 replicas. Expected num replicas : 2. state: 
DocCollection(hdfsbackuprestore_restored//collections/hdfsbackuprestore_restored/state.json/11)={
  "pullReplicas":0,
  "replicationFactor":1,
  "shards":{
    "shard2":{
      "range":"0-7fffffff",
      "state":"active",
      "replicas":{"core_node62":{
          "core":"hdfsbackuprestore_restored_shard2_replica_n61",
          "base_url":"https://127.0.0.1:37546/solr";,
          "node_name":"127.0.0.1:37546_solr",
          "state":"active",
          "type":"NRT",
          "force_set_state":"false",
          "leader":"true"}},
      "stateTimestamp":"1540007765196772570"},
    "shard1_1":{
      "range":"c0000000-ffffffff",
      "state":"active",
      "replicas":{"core_node64":{
          "core":"hdfsbackuprestore_restored_shard1_1_replica_n63",
          "base_url":"https://127.0.0.1:37546/solr";,
          "node_name":"127.0.0.1:37546_solr",
          "state":"active",
          "type":"NRT",
          "force_set_state":"false",
          "leader":"true"}},
      "stateTimestamp":"1540007765196804523"},
    "shard1_0":{
      "range":"80000000-bfffffff",
      "state":"active",
      "replicas":{"core_node66":{
          "core":"hdfsbackuprestore_restored_shard1_0_replica_n65",
          "base_url":"https://127.0.0.1:37546/solr";,
          "node_name":"127.0.0.1:37546_solr",
          "state":"active",
          "type":"NRT",
          "force_set_state":"false",
          "leader":"true"}},
      "stateTimestamp":"1540007765196829740"}},
  "router":{
    "name":"compositeId",
    "field":"shard_s"},
  "maxShardsPerNode":"-1",
  "autoAddReplicas":"false",
  "nrtReplicas":1,
  "tlogReplicas":0}
        at __randomizedtesting.SeedInfo.seed([8607C92A69D883BC:E53F6F0C724EE44]:0)
        at org.junit.Assert.fail(Assert.java:93)
        at org.junit.Assert.assertTrue(Assert.java:43)
        at org.apache.solr.cloud.api.collections.AbstractCloudBackupRestoreTestCase.lambda$testBackupAndRestore$1(AbstractCloudBackupRestoreTestCase.java:339)
        at java.util.HashMap.forEach(HashMap.java:1289)
        at org.apache.solr.cloud.api.collections.AbstractCloudBackupRestoreTestCase.testBackupAndRestore(AbstractCloudBackupRestoreTestCase.java:338)
        at org.apache.solr.cloud.api.collections.AbstractCloudBackupRestoreTestCase.test(AbstractCloudBackupRestoreTestCase.java:144)
        at org.apache.solr.cloud.api.collections.TestHdfsCloudBackupRestore.test(TestHdfsCloudBackupRestore.java:213)
        at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
        at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)
        at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
        at java.lang.reflect.Method.invoke(Method.java:498)
        at com.carrotsearch.randomizedtesting.RandomizedRunner.invoke(RandomizedRunner.java:1742)
        at com.carrotsearch.randomizedtesting.RandomizedRunner$8.evaluate(RandomizedRunner.java:935)
        at com.carrotsearch.randomizedtesting.RandomizedRunner$9.evaluate(RandomizedRunner.java:971)
        at com.carrotsearch.randomizedtesting.RandomizedRunner$10.evaluate(RandomizedRunner.java:985)
        at com.carrotsearch.randomizedtesting.rules.SystemPropertiesRestoreRule$1.evaluate(SystemPropertiesRestoreRule.java:57)
        at org.apache.lucene.util.TestRuleSetupTeardownChained$1.evaluate(TestRuleSetupTeardownChained.java:49)
        at org.apache.lucene.util.AbstractBeforeAfterRule$1.evaluate(AbstractBeforeAfterRule.java:45)
        at org.apache.lucene.util.TestRuleThreadAndTestName$1.evaluate(TestRuleThreadAndTestName.java:48)
        at org.apache.lucene.util.TestRuleIgnoreAfterMaxFailures$1.evaluate(TestRuleIgnoreAfterMaxFailures.java:64)
        at org.apache.lucene.util.TestRuleMarkFailure$1.evaluate(TestRuleMarkFailure.java:47)
        at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
        at com.carrotsearch.randomizedtesting.ThreadLeakControl$StatementRunner.run(ThreadLeakControl.java:368)
        at com.carrotsearch.randomizedtesting.ThreadLeakControl.forkTimeoutingTask(ThreadLeakControl.java:817)
        at com.carrotsearch.randomizedtesting.ThreadLeakControl$3.evaluate(ThreadLeakControl.java:468)
        at com.carrotsearch.randomizedtesting.RandomizedRunner.runSingleTest(RandomizedRunner.java:944)
        at com.carrotsearch.randomizedtesting.RandomizedRunner$5.evaluate(RandomizedRunner.java:830)
        at com.carrotsearch.randomizedtesting.RandomizedRunner$6.evaluate(RandomizedRunner.java:880)
        at com.carrotsearch.randomizedtesting.RandomizedRunner$7.evaluate(RandomizedRunner.java:891)
        at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
        at com.carrotsearch.randomizedtesting.rules.SystemPropertiesRestoreRule$1.evaluate(SystemPropertiesRestoreRule.java:57)
        at org.apache.lucene.util.AbstractBeforeAfterRule$1.evaluate(AbstractBeforeAfterRule.java:45)
        at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
        at org.apache.lucene.util.TestRuleStoreClassName$1.evaluate(TestRuleStoreClassName.java:41)
        at com.carrotsearch.randomizedtesting.rules.NoShadowingOrOverridesOnMethodsRule$1.evaluate(NoShadowingOrOverridesOnMethodsRule.java:40)
        at com.carrotsearch.randomizedtesting.rules.NoShadowingOrOverridesOnMethodsRule$1.evaluate(NoShadowingOrOverridesOnMethodsRule.java:40)
        at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
        at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
        at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
        at org.apache.lucene.util.TestRuleAssertionsRequired$1.evaluate(TestRuleAssertionsRequired.java:53)
        at org.apache.lucene.util.TestRuleMarkFailure$1.evaluate(TestRuleMarkFailure.java:47)
        at org.apache.lucene.util.TestRuleIgnoreAfterMaxFailures$1.evaluate(TestRuleIgnoreAfterMaxFailures.java:64)
        at org.apache.lucene.util.TestRuleIgnoreTestSuites$1.evaluate(TestRuleIgnoreTestSuites.java:54)
        at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
        at com.carrotsearch.randomizedtesting.ThreadLeakControl$StatementRunner.run(ThreadLeakControl.java:368)
        at java.lang.Thread.run(Thread.java:748)
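
The frames above show the failing check running inside java.util.HashMap.forEach at AbstractCloudBackupRestoreTestCase.java:339, i.e. an assertion applied to a map of per-node replica counts for the restored collection. A minimal sketch of that kind of check, assuming hypothetical names (ReplicaPlacementCheckSketch, assertMaxReplicasPerNode, and maxReplicasPerNode are illustrative, not the test's actual code):

    import java.util.HashMap;
    import java.util.Map;
    import org.apache.solr.common.cloud.DocCollection;
    import org.apache.solr.common.cloud.Replica;
    import static org.junit.Assert.assertTrue;

    public class ReplicaPlacementCheckSketch {
      // Sketch only: tally replicas per Solr node and assert no node exceeds the
      // expected maximum; mirrors "Node ... has 3 replicas. Expected num replicas : 2".
      static void assertMaxReplicasPerNode(DocCollection coll, int maxReplicasPerNode) {
        Map<String, Integer> replicasPerNode = new HashMap<>();
        for (Replica replica : coll.getReplicas()) {
          // Count every replica against the node (host:port_solr) hosting it.
          replicasPerNode.merge(replica.getNodeName(), 1, Integer::sum);
        }
        replicasPerNode.forEach((node, count) ->
            assertTrue("Node " + node + " has " + count + " replicas. Expected num replicas : "
                + maxReplicasPerNode + ". state: \n" + coll,
                count <= maxReplicasPerNode));
      }
    }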


FAILED:  org.apache.solr.cloud.api.collections.TestLocalFSCloudBackupRestore.test

Error Message:
Node 127.0.0.1:42055_solr has 3 replicas. Expected num replicas : 2. state: 
DocCollection(backuprestore_restored//collections/backuprestore_restored/state.json/12)={
  "pullReplicas":0,
  "replicationFactor":1,
  "shards":{
    "shard2":{
      "range":"0-7fffffff",
      "state":"active",
      "replicas":{"core_node62":{
          "core":"backuprestore_restored_shard2_replica_n61",
          "base_url":"http://127.0.0.1:42055/solr",
          "node_name":"127.0.0.1:42055_solr",
          "state":"active",
          "type":"NRT",
          "force_set_state":"false",
          "leader":"true"}},
      "stateTimestamp":"1540008097310485489"},
    "shard1_1":{
      "range":"c0000000-ffffffff",
      "state":"active",
      "replicas":{"core_node64":{
          "core":"backuprestore_restored_shard1_1_replica_n63",
          "base_url":"http://127.0.0.1:42055/solr",
          "node_name":"127.0.0.1:42055_solr",
          "state":"active",
          "type":"NRT",
          "force_set_state":"false",
          "leader":"true"}},
      "stateTimestamp":"1540008097310520161"},
    "shard1_0":{
      "range":"80000000-bfffffff",
      "state":"active",
      "replicas":{"core_node66":{
          "core":"backuprestore_restored_shard1_0_replica_n65",
          "base_url":"http://127.0.0.1:42055/solr",
          "node_name":"127.0.0.1:42055_solr",
          "state":"active",
          "type":"NRT",
          "force_set_state":"false",
          "leader":"true"}},
      "stateTimestamp":"1540008097310543083"}},
  "router":{
    "name":"compositeId",
    "field":"shard_s"},
  "maxShardsPerNode":"-1",
  "autoAddReplicas":"false",
  "nrtReplicas":1,
  "tlogReplicas":0}

Stack Trace:
java.lang.AssertionError: Node 127.0.0.1:42055_solr has 3 replicas. Expected num replicas : 2. state: 
DocCollection(backuprestore_restored//collections/backuprestore_restored/state.json/12)={
  "pullReplicas":0,
  "replicationFactor":1,
  "shards":{
    "shard2":{
      "range":"0-7fffffff",
      "state":"active",
      "replicas":{"core_node62":{
          "core":"backuprestore_restored_shard2_replica_n61",
          "base_url":"http://127.0.0.1:42055/solr";,
          "node_name":"127.0.0.1:42055_solr",
          "state":"active",
          "type":"NRT",
          "force_set_state":"false",
          "leader":"true"}},
      "stateTimestamp":"1540008097310485489"},
    "shard1_1":{
      "range":"c0000000-ffffffff",
      "state":"active",
      "replicas":{"core_node64":{
          "core":"backuprestore_restored_shard1_1_replica_n63",
          "base_url":"http://127.0.0.1:42055/solr";,
          "node_name":"127.0.0.1:42055_solr",
          "state":"active",
          "type":"NRT",
          "force_set_state":"false",
          "leader":"true"}},
      "stateTimestamp":"1540008097310520161"},
    "shard1_0":{
      "range":"80000000-bfffffff",
      "state":"active",
      "replicas":{"core_node66":{
          "core":"backuprestore_restored_shard1_0_replica_n65",
          "base_url":"http://127.0.0.1:42055/solr";,
          "node_name":"127.0.0.1:42055_solr",
          "state":"active",
          "type":"NRT",
          "force_set_state":"false",
          "leader":"true"}},
      "stateTimestamp":"1540008097310543083"}},
  "router":{
    "name":"compositeId",
    "field":"shard_s"},
  "maxShardsPerNode":"-1",
  "autoAddReplicas":"false",
  "nrtReplicas":1,
  "tlogReplicas":0}
        at __randomizedtesting.SeedInfo.seed([8607C92A69D883BC:E53F6F0C724EE44]:0)
        at org.junit.Assert.fail(Assert.java:93)
        at org.junit.Assert.assertTrue(Assert.java:43)
        at org.apache.solr.cloud.api.collections.AbstractCloudBackupRestoreTestCase.lambda$testBackupAndRestore$1(AbstractCloudBackupRestoreTestCase.java:339)
        at java.util.HashMap.forEach(HashMap.java:1289)
        at org.apache.solr.cloud.api.collections.AbstractCloudBackupRestoreTestCase.testBackupAndRestore(AbstractCloudBackupRestoreTestCase.java:338)
        at org.apache.solr.cloud.api.collections.AbstractCloudBackupRestoreTestCase.test(AbstractCloudBackupRestoreTestCase.java:144)
        at org.apache.solr.cloud.api.collections.TestLocalFSCloudBackupRestore.test(TestLocalFSCloudBackupRestore.java:64)
        at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
        at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)
        at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
        at java.lang.reflect.Method.invoke(Method.java:498)
        at com.carrotsearch.randomizedtesting.RandomizedRunner.invoke(RandomizedRunner.java:1742)
        at com.carrotsearch.randomizedtesting.RandomizedRunner$8.evaluate(RandomizedRunner.java:935)
        at com.carrotsearch.randomizedtesting.RandomizedRunner$9.evaluate(RandomizedRunner.java:971)
        at com.carrotsearch.randomizedtesting.RandomizedRunner$10.evaluate(RandomizedRunner.java:985)
        at com.carrotsearch.randomizedtesting.rules.SystemPropertiesRestoreRule$1.evaluate(SystemPropertiesRestoreRule.java:57)
        at org.apache.lucene.util.TestRuleSetupTeardownChained$1.evaluate(TestRuleSetupTeardownChained.java:49)
        at org.apache.lucene.util.AbstractBeforeAfterRule$1.evaluate(AbstractBeforeAfterRule.java:45)
        at org.apache.lucene.util.TestRuleThreadAndTestName$1.evaluate(TestRuleThreadAndTestName.java:48)
        at org.apache.lucene.util.TestRuleIgnoreAfterMaxFailures$1.evaluate(TestRuleIgnoreAfterMaxFailures.java:64)
        at org.apache.lucene.util.TestRuleMarkFailure$1.evaluate(TestRuleMarkFailure.java:47)
        at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
        at com.carrotsearch.randomizedtesting.ThreadLeakControl$StatementRunner.run(ThreadLeakControl.java:368)
        at com.carrotsearch.randomizedtesting.ThreadLeakControl.forkTimeoutingTask(ThreadLeakControl.java:817)
        at com.carrotsearch.randomizedtesting.ThreadLeakControl$3.evaluate(ThreadLeakControl.java:468)
        at com.carrotsearch.randomizedtesting.RandomizedRunner.runSingleTest(RandomizedRunner.java:944)
        at com.carrotsearch.randomizedtesting.RandomizedRunner$5.evaluate(RandomizedRunner.java:830)
        at com.carrotsearch.randomizedtesting.RandomizedRunner$6.evaluate(RandomizedRunner.java:880)
        at com.carrotsearch.randomizedtesting.RandomizedRunner$7.evaluate(RandomizedRunner.java:891)
        at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
        at com.carrotsearch.randomizedtesting.rules.SystemPropertiesRestoreRule$1.evaluate(SystemPropertiesRestoreRule.java:57)
        at org.apache.lucene.util.AbstractBeforeAfterRule$1.evaluate(AbstractBeforeAfterRule.java:45)
        at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
        at org.apache.lucene.util.TestRuleStoreClassName$1.evaluate(TestRuleStoreClassName.java:41)
        at com.carrotsearch.randomizedtesting.rules.NoShadowingOrOverridesOnMethodsRule$1.evaluate(NoShadowingOrOverridesOnMethodsRule.java:40)
        at com.carrotsearch.randomizedtesting.rules.NoShadowingOrOverridesOnMethodsRule$1.evaluate(NoShadowingOrOverridesOnMethodsRule.java:40)
        at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
        at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
        at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
        at org.apache.lucene.util.TestRuleAssertionsRequired$1.evaluate(TestRuleAssertionsRequired.java:53)
        at org.apache.lucene.util.TestRuleMarkFailure$1.evaluate(TestRuleMarkFailure.java:47)
        at org.apache.lucene.util.TestRuleIgnoreAfterMaxFailures$1.evaluate(TestRuleIgnoreAfterMaxFailures.java:64)
        at org.apache.lucene.util.TestRuleIgnoreTestSuites$1.evaluate(TestRuleIgnoreTestSuites.java:54)
        at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
        at com.carrotsearch.randomizedtesting.ThreadLeakControl$StatementRunner.run(ThreadLeakControl.java:368)
        at java.lang.Thread.run(Thread.java:748)
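
Both state dumps show the restored collection with shards shard2, shard1_0, and shard1_1, i.e. shard1 was split before the backup: the child ranges are exactly the two halves of shard1's original slice of the 32-bit compositeId hash ring. A small illustrative check of that arithmetic (the class is ours, not from the test):

    public class ShardRangeSplitSketch {
      public static void main(String[] args) {
        // shard1 originally covered [0x80000000, 0xffffffff] (unsigned view of the hash ring).
        long min = 0x80000000L;
        long max = 0xffffffffL;
        long mid = min + (max - min) / 2;                      // 0xbfffffff
        System.out.printf("shard1_0: %x-%x%n", min, mid);      // 80000000-bfffffff, as in the dumps
        System.out.printf("shard1_1: %x-%x%n", mid + 1, max);  // c0000000-ffffffff, as in the dumps
      }
    }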




Build Log:
[...truncated 12842 lines...]
   [junit4] Suite: 
org.apache.solr.cloud.api.collections.TestHdfsCloudBackupRestore
   [junit4]   2> Creating dataDir: 
/home/jenkins/jenkins-slave/workspace/Lucene-Solr-BadApples-Tests-7.x/solr/build/solr-core/test/J0/temp/solr.cloud.api.collections.TestHdfsCloudBackupRestore_8607C92A69D883BC-001/init-core-data-001
   [junit4]   2> 1206810 WARN  
(SUITE-TestHdfsCloudBackupRestore-seed#[8607C92A69D883BC]-worker) [    ] 
o.a.s.SolrTestCaseJ4 startTrackingSearchers: numOpens=4 numCloses=4
   [junit4]   2> 1206810 INFO  
(SUITE-TestHdfsCloudBackupRestore-seed#[8607C92A69D883BC]-worker) [    ] 
o.a.s.SolrTestCaseJ4 Using PointFields (NUMERIC_POINTS_SYSPROP=true) 
w/NUMERIC_DOCVALUES_SYSPROP=true
   [junit4]   2> 1206811 INFO  
(SUITE-TestHdfsCloudBackupRestore-seed#[8607C92A69D883BC]-worker) [    ] 
o.a.s.SolrTestCaseJ4 Randomized ssl (true) and clientAuth (true) via: 
@org.apache.solr.util.RandomizeSSL(reason=, ssl=NaN, value=NaN, clientAuth=NaN)
   [junit4]   2> 1206812 INFO  
(SUITE-TestHdfsCloudBackupRestore-seed#[8607C92A69D883BC]-worker) [    ] 
o.a.s.SolrTestCaseJ4 SecureRandom sanity checks: 
test.solr.allowed.securerandom=null & java.security.egd=file:/dev/./urandom
   [junit4]   1> Formatting using clusterid: testClusterID
   [junit4]   2> 1206900 WARN  
(SUITE-TestHdfsCloudBackupRestore-seed#[8607C92A69D883BC]-worker) [    ] 
o.a.h.m.i.MetricsConfig Cannot locate configuration: tried 
hadoop-metrics2-namenode.properties,hadoop-metrics2.properties
   [junit4]   2> 1206930 WARN  
(SUITE-TestHdfsCloudBackupRestore-seed#[8607C92A69D883BC]-worker) [    ] 
o.a.h.h.HttpRequestLog Jetty request log can only be enabled using Log4j
   [junit4]   2> 1206931 INFO  
(SUITE-TestHdfsCloudBackupRestore-seed#[8607C92A69D883BC]-worker) [    ] 
o.m.log jetty-6.1.26
   [junit4]   2> 1206950 INFO  
(SUITE-TestHdfsCloudBackupRestore-seed#[8607C92A69D883BC]-worker) [    ] 
o.m.log Extract 
jar:file:/home/jenkins/.ivy2/cache/org.apache.hadoop/hadoop-hdfs/tests/hadoop-hdfs-2.7.4-tests.jar!/webapps/hdfs
 to ./temp/Jetty_lucene2.us.west_apache_org_46401_hdfs____llkdst/webapp
   [junit4]   2> 1207517 INFO  
(SUITE-TestHdfsCloudBackupRestore-seed#[8607C92A69D883BC]-worker) [    ] 
o.m.log Started 
HttpServer2$SelectChannelConnectorWithSafeStartup@lucene2-us-west.apache.org:46401
   [junit4]   2> 1207593 WARN  
(SUITE-TestHdfsCloudBackupRestore-seed#[8607C92A69D883BC]-worker) [    ] 
o.a.h.h.HttpRequestLog Jetty request log can only be enabled using Log4j
   [junit4]   2> 1207594 INFO  
(SUITE-TestHdfsCloudBackupRestore-seed#[8607C92A69D883BC]-worker) [    ] 
o.m.log jetty-6.1.26
   [junit4]   2> 1207605 INFO  
(SUITE-TestHdfsCloudBackupRestore-seed#[8607C92A69D883BC]-worker) [    ] 
o.m.log Extract 
jar:file:/home/jenkins/.ivy2/cache/org.apache.hadoop/hadoop-hdfs/tests/hadoop-hdfs-2.7.4-tests.jar!/webapps/datanode
 to ./temp/Jetty_localhost_35888_datanode____.sqlg1v/webapp
   [junit4]   2> 1207921 INFO  
(SUITE-TestHdfsCloudBackupRestore-seed#[8607C92A69D883BC]-worker) [    ] 
o.m.log Started 
HttpServer2$SelectChannelConnectorWithSafeStartup@localhost:35888
   [junit4]   2> 1207952 WARN  
(SUITE-TestHdfsCloudBackupRestore-seed#[8607C92A69D883BC]-worker) [    ] 
o.a.h.h.HttpRequestLog Jetty request log can only be enabled using Log4j
   [junit4]   2> 1207953 INFO  
(SUITE-TestHdfsCloudBackupRestore-seed#[8607C92A69D883BC]-worker) [    ] 
o.m.log jetty-6.1.26
   [junit4]   2> 1207979 INFO  
(SUITE-TestHdfsCloudBackupRestore-seed#[8607C92A69D883BC]-worker) [    ] 
o.m.log Extract 
jar:file:/home/jenkins/.ivy2/cache/org.apache.hadoop/hadoop-hdfs/tests/hadoop-hdfs-2.7.4-tests.jar!/webapps/datanode
 to ./temp/Jetty_localhost_40695_datanode____.7t4sw3/webapp
   [junit4]   2> 1208119 ERROR (DataNode: 
[[[DISK]file:/home/jenkins/jenkins-slave/workspace/Lucene-Solr-BadApples-Tests-7.x/solr/build/solr-core/test/J0/temp/solr.cloud.api.collections.TestHdfsCloudBackupRestore_8607C92A69D883BC-001/tempDir-001/hdfsBaseDir/data/data1/,
 
[DISK]file:/home/jenkins/jenkins-slave/workspace/Lucene-Solr-BadApples-Tests-7.x/solr/build/solr-core/test/J0/temp/solr.cloud.api.collections.TestHdfsCloudBackupRestore_8607C92A69D883BC-001/tempDir-001/hdfsBaseDir/data/data2/]]
  heartbeating to lucene2-us-west.apache.org/127.0.0.1:44791) [    ] 
o.a.h.h.s.d.DirectoryScanner 
dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value below 1 
ms/sec. Assuming default value of 1000
   [junit4]   2> 1208162 INFO  (Block report processor) [    ] BlockStateChange 
BLOCK* processReport 0x4f62cc724dfaa4: from storage 
DS-4565e7a1-b845-4235-903c-26c54ed4ef74 node 
DatanodeRegistration(127.0.0.1:38302, 
datanodeUuid=ab946531-cf07-4950-a722-88c93297d864, infoPort=42644, 
infoSecurePort=0, ipcPort=37744, 
storageInfo=lv=-56;cid=testClusterID;nsid=1260333824;c=0), blocks: 0, 
hasStaleStorage: true, processing time: 3 msecs
   [junit4]   2> 1208164 INFO  (Block report processor) [    ] BlockStateChange 
BLOCK* processReport 0x4f62cc724dfaa4: from storage 
DS-d0fdba2d-d25c-4b63-9c95-520c4e5a7507 node 
DatanodeRegistration(127.0.0.1:38302, 
datanodeUuid=ab946531-cf07-4950-a722-88c93297d864, infoPort=42644, 
infoSecurePort=0, ipcPort=37744, 
storageInfo=lv=-56;cid=testClusterID;nsid=1260333824;c=0), blocks: 0, 
hasStaleStorage: false, processing time: 0 msecs
   [junit4]   2> 1208438 INFO  
(SUITE-TestHdfsCloudBackupRestore-seed#[8607C92A69D883BC]-worker) [    ] 
o.m.log Started 
HttpServer2$SelectChannelConnectorWithSafeStartup@localhost:40695
   [junit4]   2> 1208770 ERROR (DataNode: 
[[[DISK]file:/home/jenkins/jenkins-slave/workspace/Lucene-Solr-BadApples-Tests-7.x/solr/build/solr-core/test/J0/temp/solr.cloud.api.collections.TestHdfsCloudBackupRestore_8607C92A69D883BC-001/tempDir-001/hdfsBaseDir/data/data3/,
 
[DISK]file:/home/jenkins/jenkins-slave/workspace/Lucene-Solr-BadApples-Tests-7.x/solr/build/solr-core/test/J0/temp/solr.cloud.api.collections.TestHdfsCloudBackupRestore_8607C92A69D883BC-001/tempDir-001/hdfsBaseDir/data/data4/]]
  heartbeating to lucene2-us-west.apache.org/127.0.0.1:44791) [    ] 
o.a.h.h.s.d.DirectoryScanner 
dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value below 1 
ms/sec. Assuming default value of 1000
   [junit4]   2> 1208794 INFO  (Block report processor) [    ] BlockStateChange 
BLOCK* processReport 0x4f62cc9889b177: from storage 
DS-da5eeadd-2217-493c-8a75-d28ba0c187d8 node 
DatanodeRegistration(127.0.0.1:43820, 
datanodeUuid=7bb8d74f-9b5b-42c0-9060-d69a9261dd79, infoPort=35447, 
infoSecurePort=0, ipcPort=45229, 
storageInfo=lv=-56;cid=testClusterID;nsid=1260333824;c=0), blocks: 0, 
hasStaleStorage: true, processing time: 0 msecs
   [junit4]   2> 1208795 INFO  (Block report processor) [    ] BlockStateChange 
BLOCK* processReport 0x4f62cc9889b177: from storage 
DS-fbcac7f5-5a46-4c8b-9186-0899ff6090e1 node 
DatanodeRegistration(127.0.0.1:43820, 
datanodeUuid=7bb8d74f-9b5b-42c0-9060-d69a9261dd79, infoPort=35447, 
infoSecurePort=0, ipcPort=45229, 
storageInfo=lv=-56;cid=testClusterID;nsid=1260333824;c=0), blocks: 0, 
hasStaleStorage: false, processing time: 0 msecs
   [junit4]   2> 1209169 INFO  
(SUITE-TestHdfsCloudBackupRestore-seed#[8607C92A69D883BC]-worker) [    ] 
o.a.s.c.MiniSolrCloudCluster Starting cluster of 2 servers in 
/home/jenkins/jenkins-slave/workspace/Lucene-Solr-BadApples-Tests-7.x/solr/build/solr-core/test/J0/temp/solr.cloud.api.collections.TestHdfsCloudBackupRestore_8607C92A69D883BC-001/tempDir-002
   [junit4]   2> 1209169 INFO  
(SUITE-TestHdfsCloudBackupRestore-seed#[8607C92A69D883BC]-worker) [    ] 
o.a.s.c.ZkTestServer STARTING ZK TEST SERVER
   [junit4]   2> 1209171 INFO  (Thread-1260) [    ] o.a.s.c.ZkTestServer client 
port:0.0.0.0/0.0.0.0:0
   [junit4]   2> 1209171 INFO  (Thread-1260) [    ] o.a.s.c.ZkTestServer 
Starting server
   [junit4]   2> 1209179 ERROR (Thread-1260) [    ] o.a.z.s.ZooKeeperServer 
ZKShutdownHandler is not registered, so ZooKeeper server won't take any action 
on ERROR or SHUTDOWN server state changes
   [junit4]   2> 1209271 INFO  
(SUITE-TestHdfsCloudBackupRestore-seed#[8607C92A69D883BC]-worker) [    ] 
o.a.s.c.ZkTestServer start zk server on port:44842
   [junit4]   2> 1209274 INFO  (zkConnectionManagerCallback-1439-thread-1) [    
] o.a.s.c.c.ConnectionManager zkClient has connected
   [junit4]   2> 1209278 INFO  (jetty-launcher-1436-thread-1) [    ] 
o.e.j.s.Server jetty-9.4.11.v20180605; built: 2018-06-05T18:24:03.829Z; git: 
d5fc0523cfa96bfebfbda19606cad384d772f04c; jvm 1.8.0_172-b11
   [junit4]   2> 1209280 INFO  (jetty-launcher-1436-thread-2) [    ] 
o.e.j.s.Server jetty-9.4.11.v20180605; built: 2018-06-05T18:24:03.829Z; git: 
d5fc0523cfa96bfebfbda19606cad384d772f04c; jvm 1.8.0_172-b11
   [junit4]   2> 1209281 INFO  (jetty-launcher-1436-thread-1) [    ] 
o.e.j.s.session DefaultSessionIdManager workerName=node0
   [junit4]   2> 1209281 INFO  (jetty-launcher-1436-thread-1) [    ] 
o.e.j.s.session No SessionScavenger set, using defaults
   [junit4]   2> 1209281 INFO  (jetty-launcher-1436-thread-1) [    ] 
o.e.j.s.session node0 Scavenging every 660000ms
   [junit4]   2> 1209281 INFO  (jetty-launcher-1436-thread-2) [    ] 
o.e.j.s.session DefaultSessionIdManager workerName=node0
   [junit4]   2> 1209281 INFO  (jetty-launcher-1436-thread-2) [    ] 
o.e.j.s.session No SessionScavenger set, using defaults
   [junit4]   2> 1209281 INFO  (jetty-launcher-1436-thread-2) [    ] 
o.e.j.s.session node0 Scavenging every 660000ms
   [junit4]   2> 1209282 INFO  (jetty-launcher-1436-thread-2) [    ] 
o.e.j.s.h.ContextHandler Started 
o.e.j.s.ServletContextHandler@1e68d021{/solr,null,AVAILABLE}
   [junit4]   2> 1209282 INFO  (jetty-launcher-1436-thread-1) [    ] 
o.e.j.s.h.ContextHandler Started 
o.e.j.s.ServletContextHandler@5ab892b3{/solr,null,AVAILABLE}
   [junit4]   2> 1209287 INFO  (jetty-launcher-1436-thread-1) [    ] 
o.e.j.s.AbstractConnector Started ServerConnector@b9c98da{SSL,[ssl, 
http/1.1]}{127.0.0.1:41505}
   [junit4]   2> 1209287 INFO  (jetty-launcher-1436-thread-1) [    ] 
o.e.j.s.Server Started @1209357ms
   [junit4]   2> 1209287 INFO  (jetty-launcher-1436-thread-1) [    ] 
o.a.s.c.s.e.JettySolrRunner Jetty properties: {hostContext=/solr, 
hostPort=41505}
   [junit4]   2> 1209287 ERROR (jetty-launcher-1436-thread-1) [    ] 
o.a.s.u.StartupLoggingUtils Missing Java Option solr.log.dir. Logging may be 
missing or incomplete.
   [junit4]   2> 1209287 INFO  (jetty-launcher-1436-thread-1) [    ] 
o.a.s.s.SolrDispatchFilter Using logger factory 
org.apache.logging.slf4j.Log4jLoggerFactory
   [junit4]   2> 1209287 INFO  (jetty-launcher-1436-thread-1) [    ] 
o.a.s.s.SolrDispatchFilter  ___      _       Welcome to Apache Solr™ version 
7.6.0
   [junit4]   2> 1209287 INFO  (jetty-launcher-1436-thread-1) [    ] 
o.a.s.s.SolrDispatchFilter / __| ___| |_ _   Starting in cloud mode on port null
   [junit4]   2> 1209287 INFO  (jetty-launcher-1436-thread-1) [    ] 
o.a.s.s.SolrDispatchFilter \__ \/ _ \ | '_|  Install dir: null
   [junit4]   2> 1209287 INFO  (jetty-launcher-1436-thread-1) [    ] 
o.a.s.s.SolrDispatchFilter |___/\___/_|_|    Start time: 
2018-10-20T03:55:53.588Z
   [junit4]   2> 1209288 INFO  (jetty-launcher-1436-thread-2) [    ] 
o.e.j.s.AbstractConnector Started ServerConnector@597e5d2{SSL,[ssl, 
http/1.1]}{127.0.0.1:37546}
   [junit4]   2> 1209288 INFO  (jetty-launcher-1436-thread-2) [    ] 
o.e.j.s.Server Started @1209358ms
   [junit4]   2> 1209288 INFO  (jetty-launcher-1436-thread-2) [    ] 
o.a.s.c.s.e.JettySolrRunner Jetty properties: {hostContext=/solr, 
hostPort=37546}
   [junit4]   2> 1209288 ERROR (jetty-launcher-1436-thread-2) [    ] 
o.a.s.u.StartupLoggingUtils Missing Java Option solr.log.dir. Logging may be 
missing or incomplete.
   [junit4]   2> 1209288 INFO  (jetty-launcher-1436-thread-2) [    ] 
o.a.s.s.SolrDispatchFilter Using logger factory 
org.apache.logging.slf4j.Log4jLoggerFactory
   [junit4]   2> 1209288 INFO  (jetty-launcher-1436-thread-2) [    ] 
o.a.s.s.SolrDispatchFilter  ___      _       Welcome to Apache Solr™ version 
7.6.0
   [junit4]   2> 1209288 INFO  (jetty-launcher-1436-thread-2) [    ] 
o.a.s.s.SolrDispatchFilter / __| ___| |_ _   Starting in cloud mode on port null
   [junit4]   2> 1209288 INFO  (jetty-launcher-1436-thread-2) [    ] 
o.a.s.s.SolrDispatchFilter \__ \/ _ \ | '_|  Install dir: null
   [junit4]   2> 1209288 INFO  (jetty-launcher-1436-thread-2) [    ] 
o.a.s.s.SolrDispatchFilter |___/\___/_|_|    Start time: 
2018-10-20T03:55:53.589Z
   [junit4]   2> 1209289 INFO  (zkConnectionManagerCallback-1441-thread-1) [    
] o.a.s.c.c.ConnectionManager zkClient has connected
   [junit4]   2> 1209289 INFO  (jetty-launcher-1436-thread-1) [    ] 
o.a.s.s.SolrDispatchFilter solr.xml found in ZooKeeper. Loading...
   [junit4]   2> 1209295 INFO  (jetty-launcher-1436-thread-1) [    ] 
o.a.s.c.SolrXmlConfig MBean server found: 
com.sun.jmx.mbeanserver.JmxMBeanServer@661840da, but no JMX reporters were 
configured - adding default JMX reporter.
   [junit4]   2> 1209296 INFO  (zkConnectionManagerCallback-1443-thread-1) [    
] o.a.s.c.c.ConnectionManager zkClient has connected
   [junit4]   2> 1209296 INFO  (jetty-launcher-1436-thread-2) [    ] 
o.a.s.s.SolrDispatchFilter solr.xml found in ZooKeeper. Loading...
   [junit4]   2> 1209299 INFO  (jetty-launcher-1436-thread-2) [    ] 
o.a.s.c.SolrXmlConfig MBean server found: 
com.sun.jmx.mbeanserver.JmxMBeanServer@661840da, but no JMX reporters were 
configured - adding default JMX reporter.
   [junit4]   2> 1209348 INFO  (jetty-launcher-1436-thread-2) [    ] 
o.a.s.c.ZkContainer Zookeeper client=127.0.0.1:44842/solr
   [junit4]   2> 1209378 INFO  (zkConnectionManagerCallback-1447-thread-1) [    
] o.a.s.c.c.ConnectionManager zkClient has connected
   [junit4]   2> 1209380 INFO  (zkConnectionManagerCallback-1449-thread-1) [    
] o.a.s.c.c.ConnectionManager zkClient has connected
   [junit4]   2> 1209439 INFO  (jetty-launcher-1436-thread-2) 
[n:127.0.0.1:37546_solr    ] o.a.s.c.OverseerElectionContext I am going to be 
the leader 127.0.0.1:37546_solr
   [junit4]   2> 1209440 INFO  (jetty-launcher-1436-thread-2) 
[n:127.0.0.1:37546_solr    ] o.a.s.c.Overseer Overseer 
(id=73522006077341700-127.0.0.1:37546_solr-n_0000000000) starting
   [junit4]   2> 1209470 INFO  (zkConnectionManagerCallback-1456-thread-1) [    
] o.a.s.c.c.ConnectionManager zkClient has connected
   [junit4]   2> 1209471 INFO  (jetty-launcher-1436-thread-2) 
[n:127.0.0.1:37546_solr    ] o.a.s.c.s.i.ZkClientClusterStateProvider Cluster 
at 127.0.0.1:44842/solr ready
   [junit4]   2> 1209479 INFO  
(OverseerStateUpdate-73522006077341700-127.0.0.1:37546_solr-n_0000000000) 
[n:127.0.0.1:37546_solr    ] o.a.s.c.Overseer Starting to work on the main 
queue : 127.0.0.1:37546_solr
   [junit4]   2> 1209479 INFO  (jetty-launcher-1436-thread-2) 
[n:127.0.0.1:37546_solr    ] o.a.s.c.ZkController Register node as live in 
ZooKeeper:/live_nodes/127.0.0.1:37546_solr
   [junit4]   2> 1209495 INFO  (jetty-launcher-1436-thread-2) 
[n:127.0.0.1:37546_solr    ] o.a.s.c.b.r.BackupRepositoryFactory Added backup 
repository with configuration params {type = repository,name = hdfs,class = 
org.apache.solr.core.backup.repository.HdfsBackupRepository,attributes = 
{name=hdfs, 
class=org.apache.solr.core.backup.repository.HdfsBackupRepository},args = 
{location=/backup,solr.hdfs.home=hdfs://lucene2-us-west.apache.org:44791/solr,solr.hdfs.confdir=}}
   [junit4]   2> 1209495 INFO  (jetty-launcher-1436-thread-2) 
[n:127.0.0.1:37546_solr    ] o.a.s.c.b.r.BackupRepositoryFactory Default 
configuration for backup repository is with configuration params {type = 
repository,name = hdfs,class = 
org.apache.solr.core.backup.repository.HdfsBackupRepository,attributes = 
{name=hdfs, 
class=org.apache.solr.core.backup.repository.HdfsBackupRepository},args = 
{location=/backup,solr.hdfs.home=hdfs://lucene2-us-west.apache.org:44791/solr,solr.hdfs.confdir=}}
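
Both nodes register the same HdfsBackupRepository ("hdfs", location=/backup). A hedged SolrJ sketch of the backup/restore round trip the test drives against such a repository (the backup name and client wiring here are illustrative, not taken from this run):

    import java.util.Collections;
    import java.util.Optional;
    import org.apache.solr.client.solrj.impl.CloudSolrClient;
    import org.apache.solr.client.solrj.request.CollectionAdminRequest;

    public class HdfsBackupRestoreSketch {
      public static void main(String[] args) throws Exception {
        try (CloudSolrClient client = new CloudSolrClient.Builder(
                Collections.singletonList("127.0.0.1:44842"), Optional.of("/solr")).build()) {
          // Back up through the "hdfs" repository registered in solr.xml.
          CollectionAdminRequest.backupCollection("hdfsbackuprestore", "mytestbackup")
              .setRepositoryName("hdfs")
              .setLocation("/backup")
              .process(client);
          // Restore under a new name; replica placement of the restored
          // collection is what the failing assertion above inspects.
          CollectionAdminRequest.restoreCollection("hdfsbackuprestore_restored", "mytestbackup")
              .setRepositoryName("hdfs")
              .setLocation("/backup")
              .process(client);
        }
      }
    }
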
   [junit4]   2> 1209502 INFO  
(OverseerStateUpdate-73522006077341700-127.0.0.1:37546_solr-n_0000000000) 
[n:127.0.0.1:37546_solr    ] o.a.s.c.c.ZkStateReader Updated live nodes from 
ZooKeeper... (0) -> (1)
   [junit4]   2> 1209510 INFO  (jetty-launcher-1436-thread-1) [    ] 
o.a.s.c.ZkContainer Zookeeper client=127.0.0.1:44842/solr
   [junit4]   2> 1209512 INFO  (jetty-launcher-1436-thread-2) 
[n:127.0.0.1:37546_solr    ] o.a.s.h.a.MetricsHistoryHandler No .system 
collection, keeping metrics history in memory.
   [junit4]   2> 1209518 INFO  (zkCallback-1455-thread-1) [    ] 
o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (0) -> (1)
   [junit4]   2> 1209528 INFO  (zkConnectionManagerCallback-1461-thread-1) [    
] o.a.s.c.c.ConnectionManager zkClient has connected
   [junit4]   2> 1209530 INFO  (zkConnectionManagerCallback-1463-thread-1) [    
] o.a.s.c.c.ConnectionManager zkClient has connected
   [junit4]   2> 1209531 INFO  (jetty-launcher-1436-thread-2) 
[n:127.0.0.1:37546_solr    ] o.a.s.m.r.SolrJmxReporter JMX monitoring for 
'solr.node' (registry 'solr.node') enabled at server: 
com.sun.jmx.mbeanserver.JmxMBeanServer@661840da
   [junit4]   2> 1209537 INFO  (jetty-launcher-1436-thread-2) 
[n:127.0.0.1:37546_solr    ] o.a.s.m.r.SolrJmxReporter JMX monitoring for 
'solr.jvm' (registry 'solr.jvm') enabled at server: 
com.sun.jmx.mbeanserver.JmxMBeanServer@661840da
   [junit4]   2> 1209537 INFO  (jetty-launcher-1436-thread-2) 
[n:127.0.0.1:37546_solr    ] o.a.s.m.r.SolrJmxReporter JMX monitoring for 
'solr.jetty' (registry 'solr.jetty') enabled at server: 
com.sun.jmx.mbeanserver.JmxMBeanServer@661840da
   [junit4]   2> 1209538 INFO  (jetty-launcher-1436-thread-2) 
[n:127.0.0.1:37546_solr    ] o.a.s.c.CorePropertiesLocator Found 0 core 
definitions underneath 
/home/jenkins/jenkins-slave/workspace/Lucene-Solr-BadApples-Tests-7.x/solr/build/solr-core/test/J0/temp/solr.cloud.api.collections.TestHdfsCloudBackupRestore_8607C92A69D883BC-001/tempDir-002/node2/.
   [junit4]   2> 1209539 INFO  (jetty-launcher-1436-thread-1) 
[n:127.0.0.1:41505_solr    ] o.a.s.c.c.ZkStateReader Updated live nodes from 
ZooKeeper... (0) -> (1)
   [junit4]   2> 1209543 INFO  (jetty-launcher-1436-thread-1) 
[n:127.0.0.1:41505_solr    ] o.a.s.c.TransientSolrCoreCacheDefault Allocating 
transient cache for 2147483647 transient cores
   [junit4]   2> 1209543 INFO  (jetty-launcher-1436-thread-1) 
[n:127.0.0.1:41505_solr    ] o.a.s.c.ZkController Register node as live in 
ZooKeeper:/live_nodes/127.0.0.1:41505_solr
   [junit4]   2> 1209551 INFO  (zkCallback-1448-thread-1) [    ] 
o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (1) -> (2)
   [junit4]   2> 1209551 INFO  (jetty-launcher-1436-thread-1) 
[n:127.0.0.1:41505_solr    ] o.a.s.c.b.r.BackupRepositoryFactory Added backup 
repository with configuration params {type = repository,name = hdfs,class = 
org.apache.solr.core.backup.repository.HdfsBackupRepository,attributes = 
{name=hdfs, 
class=org.apache.solr.core.backup.repository.HdfsBackupRepository},args = 
{location=/backup,solr.hdfs.home=hdfs://lucene2-us-west.apache.org:44791/solr,solr.hdfs.confdir=}}
   [junit4]   2> 1209551 INFO  (jetty-launcher-1436-thread-1) 
[n:127.0.0.1:41505_solr    ] o.a.s.c.b.r.BackupRepositoryFactory Default 
configuration for backup repository is with configuration params {type = 
repository,name = hdfs,class = 
org.apache.solr.core.backup.repository.HdfsBackupRepository,attributes = 
{name=hdfs, 
class=org.apache.solr.core.backup.repository.HdfsBackupRepository},args = 
{location=/backup,solr.hdfs.home=hdfs://lucene2-us-west.apache.org:44791/solr,solr.hdfs.confdir=}}
   [junit4]   2> 1209562 INFO  (zkCallback-1455-thread-1) [    ] 
o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (1) -> (2)
   [junit4]   2> 1209562 INFO  (zkCallback-1462-thread-1) [    ] 
o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (1) -> (2)
   [junit4]   2> 1209568 INFO  (zkConnectionManagerCallback-1470-thread-1) [    
] o.a.s.c.c.ConnectionManager zkClient has connected
   [junit4]   2> 1209569 INFO  (jetty-launcher-1436-thread-1) 
[n:127.0.0.1:41505_solr    ] o.a.s.c.c.ZkStateReader Updated live nodes from 
ZooKeeper... (0) -> (2)
   [junit4]   2> 1209569 INFO  (jetty-launcher-1436-thread-1) 
[n:127.0.0.1:41505_solr    ] o.a.s.c.s.i.ZkClientClusterStateProvider Cluster 
at 127.0.0.1:44842/solr ready
   [junit4]   2> 1209570 INFO  (jetty-launcher-1436-thread-1) 
[n:127.0.0.1:41505_solr    ] o.a.s.h.a.MetricsHistoryHandler No .system 
collection, keeping metrics history in memory.
   [junit4]   2> 1209584 INFO  (jetty-launcher-1436-thread-1) 
[n:127.0.0.1:41505_solr    ] o.a.s.m.r.SolrJmxReporter JMX monitoring for 
'solr.node' (registry 'solr.node') enabled at server: 
com.sun.jmx.mbeanserver.JmxMBeanServer@661840da
   [junit4]   2> 1209591 INFO  (jetty-launcher-1436-thread-1) 
[n:127.0.0.1:41505_solr    ] o.a.s.m.r.SolrJmxReporter JMX monitoring for 
'solr.jvm' (registry 'solr.jvm') enabled at server: 
com.sun.jmx.mbeanserver.JmxMBeanServer@661840da
   [junit4]   2> 1209591 INFO  (jetty-launcher-1436-thread-1) 
[n:127.0.0.1:41505_solr    ] o.a.s.m.r.SolrJmxReporter JMX monitoring for 
'solr.jetty' (registry 'solr.jetty') enabled at server: 
com.sun.jmx.mbeanserver.JmxMBeanServer@661840da
   [junit4]   2> 1209593 INFO  (jetty-launcher-1436-thread-1) 
[n:127.0.0.1:41505_solr    ] o.a.s.c.CorePropertiesLocator Found 0 core 
definitions underneath 
/home/jenkins/jenkins-slave/workspace/Lucene-Solr-BadApples-Tests-7.x/solr/build/solr-core/test/J0/temp/solr.cloud.api.collections.TestHdfsCloudBackupRestore_8607C92A69D883BC-001/tempDir-002/node1/.
   [junit4]   2> 1209614 INFO  (zkConnectionManagerCallback-1473-thread-1) [    
] o.a.s.c.c.ConnectionManager zkClient has connected
   [junit4]   2> 1209618 INFO  (zkConnectionManagerCallback-1478-thread-1) [    
] o.a.s.c.c.ConnectionManager zkClient has connected
   [junit4]   2> 1209618 INFO  
(SUITE-TestHdfsCloudBackupRestore-seed#[8607C92A69D883BC]-worker) [    ] 
o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (0) -> (2)
   [junit4]   2> 1209648 INFO  
(SUITE-TestHdfsCloudBackupRestore-seed#[8607C92A69D883BC]-worker) [    ] 
o.a.s.c.s.i.ZkClientClusterStateProvider Cluster at 127.0.0.1:44842/solr ready
   [junit4]   2> 1209699 INFO  
(TEST-TestHdfsCloudBackupRestore.test-seed#[8607C92A69D883BC]) [    ] 
o.a.s.SolrTestCaseJ4 ###Starting test
   [junit4]   2> 1209716 INFO  (qtp835890382-5574) [n:127.0.0.1:37546_solr    ] 
o.a.s.h.a.CollectionsHandler Invoked Collection Action :create with params 
pullReplicas=0&property.customKey=customValue&collection.configName=conf1&maxShardsPerNode=-1&router.field=shard_s&name=hdfsbackuprestore&nrtReplicas=1&action=CREATE&numShards=2&tlogReplicas=0&wt=javabin&version=2
 and sendToOCPQueue=true
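
A rough SolrJ equivalent of the CREATE parameters logged above (the client wiring is illustrative; the parameter values are the ones in the request):

    import java.util.Collections;
    import java.util.Optional;
    import java.util.Properties;
    import org.apache.solr.client.solrj.impl.CloudSolrClient;
    import org.apache.solr.client.solrj.request.CollectionAdminRequest;

    public class CreateCollectionSketch {
      public static void main(String[] args) throws Exception {
        try (CloudSolrClient client = new CloudSolrClient.Builder(
                Collections.singletonList("127.0.0.1:44842"), Optional.of("/solr")).build()) {
          Properties props = new Properties();
          props.setProperty("customKey", "customValue");  // property.customKey=customValue
          // name=hdfsbackuprestore, configName=conf1, numShards=2,
          // nrtReplicas=1, tlogReplicas=0, pullReplicas=0
          CollectionAdminRequest.createCollection("hdfsbackuprestore", "conf1", 2, 1, 0, 0)
              .setMaxShardsPerNode(-1)                    // maxShardsPerNode=-1
              .setRouterField("shard_s")                  // router.field=shard_s
              .setProperties(props)
              .process(client);
        }
      }
    }
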
   [junit4]   2> 1209718 INFO  
(OverseerThreadFactory-2339-thread-1-processing-n:127.0.0.1:37546_solr) 
[n:127.0.0.1:37546_solr    ] o.a.s.c.a.c.CreateCollectionCmd Create collection 
hdfsbackuprestore
   [junit4]   2> 1209829 INFO  (qtp835890382-5579) [n:127.0.0.1:37546_solr    ] 
o.a.s.s.HttpSolrCall [admin] webapp=null path=/admin/metrics 
params={prefix=CONTAINER.fs.usableSpace,CONTAINER.fs.totalSpace,CORE.coreName&wt=javabin&version=2&group=solr.node,solr.core}
 status=0 QTime=0
   [junit4]   2> 1209848 INFO  (qtp1719133957-5575) [n:127.0.0.1:41505_solr    
] o.a.s.s.HttpSolrCall [admin] webapp=null path=/admin/metrics 
params={prefix=CONTAINER.fs.usableSpace,CONTAINER.fs.totalSpace,CORE.coreName&wt=javabin&version=2&group=solr.node,solr.core}
 status=0 QTime=0
   [junit4]   2> 1209853 INFO  
(OverseerStateUpdate-73522006077341700-127.0.0.1:37546_solr-n_0000000000) 
[n:127.0.0.1:37546_solr    ] o.a.s.c.o.SliceMutator createReplica() {
   [junit4]   2>   "operation":"ADDREPLICA",
   [junit4]   2>   "collection":"hdfsbackuprestore",
   [junit4]   2>   "shard":"shard1",
   [junit4]   2>   "core":"hdfsbackuprestore_shard1_replica_n1",
   [junit4]   2>   "state":"down",
   [junit4]   2>   "base_url":"https://127.0.0.1:41505/solr";,
   [junit4]   2>   "type":"NRT",
   [junit4]   2>   "waitForFinalState":"false"} 
   [junit4]   2> 1209857 INFO  
(OverseerStateUpdate-73522006077341700-127.0.0.1:37546_solr-n_0000000000) 
[n:127.0.0.1:37546_solr    ] o.a.s.c.o.SliceMutator createReplica() {
   [junit4]   2>   "operation":"ADDREPLICA",
   [junit4]   2>   "collection":"hdfsbackuprestore",
   [junit4]   2>   "shard":"shard2",
   [junit4]   2>   "core":"hdfsbackuprestore_shard2_replica_n2",
   [junit4]   2>   "state":"down",
   [junit4]   2>   "base_url":"https://127.0.0.1:37546/solr";,
   [junit4]   2>   "type":"NRT",
   [junit4]   2>   "waitForFinalState":"false"} 
   [junit4]   2> 1210067 INFO  (qtp1719133957-5564) [n:127.0.0.1:41505_solr    
x:hdfsbackuprestore_shard1_replica_n1] o.a.s.h.a.CoreAdminOperation core create 
command 
qt=/admin/cores&collection.configName=conf1&newCollection=true&collection=hdfsbackuprestore&version=2&replicaType=NRT&property.customKey=customValue&coreNodeName=core_node3&name=hdfsbackuprestore_shard1_replica_n1&action=CREATE&numShards=2&shard=shard1&wt=javabin
   [junit4]   2> 1210073 INFO  (qtp835890382-5578) [n:127.0.0.1:37546_solr    
x:hdfsbackuprestore_shard2_replica_n2] o.a.s.h.a.CoreAdminOperation core create 
command 
qt=/admin/cores&collection.configName=conf1&newCollection=true&collection=hdfsbackuprestore&version=2&replicaType=NRT&property.customKey=customValue&coreNodeName=core_node4&name=hdfsbackuprestore_shard2_replica_n2&action=CREATE&numShards=2&shard=shard2&wt=javabin
   [junit4]   2> 1210073 INFO  (qtp835890382-5578) [n:127.0.0.1:37546_solr    
x:hdfsbackuprestore_shard2_replica_n2] o.a.s.c.TransientSolrCoreCacheDefault 
Allocating transient cache for 2147483647 transient cores
   [junit4]   2> 1211089 INFO  (qtp835890382-5578) [n:127.0.0.1:37546_solr 
c:hdfsbackuprestore s:shard2 r:core_node4 
x:hdfsbackuprestore_shard2_replica_n2] o.a.s.c.SolrConfig Using Lucene 
MatchVersion: 7.6.0
   [junit4]   2> 1211094 INFO  (qtp1719133957-5564) [n:127.0.0.1:41505_solr 
c:hdfsbackuprestore s:shard1 r:core_node3 
x:hdfsbackuprestore_shard1_replica_n1] o.a.s.c.SolrConfig Using Lucene 
MatchVersion: 7.6.0
   [junit4]   2> 1211104 INFO  (qtp835890382-5578) [n:127.0.0.1:37546_solr 
c:hdfsbackuprestore s:shard2 r:core_node4 
x:hdfsbackuprestore_shard2_replica_n2] o.a.s.s.IndexSchema 
[hdfsbackuprestore_shard2_replica_n2] Schema name=minimal
   [junit4]   2> 1211107 INFO  (qtp1719133957-5564) [n:127.0.0.1:41505_solr 
c:hdfsbackuprestore s:shard1 r:core_node3 
x:hdfsbackuprestore_shard1_replica_n1] o.a.s.s.IndexSchema 
[hdfsbackuprestore_shard1_replica_n1] Schema name=minimal
   [junit4]   2> 1211109 INFO  (qtp835890382-5578) [n:127.0.0.1:37546_solr 
c:hdfsbackuprestore s:shard2 r:core_node4 
x:hdfsbackuprestore_shard2_replica_n2] o.a.s.s.IndexSchema Loaded schema 
minimal/1.1 with uniqueid field id
   [junit4]   2> 1211109 INFO  (qtp835890382-5578) [n:127.0.0.1:37546_solr 
c:hdfsbackuprestore s:shard2 r:core_node4 
x:hdfsbackuprestore_shard2_replica_n2] o.a.s.c.CoreContainer Creating SolrCore 
'hdfsbackuprestore_shard2_replica_n2' using configuration from collection 
hdfsbackuprestore, trusted=true
   [junit4]   2> 1211111 INFO  (qtp835890382-5578) [n:127.0.0.1:37546_solr 
c:hdfsbackuprestore s:shard2 r:core_node4 
x:hdfsbackuprestore_shard2_replica_n2] o.a.s.m.r.SolrJmxReporter JMX monitoring 
for 'solr.core.hdfsbackuprestore.shard2.replica_n2' (registry 
'solr.core.hdfsbackuprestore.shard2.replica_n2') enabled at server: 
com.sun.jmx.mbeanserver.JmxMBeanServer@661840da
   [junit4]   2> 1211112 INFO  (qtp835890382-5578) [n:127.0.0.1:37546_solr 
c:hdfsbackuprestore s:shard2 r:core_node4 
x:hdfsbackuprestore_shard2_replica_n2] o.a.s.c.SolrCore 
[[hdfsbackuprestore_shard2_replica_n2] ] Opening new SolrCore at 
[/home/jenkins/jenkins-slave/workspace/Lucene-Solr-BadApples-Tests-7.x/solr/build/solr-core/test/J0/temp/solr.cloud.api.collections.TestHdfsCloudBackupRestore_8607C92A69D883BC-001/tempDir-002/node2/hdfsbackuprestore_shard2_replica_n2],
 
dataDir=[/home/jenkins/jenkins-slave/workspace/Lucene-Solr-BadApples-Tests-7.x/solr/build/solr-core/test/J0/temp/solr.cloud.api.collections.TestHdfsCloudBackupRestore_8607C92A69D883BC-001/tempDir-002/node2/./hdfsbackuprestore_shard2_replica_n2/data/]
   [junit4]   2> 1211115 INFO  (qtp1719133957-5564) [n:127.0.0.1:41505_solr 
c:hdfsbackuprestore s:shard1 r:core_node3 
x:hdfsbackuprestore_shard1_replica_n1] o.a.s.s.IndexSchema Loaded schema 
minimal/1.1 with uniqueid field id
   [junit4]   2> 1211115 INFO  (qtp1719133957-5564) [n:127.0.0.1:41505_solr 
c:hdfsbackuprestore s:shard1 r:core_node3 
x:hdfsbackuprestore_shard1_replica_n1] o.a.s.c.CoreContainer Creating SolrCore 
'hdfsbackuprestore_shard1_replica_n1' using configuration from collection 
hdfsbackuprestore, trusted=true
   [junit4]   2> 1211115 INFO  (qtp1719133957-5564) [n:127.0.0.1:41505_solr 
c:hdfsbackuprestore s:shard1 r:core_node3 
x:hdfsbackuprestore_shard1_replica_n1] o.a.s.m.r.SolrJmxReporter JMX monitoring 
for 'solr.core.hdfsbackuprestore.shard1.replica_n1' (registry 
'solr.core.hdfsbackuprestore.shard1.replica_n1') enabled at server: 
com.sun.jmx.mbeanserver.JmxMBeanServer@661840da
   [junit4]   2> 1211115 INFO  (qtp1719133957-5564) [n:127.0.0.1:41505_solr 
c:hdfsbackuprestore s:shard1 r:core_node3 
x:hdfsbackuprestore_shard1_replica_n1] o.a.s.c.SolrCore 
[[hdfsbackuprestore_shard1_replica_n1] ] Opening new SolrCore at 
[/home/jenkins/jenkins-slave/workspace/Lucene-Solr-BadApples-Tests-7.x/solr/build/solr-core/test/J0/temp/solr.cloud.api.collections.TestHdfsCloudBackupRestore_8607C92A69D883BC-001/tempDir-002/node1/hdfsbackuprestore_shard1_replica_n1],
 
dataDir=[/home/jenkins/jenkins-slave/workspace/Lucene-Solr-BadApples-Tests-7.x/solr/build/solr-core/test/J0/temp/solr.cloud.api.collections.TestHdfsCloudBackupRestore_8607C92A69D883BC-001/tempDir-002/node1/./hdfsbackuprestore_shard1_replica_n1/data/]
   [junit4]   2> 1211237 INFO  (qtp1719133957-5564) [n:127.0.0.1:41505_solr 
c:hdfsbackuprestore s:shard1 r:core_node3 
x:hdfsbackuprestore_shard1_replica_n1] o.a.s.u.UpdateHandler Using UpdateLog 
implementation: org.apache.solr.update.UpdateLog
   [junit4]   2> 1211237 INFO  (qtp1719133957-5564) [n:127.0.0.1:41505_solr 
c:hdfsbackuprestore s:shard1 r:core_node3 
x:hdfsbackuprestore_shard1_replica_n1] o.a.s.u.UpdateLog Initializing 
UpdateLog: dataDir=null defaultSyncLevel=FLUSH numRecordsToKeep=100 
maxNumLogsToKeep=10 numVersionBuckets=65536
   [junit4]   2> 1211238 INFO  (qtp1719133957-5564) [n:127.0.0.1:41505_solr 
c:hdfsbackuprestore s:shard1 r:core_node3 
x:hdfsbackuprestore_shard1_replica_n1] o.a.s.u.CommitTracker Hard AutoCommit: 
disabled
   [junit4]   2> 1211238 INFO  (qtp1719133957-5564) [n:127.0.0.1:41505_solr 
c:hdfsbackuprestore s:shard1 r:core_node3 
x:hdfsbackuprestore_shard1_replica_n1] o.a.s.u.CommitTracker Soft AutoCommit: 
disabled
   [junit4]   2> 1211244 INFO  (qtp1719133957-5564) [n:127.0.0.1:41505_solr 
c:hdfsbackuprestore s:shard1 r:core_node3 
x:hdfsbackuprestore_shard1_replica_n1] o.a.s.s.SolrIndexSearcher Opening 
[Searcher@190f322e[hdfsbackuprestore_shard1_replica_n1] main]
   [junit4]   2> 1211245 INFO  (qtp835890382-5578) [n:127.0.0.1:37546_solr 
c:hdfsbackuprestore s:shard2 r:core_node4 
x:hdfsbackuprestore_shard2_replica_n2] o.a.s.u.UpdateHandler Using UpdateLog 
implementation: org.apache.solr.update.UpdateLog
   [junit4]   2> 1211245 INFO  (qtp835890382-5578) [n:127.0.0.1:37546_solr 
c:hdfsbackuprestore s:shard2 r:core_node4 
x:hdfsbackuprestore_shard2_replica_n2] o.a.s.u.UpdateLog Initializing 
UpdateLog: dataDir=null defaultSyncLevel=FLUSH numRecordsToKeep=100 
maxNumLogsToKeep=10 numVersionBuckets=65536
   [junit4]   2> 1211246 INFO  (qtp835890382-5578) [n:127.0.0.1:37546_solr 
c:hdfsbackuprestore s:shard2 r:core_node4 
x:hdfsbackuprestore_shard2_replica_n2] o.a.s.u.CommitTracker Hard AutoCommit: 
disabled
   [junit4]   2> 1211246 INFO  (qtp835890382-5578) [n:127.0.0.1:37546_solr 
c:hdfsbackuprestore s:shard2 r:core_node4 
x:hdfsbackuprestore_shard2_replica_n2] o.a.s.u.CommitTracker Soft AutoCommit: 
disabled
   [junit4]   2> 1211246 INFO  (qtp1719133957-5564) [n:127.0.0.1:41505_solr 
c:hdfsbackuprestore s:shard1 r:core_node3 
x:hdfsbackuprestore_shard1_replica_n1] o.a.s.r.ManagedResourceStorage 
Configured ZooKeeperStorageIO with znodeBase: /configs/conf1
   [junit4]   2> 1211247 INFO  (qtp1719133957-5564) [n:127.0.0.1:41505_solr 
c:hdfsbackuprestore s:shard1 r:core_node3 
x:hdfsbackuprestore_shard1_replica_n1] o.a.s.r.ManagedResourceStorage Loaded 
null at path _rest_managed.json using ZooKeeperStorageIO:path=/configs/conf1
   [junit4]   2> 1211248 INFO  (qtp1719133957-5564) [n:127.0.0.1:41505_solr 
c:hdfsbackuprestore s:shard1 r:core_node3 
x:hdfsbackuprestore_shard1_replica_n1] o.a.s.h.ReplicationHandler Commits will 
be reserved for 10000ms.
   [junit4]   2> 1211248 INFO  
(searcherExecutor-2349-thread-1-processing-n:127.0.0.1:41505_solr 
x:hdfsbackuprestore_shard1_replica_n1 c:hdfsbackuprestore s:shard1 
r:core_node3) [n:127.0.0.1:41505_solr c:hdfsbackuprestore s:shard1 r:core_node3 
x:hdfsbackuprestore_shard1_replica_n1] o.a.s.c.SolrCore 
[hdfsbackuprestore_shard1_replica_n1] Registered new searcher 
Searcher@190f322e[hdfsbackuprestore_shard1_replica_n1] 
main{ExitableDirectoryReader(UninvertingDirectoryReader())}
   [junit4]   2> 1211248 INFO  (qtp1719133957-5564) [n:127.0.0.1:41505_solr 
c:hdfsbackuprestore s:shard1 r:core_node3 
x:hdfsbackuprestore_shard1_replica_n1] o.a.s.u.UpdateLog Could not find max 
version in index or recent updates, using new clock 1614815172282548224
   [junit4]   2> 1211251 INFO  (qtp835890382-5578) [n:127.0.0.1:37546_solr 
c:hdfsbackuprestore s:shard2 r:core_node4 
x:hdfsbackuprestore_shard2_replica_n2] o.a.s.s.SolrIndexSearcher Opening 
[Searcher@57859385[hdfsbackuprestore_shard2_replica_n2] main]
   [junit4]   2> 1211252 INFO  (qtp835890382-5578) [n:127.0.0.1:37546_solr 
c:hdfsbackuprestore s:shard2 r:core_node4 
x:hdfsbackuprestore_shard2_replica_n2] o.a.s.r.ManagedResourceStorage 
Configured ZooKeeperStorageIO with znodeBase: /configs/conf1
   [junit4]   2> 1211253 INFO  (qtp835890382-5578) [n:127.0.0.1:37546_solr 
c:hdfsbackuprestore s:shard2 r:core_node4 
x:hdfsbackuprestore_shard2_replica_n2] o.a.s.r.ManagedResourceStorage Loaded 
null at path _rest_managed.json using ZooKeeperStorageIO:path=/configs/conf1
   [junit4]   2> 1211253 INFO  (qtp835890382-5578) [n:127.0.0.1:37546_solr 
c:hdfsbackuprestore s:shard2 r:core_node4 
x:hdfsbackuprestore_shard2_replica_n2] o.a.s.h.ReplicationHandler Commits will 
be reserved for 10000ms.
   [junit4]   2> 1211254 INFO  (qtp835890382-5578) [n:127.0.0.1:37546_solr 
c:hdfsbackuprestore s:shard2 r:core_node4 
x:hdfsbackuprestore_shard2_replica_n2] o.a.s.u.UpdateLog Could not find max 
version in index or recent updates, using new clock 1614815172288839680
   [junit4]   2> 1211255 INFO  (qtp1719133957-5564) [n:127.0.0.1:41505_solr 
c:hdfsbackuprestore s:shard1 r:core_node3 
x:hdfsbackuprestore_shard1_replica_n1] o.a.s.c.ZkShardTerms Successful update 
of terms at /collections/hdfsbackuprestore/terms/shard1 to 
Terms{values={core_node3=0}, version=0}
   [junit4]   2> 1211256 INFO  
(searcherExecutor-2348-thread-1-processing-n:127.0.0.1:37546_solr 
x:hdfsbackuprestore_shard2_replica_n2 c:hdfsbackuprestore s:shard2 
r:core_node4) [n:127.0.0.1:37546_solr c:hdfsbackuprestore s:shard2 r:core_node4 
x:hdfsbackuprestore_shard2_replica_n2] o.a.s.c.SolrCore 
[hdfsbackuprestore_shard2_replica_n2] Registered new searcher 
Searcher@57859385[hdfsbackuprestore_shard2_replica_n2] 
main{ExitableDirectoryReader(UninvertingDirectoryReader())}
   [junit4]   2> 1211260 INFO  (qtp1719133957-5564) [n:127.0.0.1:41505_solr 
c:hdfsbackuprestore s:shard1 r:core_node3 
x:hdfsbackuprestore_shard1_replica_n1] o.a.s.c.ShardLeaderElectionContext 
Enough replicas found to continue.
   [junit4]   2> 1211260 INFO  (qtp1719133957-5564) [n:127.0.0.1:41505_solr 
c:hdfsbackuprestore s:shard1 r:core_node3 
x:hdfsbackuprestore_shard1_replica_n1] o.a.s.c.ShardLeaderElectionContext I may 
be the new leader - try and sync
   [junit4]   2> 1211260 INFO  (qtp1719133957-5564) [n:127.0.0.1:41505_solr 
c:hdfsbackuprestore s:shard1 r:core_node3 
x:hdfsbackuprestore_shard1_replica_n1] o.a.s.c.SyncStrategy Sync replicas to 
https://127.0.0.1:41505/solr/hdfsbackuprestore_shard1_replica_n1/
   [junit4]   2> 1211261 INFO  (qtp1719133957-5564) [n:127.0.0.1:41505_solr 
c:hdfsbackuprestore s:shard1 r:core_node3 
x:hdfsbackuprestore_shard1_replica_n1] o.a.s.c.SyncStrategy Sync Success - now 
sync replicas to me
   [junit4]   2> 1211261 INFO  (qtp1719133957-5564) [n:127.0.0.1:41505_solr 
c:hdfsbackuprestore s:shard1 r:core_node3 
x:hdfsbackuprestore_shard1_replica_n1] o.a.s.c.SyncStrategy 
https://127.0.0.1:41505/solr/hdfsbackuprestore_shard1_replica_n1/ has no 
replicas
   [junit4]   2> 1211261 INFO  (qtp1719133957-5564) [n:127.0.0.1:41505_solr 
c:hdfsbackuprestore s:shard1 r:core_node3 
x:hdfsbackuprestore_shard1_replica_n1] o.a.s.c.ShardLeaderElectionContext Found 
all replicas participating in election, clear LIR
   [junit4]   2> 1211267 INFO  (qtp835890382-5578) [n:127.0.0.1:37546_solr 
c:hdfsbackuprestore s:shard2 r:core_node4 
x:hdfsbackuprestore_shard2_replica_n2] o.a.s.c.ZkShardTerms Successful update 
of terms at /collections/hdfsbackuprestore/terms/shard2 to 
Terms{values={core_node4=0}, version=0}
   [junit4]   2> 1211269 INFO  (qtp835890382-5578) [n:127.0.0.1:37546_solr 
c:hdfsbackuprestore s:shard2 r:core_node4 
x:hdfsbackuprestore_shard2_replica_n2] o.a.s.c.ShardLeaderElectionContext 
Enough replicas found to continue.
   [junit4]   2> 1211269 INFO  (qtp835890382-5578) [n:127.0.0.1:37546_solr 
c:hdfsbackuprestore s:shard2 r:core_node4 
x:hdfsbackuprestore_shard2_replica_n2] o.a.s.c.ShardLeaderElectionContext I may 
be the new leader - try and sync
   [junit4]   2> 1211269 INFO  (qtp835890382-5578) [n:127.0.0.1:37546_solr 
c:hdfsbackuprestore s:shard2 r:core_node4 
x:hdfsbackuprestore_shard2_replica_n2] o.a.s.c.SyncStrategy Sync replicas to 
https://127.0.0.1:37546/solr/hdfsbackuprestore_shard2_replica_n2/
   [junit4]   2> 1211269 INFO  (qtp1719133957-5564) [n:127.0.0.1:41505_solr 
c:hdfsbackuprestore s:shard1 r:core_node3 
x:hdfsbackuprestore_shard1_replica_n1] o.a.s.c.ShardLeaderElectionContext I am 
the new leader: 
https://127.0.0.1:41505/solr/hdfsbackuprestore_shard1_replica_n1/ shard1
   [junit4]   2> 1211270 INFO  (qtp835890382-5578) [n:127.0.0.1:37546_solr 
c:hdfsbackuprestore s:shard2 r:core_node4 
x:hdfsbackuprestore_shard2_replica_n2] o.a.s.c.SyncStrategy Sync Success - now 
sync replicas to me
   [junit4]   2> 1211270 INFO  (qtp835890382-5578) [n:127.0.0.1:37546_solr 
c:hdfsbackuprestore s:shard2 r:core_node4 
x:hdfsbackuprestore_shard2_replica_n2] o.a.s.c.SyncStrategy 
https://127.0.0.1:37546/solr/hdfsbackuprestore_shard2_replica_n2/ has no 
replicas
   [junit4]   2> 1211270 INFO  (qtp835890382-5578) [n:127.0.0.1:37546_solr 
c:hdfsbackuprestore s:shard2 r:core_node4 
x:hdfsbackuprestore_shard2_replica_n2] o.a.s.c.ShardLeaderElectionContext Found 
all replicas participating in election, clear LIR
   [junit4]   2> 1211273 INFO  (qtp835890382-5578) [n:127.0.0.1:37546_solr 
c:hdfsbackuprestore s:shard2 r:core_node4 
x:hdfsbackuprestore_shard2_replica_n2] o.a.s.c.ShardLeaderElectionContext I am 
the new leader: 
https://127.0.0.1:37546/solr/hdfsbackuprestore_shard2_replica_n2/ shard2
   [junit4]   2> 1211375 INFO  (qtp835890382-5578) [n:127.0.0.1:37546_solr 
c:hdfsbackuprestore s:shard2 r:core_node4 
x:hdfsbackuprestore_shard2_replica_n2] o.a.s.c.ZkController I am the leader, no 
recovery necessary
   [junit4]   2> 1211377 INFO  (qtp835890382-5578) [n:127.0.0.1:37546_solr 
c:hdfsbackuprestore s:shard2 r:core_node4 
x:hdfsbackuprestore_shard2_replica_n2] o.a.s.s.HttpSolrCall [admin] webapp=null 
path=/admin/cores 
params={qt=/admin/cores&collection.configName=conf1&newCollection=true&collection=hdfsbackuprestore&version=2&replicaType=NRT&property.customKey=customValue&coreNodeName=core_node4&name=hdfsbackuprestore_shard2_replica_n2&action=CREATE&numShards=2&shard=shard2&wt=javabin}
 status=0 QTime=1304
   [junit4]   2> 1211423 INFO  (qtp1719133957-5564) [n:127.0.0.1:41505_solr 
c:hdfsbackuprestore s:shard1 r:core_node3 
x:hdfsbackuprestore_shard1_replica_n1] o.a.s.c.ZkController I am the leader, no 
recovery necessary
   [junit4]   2> 1211425 INFO  (qtp1719133957-5564) [n:127.0.0.1:41505_solr 
c:hdfsbackuprestore s:shard1 r:core_node3 
x:hdfsbackuprestore_shard1_replica_n1] o.a.s.s.HttpSolrCall [admin] webapp=null 
path=/admin/cores 
params={qt=/admin/cores&collection.configName=conf1&newCollection=true&collection=hdfsbackuprestore&version=2&replicaType=NRT&property.customKey=customValue&coreNodeName=core_node3&name=hdfsbackuprestore_shard1_replica_n1&action=CREATE&numShards=2&shard=shard1&wt=javabin}
 status=0 QTime=1358
   [junit4]   2> 1211428 INFO  (qtp835890382-5574) [n:127.0.0.1:37546_solr    ] 
o.a.s.h.a.CollectionsHandler Wait for new collection to be active for at most 
30 seconds. Check all shard replicas
   [junit4]   2> 1211525 INFO  (zkCallback-1448-thread-1) [    ] 
o.a.s.c.c.ZkStateReader A cluster state change: [WatchedEvent 
state:SyncConnected type:NodeDataChanged 
path:/collections/hdfsbackuprestore/state.json] for collection 
[hdfsbackuprestore] has occurred - updating... (live nodes size: [2])
   [junit4]   2> 1211525 INFO  (zkCallback-1462-thread-1) [    ] 
o.a.s.c.c.ZkStateReader A cluster state change: [WatchedEvent 
state:SyncConnected type:NodeDataChanged 
path:/collections/hdfsbackuprestore/state.json] for collection 
[hdfsbackuprestore] has occurred - updating... (live nodes size: [2])
   [junit4]   2> 1211723 INFO  
(OverseerCollectionConfigSetProcessor-73522006077341700-127.0.0.1:37546_solr-n_0000000000)
 [n:127.0.0.1:37546_solr    ] o.a.s.c.OverseerTaskQueue Response ZK path: 
/overseer/collection-queue-work/qnr-0000000000 doesn't exist.  Requestor may 
have disconnected from ZooKeeper
   [junit4]   2> 1212429 INFO  (qtp835890382-5574) [n:127.0.0.1:37546_solr    ] 
o.a.s.s.HttpSolrCall [admin] webapp=null path=/admin/collections 
params={pullReplicas=0&property.customKey=customValue&collection.configName=conf1&maxShardsPerNode=-1&router.field=shard_s&name=hdfsbackuprestore&nrtReplicas=1&action=CREATE&numShards=2&tlogReplicas=0&wt=javabin&version=2}
 status=0 QTime=2712
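
For context (not part of the log): the collection CREATE call recorded above maps onto a plain SolrJ request. The sketch below is illustrative only; the ZooKeeper address and client setup are assumptions, while the collection name, config name, shard/replica counts, and router field are taken from the logged params.

    // Minimal SolrJ sketch of the logged CREATE call (ZK address is hypothetical).
    import java.io.IOException;
    import org.apache.solr.client.solrj.SolrServerException;
    import org.apache.solr.client.solrj.impl.CloudSolrClient;
    import org.apache.solr.client.solrj.request.CollectionAdminRequest;

    public class CreateBackupRestoreCollection {
      public static void main(String[] args) throws SolrServerException, IOException {
        try (CloudSolrClient client = new CloudSolrClient.Builder()
            .withZkHost("127.0.0.1:2181")   // hypothetical ZooKeeper address
            .build()) {
          // numShards=2, nrtReplicas=1, conf1, and router.field=shard_s, as in the log
          CollectionAdminRequest.createCollection("hdfsbackuprestore", "conf1", 2, 1)
              .setRouterField("shard_s")
              .setMaxShardsPerNode(-1)
              .process(client);
        }
      }
    }
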
   [junit4]   2> 1212450 INFO  (qtp835890382-5579) [n:127.0.0.1:37546_solr 
c:hdfsbackuprestore s:shard2 r:core_node4 
x:hdfsbackuprestore_shard2_replica_n2] o.a.s.c.ZkShardTerms Successful update 
of terms at /collections/hdfsbackuprestore/terms/shard2 to 
Terms{values={core_node4=1}, version=1}
   [junit4]   2> 1212450 INFO  (qtp835890382-5579) [n:127.0.0.1:37546_solr 
c:hdfsbackuprestore s:shard2 r:core_node4 
x:hdfsbackuprestore_shard2_replica_n2] o.a.s.u.p.LogUpdateProcessorFactory 
[hdfsbackuprestore_shard2_replica_n2]  webapp=/solr path=/update 
params={wt=javabin&version=2}{add=[0 (1614815173524062208), 1 
(1614815173527207936), 2 (1614815173527207937), 3 (1614815173527207938), 4 
(1614815173527207939), 5 (1614815173527207940), 6 (1614815173528256512), 7 
(1614815173528256513)]} 0 17
   [junit4]   2> 1212460 INFO  (qtp1719133957-5575) [n:127.0.0.1:41505_solr 
c:hdfsbackuprestore s:shard1 r:core_node3 
x:hdfsbackuprestore_shard1_replica_n1] o.a.s.u.DirectUpdateHandler2 start 
commit{_version_=1614815173553422336,optimize=false,openSearcher=true,waitSearcher=true,expungeDeletes=false,softCommit=false,prepareCommit=false}
   [junit4]   2> 1212461 INFO  (qtp1719133957-5575) [n:127.0.0.1:41505_solr 
c:hdfsbackuprestore s:shard1 r:core_node3 
x:hdfsbackuprestore_shard1_replica_n1] o.a.s.u.DirectUpdateHandler2 No 
uncommitted changes. Skipping IW.commit.
   [junit4]   2> 1212468 INFO  (qtp1719133957-5575) [n:127.0.0.1:41505_solr 
c:hdfsbackuprestore s:shard1 r:core_node3 
x:hdfsbackuprestore_shard1_replica_n1] o.a.s.u.DirectUpdateHandler2 
end_commit_flush
   [junit4]   2> 1212468 INFO  (qtp1719133957-5575) [n:127.0.0.1:41505_solr 
c:hdfsbackuprestore s:shard1 r:core_node3 
x:hdfsbackuprestore_shard1_replica_n1] o.a.s.u.p.LogUpdateProcessorFactory 
[hdfsbackuprestore_shard1_replica_n1]  webapp=/solr path=/update 
params={update.distrib=FROMLEADER&waitSearcher=true&openSearcher=true&commit=true&softCommit=false&distrib.from=https://127.0.0.1:41505/solr/hdfsbackuprestore_shard1_replica_n1/&commit_end_point=true&wt=javabin&version=2&expungeDeletes=false}{commit=}
 0 7
   [junit4]   2> 1212470 INFO  (qtp835890382-5576) [n:127.0.0.1:37546_solr 
c:hdfsbackuprestore s:shard2 r:core_node4 
x:hdfsbackuprestore_shard2_replica_n2] o.a.s.u.DirectUpdateHandler2 start 
commit{_version_=1614815173562859520,optimize=false,openSearcher=true,waitSearcher=true,expungeDeletes=false,softCommit=false,prepareCommit=false}
   [junit4]   2> 1212470 INFO  (qtp835890382-5576) [n:127.0.0.1:37546_solr 
c:hdfsbackuprestore s:shard2 r:core_node4 
x:hdfsbackuprestore_shard2_replica_n2] o.a.s.u.SolrIndexWriter Calling 
setCommitData with IW:org.apache.solr.update.SolrIndexWriter@309e991b 
commitCommandVersion:1614815173562859520
   [junit4]   2> 1212512 INFO  (qtp835890382-5576) [n:127.0.0.1:37546_solr 
c:hdfsbackuprestore s:shard2 r:core_node4 
x:hdfsbackuprestore_shard2_replica_n2] o.a.s.s.SolrIndexSearcher Opening 
[Searcher@15c2115a[hdfsbackuprestore_shard2_replica_n2] main]
   [junit4]   2> 1212512 INFO  (qtp835890382-5576) [n:127.0.0.1:37546_solr 
c:hdfsbackuprestore s:shard2 r:core_node4 
x:hdfsbackuprestore_shard2_replica_n2] o.a.s.u.DirectUpdateHandler2 
end_commit_flush
   [junit4]   2> 1212513 INFO  
(searcherExecutor-2348-thread-1-processing-n:127.0.0.1:37546_solr 
x:hdfsbackuprestore_shard2_replica_n2 c:hdfsbackuprestore s:shard2 
r:core_node4) [n:127.0.0.1:37546_solr c:hdfsbackuprestore s:shard2 r:core_node4 
x:hdfsbackuprestore_shard2_replica_n2] o.a.s.c.SolrCore 
[hdfsbackuprestore_shard2_replica_n2] Registered new searcher 
Searcher@15c2115a[hdfsbackuprestore_shard2_replica_n2] 
main{ExitableDirectoryReader(UninvertingDirectoryReader(Uninverting(_0(7.6.0):C8)))}
   [junit4]   2> 1212513 INFO  (qtp835890382-5576) [n:127.0.0.1:37546_solr 
c:hdfsbackuprestore s:shard2 r:core_node4 
x:hdfsbackuprestore_shard2_replica_n2] o.a.s.u.p.LogUpdateProcessorFactory 
[hdfsbackuprestore_shard2_replica_n2]  webapp=/solr path=/update 
params={update.distrib=FROMLEADER&waitSearcher=true&openSearcher=true&commit=true&softCommit=false&distrib.from=https://127.0.0.1:41505/solr/hdfsbackuprestore_shard1_replica_n1/&commit_end_point=true&wt=javabin&version=2&expungeDeletes=false}{commit=}
 0 44
   [junit4]   2> 1212514 INFO  (qtp1719133957-5577) [n:127.0.0.1:41505_solr 
c:hdfsbackuprestore s:shard1 r:core_node3 
x:hdfsbackuprestore_shard1_replica_n1] o.a.s.u.p.LogUpdateProcessorFactory 
[hdfsbackuprestore_shard1_replica_n1]  webapp=/solr path=/update 
params={_stateVer_=hdfsbackuprestore:4&waitSearcher=true&commit=true&softCommit=false&wt=javabin&version=2}{commit=}
 0 59
   [junit4]   2> 1212514 INFO  
(TEST-TestHdfsCloudBackupRestore.test-seed#[8607C92A69D883BC]) [    ] 
o.a.s.c.a.c.AbstractCloudBackupRestoreTestCase Indexed 8 docs to collection: 
hdfsbackuprestore
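
The eight documents were indexed through the ordinary update path shown above. A minimal client-side equivalent, continuing the earlier sketch (the shard_s values here are made up for illustration; only the id range 0-7 and the explicit commit come from the log):

    // Continues the earlier sketch; also requires:
    // import org.apache.solr.common.SolrInputDocument;
    for (int i = 0; i < 8; i++) {
      SolrInputDocument doc = new SolrInputDocument();
      doc.addField("id", Integer.toString(i));
      doc.addField("shard_s", "shard" + (i % 2 + 1)); // router.field=shard_s routes the doc
      client.add("hdfsbackuprestore", doc);
    }
    client.commit("hdfsbackuprestore"); // matches the explicit commit logged above
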
   [junit4]   2> 1212515 INFO  (qtp835890382-5574) [n:127.0.0.1:37546_solr    ] 
o.a.s.h.a.CollectionsHandler Invoked Collection Action :splitshard with params 
action=SPLITSHARD&collection=hdfsbackuprestore&shard=shard1&wt=javabin&version=2
 and sendToOCPQueue=true
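
The SPLITSHARD action just invoked corresponds to a one-line SolrJ request; a sketch, again reusing the hypothetical client from the CREATE example:

    // SolrJ form of the logged SPLITSHARD call (sketch; client as above).
    CollectionAdminRequest.splitShard("hdfsbackuprestore")
        .setShardName("shard1")
        .process(client);
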
   [junit4]   2> 1212520 INFO  (qtp1719133957-5571) [n:127.0.0.1:41505_solr    
] o.a.s.s.HttpSolrCall [admin] webapp=null path=/admin/metrics 
params={prefix=CONTAINER.fs.usableSpace&wt=javabin&version=2&group=solr.node} 
status=0 QTime=0
   [junit4]   2> 1212521 INFO  (qtp1719133957-5577) [n:127.0.0.1:41505_solr    
] o.a.s.s.HttpSolrCall [admin] webapp=null path=/admin/metrics 
params={wt=javabin&version=2&key=solr.core.hdfsbackuprestore.shard1.replica_n1:INDEX.sizeInBytes}
 status=0 QTime=0
   [junit4]   2> 1212626 INFO  (zkCallback-1462-thread-1) [    ] 
o.a.s.c.c.ZkStateReader A cluster state change: [WatchedEvent 
state:SyncConnected type:NodeDataChanged 
path:/collections/hdfsbackuprestore/state.json] for collection 
[hdfsbackuprestore] has occurred - updating... (live nodes size: [2])
   [junit4]   2> 1212626 INFO  (zkCallback-1448-thread-1) [    ] 
o.a.s.c.c.ZkStateReader A cluster state change: [WatchedEvent 
state:SyncConnected type:NodeDataChanged 
path:/collections/hdfsbackuprestore/state.json] for collection 
[hdfsbackuprestore] has occurred - updating... (live nodes size: [2])
   [junit4]   2> 1213529 INFO  (qtp835890382-5579) [n:127.0.0.1:37546_solr    ] 
o.a.s.s.HttpSolrCall [admin] webapp=null path=/admin/metrics 
params={wt=javabin&version=2&key=solr.core.hdfsbackuprestore.shard2.replica_n2:INDEX.sizeInBytes}
 status=0 QTime=0
   [junit4]   2> 1213530 INFO  (qtp835890382-5579) [n:127.0.0.1:37546_solr    ] 
o.a.s.s.HttpSolrCall [admin] webapp=null path=/admin/metrics 
params={prefix=CONTAINER.fs.usableSpace,CONTAINER.fs.totalSpace,CORE.coreName&wt=javabin&version=2&group=solr.node,solr.core}
 status=0 QTime=0
   [junit4]   2> 1213531 INFO  (qtp1719133957-5571) [n:127.0.0.1:41505_solr    
] o.a.s.s.HttpSolrCall [admin] webapp=null path=/admin/metrics 
params={wt=javabin&version=2&key=solr.core.hdfsbackuprestore.shard1.replica_n1:INDEX.sizeInBytes}
 status=0 QTime=0
   [junit4]   2> 1213533 INFO  (qtp1719133957-5577) [n:127.0.0.1:41505_solr    
] o.a.s.s.HttpSolrCall [admin] webapp=null path=/admin/metrics 
params={prefix=CONTAINER.fs.usableSpace,CONTAINER.fs.totalSpace,CORE.coreName&wt=javabin&version=2&group=solr.node,solr.core}
 status=0 QTime=0
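
The /admin/metrics requests above are how the split logic measures the parent index (INDEX.sizeInBytes) and the free disk space on candidate nodes. An equivalent ad-hoc query could be issued as in this sketch (the key parameter is copied from the log; the request wiring is an assumption, not the test's own code):

    // Sketch: replaying the logged metrics query; also requires:
    // import org.apache.solr.client.solrj.SolrRequest;
    // import org.apache.solr.client.solrj.request.GenericSolrRequest;
    // import org.apache.solr.common.params.ModifiableSolrParams;
    ModifiableSolrParams params = new ModifiableSolrParams();
    params.set("key", "solr.core.hdfsbackuprestore.shard1.replica_n1:INDEX.sizeInBytes");
    new GenericSolrRequest(SolrRequest.METHOD.GET, "/admin/metrics", params)
        .process(client);
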
   [junit4]   2> 1213533 INFO  
(OverseerThreadFactory-2339-thread-2-processing-n:127.0.0.1:37546_solr) 
[n:127.0.0.1:37546_solr c:hdfsbackuprestore s:shard1  ] 
o.a.s.c.a.c.AddReplicaCmd Node Identified 127.0.0.1:41505_solr for creating new 
replica of shard shard1_0 for collection hdfsbackuprestore
   [junit4]   2> 1213535 INFO  
(OverseerStateUpdate-73522006077341700-127.0.0.1:37546_solr-n_0000000000) 
[n:127.0.0.1:37546_solr    ] o.a.s.c.o.SliceMutator createReplica() {
   [junit4]   2>   "operation":"addreplica",
   [junit4]   2>   "collection":"hdfsbackuprestore",
   [junit4]   2>   "shard":"shard1_0",
   [junit4]   2>   "core":"hdfsbackuprestore_shard1_0_replica_n5",
   [junit4]   2>   "state":"down",
   [junit4]   2>   "base_url":"https://127.0.0.1:41505/solr";,
   [junit4]   2>   "node_name":"127.0.0.1:41505_solr",
   [junit4]   2>   "type":"NRT"} 
   [junit4]   2> 1213637 INFO  (zkCallback-1462-thread-1) [    ] 
o.a.s.c.c.ZkStateReader A cluster state change: [WatchedEvent 
state:SyncConnected type:NodeDataChanged 
path:/collections/hdfsbackuprestore/state.json] for collection 
[hdfsbackuprestore] has occurred - updating... (live nodes size: [2])
   [junit4]   2> 1213637 INFO  (zkCallback-1448-thread-2) [    ] 
o.a.s.c.c.ZkStateReader A cluster state change: [WatchedEvent 
state:SyncConnected type:NodeDataChanged 
path:/collections/hdfsbackuprestore/state.json] for collection 
[hdfsbackuprestore] has occurred - updating... (live nodes size: [2])
   [junit4]   2> 1213736 INFO  (qtp1719133957-5565) [n:127.0.0.1:41505_solr    
x:hdfsbackuprestore_shard1_0_replica_n5] o.a.s.h.a.CoreAdminOperation core 
create command 
qt=/admin/cores&coreNodeName=core_node7&collection.configName=conf1&name=hdfsbackuprestore_shard1_0_replica_n5&action=CREATE&collection=hdfsbackuprestore&shard=shard1_0&wt=javabin&version=2&replicaType=NRT
   [junit4]   2> 1213748 INFO  (qtp1719133957-5565) [n:127.0.0.1:41505_solr 
c:hdfsbackuprestore s:shard1_0 r:core_node7 
x:hdfsbackuprestore_shard1_0_replica_n5] o.a.s.c.SolrConfig Using Lucene 
MatchVersion: 7.6.0
   [junit4]   2> 1213814 INFO  (qtp1719133957-5565) [n:127.0.0.1:41505_solr 
c:hdfsbackuprestore s:shard1_0 r:core_node7 
x:hdfsbackuprestore_shard1_0_replica_n5] o.a.s.s.IndexSchema 
[hdfsbackuprestore_shard1_0_replica_n5] Schema name=minimal
   [junit4]   2> 1213817 INFO  (qtp1719133957-5565) [n:127.0.0.1:41505_solr 
c:hdfsbackuprestore s:shard1_0 r:core_node7 
x:hdfsbackuprestore_shard1_0_replica_n5] o.a.s.s.IndexSchema Loaded schema 
minimal/1.1 with uniqueid field id
   [junit4]   2> 1213817 INFO  (qtp1719133957-5565) [n:127.0.0.1:41505_solr 
c:hdfsbackuprestore s:shard1_0 r:core_node7 
x:hdfsbackuprestore_shard1_0_replica_n5] o.a.s.c.CoreContainer Creating 
SolrCore 'hdfsbackuprestore_shard1_0_replica_n5' using configuration from 
collection hdfsbackuprestore, trusted=true
   [junit4]   2> 1213817 INFO  (qtp1719133957-5565) [n:127.0.0.1:41505_solr 
c:hdfsbackuprestore s:shard1_0 r:core_node7 
x:hdfsbackuprestore_shard1_0_replica_n5] o.a.s.m.r.SolrJmxReporter JMX 
monitoring for 'solr.core.hdfsbackuprestore.shard1_0.replica_n5' (registry 
'solr.core.hdfsbackuprestore.shard1_0.replica_n5') enabled at server: 
com.sun.jmx.mbeanserver.JmxMBeanServer@661840da
   [junit4]   2> 1213817 INFO  (qtp1719133957-5565) [n:127.0.0.1:41505_solr 
c:hdfsbackuprestore s:shard1_0 r:core_node7 
x:hdfsbackuprestore_shard1_0_replica_n5] o.a.s.c.SolrCore 
[[hdfsbackuprestore_shard1_0_replica_n5] ] Opening new SolrCore at 
[/home/jenkins/jenkins-slave/workspace/Lucene-Solr-BadApples-Tests-7.x/solr/build/solr-core/test/J0/temp/solr.cloud.api.collections.TestHdfsCloudBackupRestore_8607C92A69D883BC-001/tempDir-002/node1/hdfsbackuprestore_shard1_0_replica_n5],
 
dataDir=[/home/jenkins/jenkins-slave/workspace/Lucene-Solr-BadApples-Tests-7.x/solr/build/solr-core/test/J0/temp/solr.cloud.api.collections.TestHdfsCloudBackupRestore_8607C92A69D883BC-001/tempDir-002/node1/./hdfsbackuprestore_shard1_0_replica_n5/data/]
   [junit4]   2> 1213842 INFO  (zkCallback-1448-thread-2) [    ] 
o.a.s.c.c.ZkStateReader A cluster state change: [WatchedEvent 
state:SyncConnected type:NodeDataChanged 
path:/collections/hdfsbackuprestore/state.json] for collection 
[hdfsbackuprestore] has occurred - updating... (live nodes size: [2])
   [junit4]   2> 1213842 INFO  (zkCallback-1462-thread-1) [    ] 
o.a.s.c.c.ZkStateReader A cluster state change: [WatchedEvent 
state:SyncConnected type:NodeDataChanged 
path:/collections/hdfsbackuprestore/state.json] for collection 
[hdfsbackuprestore] has occurred - updating... (live nodes size: [2])
   [junit4]   2> 1213888 INFO  (qtp1719133957-5565) [n:127.0.0.1:41505_solr 
c:hdfsbackuprestore s:shard1_0 r:core_node7 
x:hdfsbackuprestore_shard1_0_replica_n5] o.a.s.u.UpdateHandler Using UpdateLog 
implementation: org.apache.solr.update.UpdateLog
   [junit4]   2> 1213888 INFO  (qtp1719133957-5565) [n:127.0.0.1:41505_solr 
c:hdfsbackuprestore s:shard1_0 r:core_node7 
x:hdfsbackuprestore_shard1_0_replica_n5] o.a.s.u.UpdateLog Initializing 
UpdateLog: dataDir=null defaultSyncLevel=FLUSH numRecordsToKeep=100 
maxNumLogsToKeep=10 numVersionBuckets=65536
   [junit4]   2> 1213889 INFO  (qtp1719133957-5565) [n:127.0.0.1:41505_solr 
c:hdfsbackuprestore s:shard1_0 r:core_node7 
x:hdfsbackuprestore_shard1_0_replica_n5] o.a.s.u.CommitTracker Hard AutoCommit: 
disabled
   [junit4]   2> 1213889 INFO  (qtp1719133957-5565) [n:127.0.0.1:41505_solr 
c:hdfsbackuprestore s:shard1_0 r:core_node7 
x:hdfsbackuprestore_shard1_0_replica_n5] o.a.s.u.CommitTracker Soft AutoCommit: 
disabled
   [junit4]   2> 1213891 INFO  (qtp1719133957-5565) [n:127.0.0.1:41505_solr 
c:hdfsbackuprestore s:shard1_0 r:core_node7 
x:hdfsbackuprestore_shard1_0_replica_n5] o.a.s.s.SolrIndexSearcher Opening 
[Searcher@54bbbd0[hdfsbackuprestore_shard1_0_replica_n5] main]
   [junit4]   2> 1213893 INFO  (qtp1719133957-5565) [n:127.0.0.1:41505_solr 
c:hdfsbackuprestore s:shard1_0 r:core_node7 
x:hdfsbackuprestore_shard1_0_replica_n5] o.a.s.r.ManagedResourceStorage 
Configured ZooKeeperStorageIO with znodeBase: /configs/conf1
   [junit4]   2> 1213894 INFO  (qtp1719133957-5565) [n:127.0.0.1:41505_solr 
c:hdfsbackuprestore s:shard1_0 r:core_node7 
x:hdfsbackuprestore_shard1_0_replica_n5] o.a.s.r.ManagedResourceStorage Loaded 
null at path _rest_managed.json using ZooKeeperStorageIO:path=/configs/conf1
   [junit4]   2> 1213894 INFO  (qtp1719133957-5565) [n:127.0.0.1:41505_solr 
c:hdfsbackuprestore s:shard1_0 r:core_node7 
x:hdfsbackuprestore_shard1_0_replica_n5] o.a.s.h.ReplicationHandler Commits 
will be reserved for 10000ms.
   [junit4]   2> 1213894 INFO  (qtp1719133957-5565) [n:127.0.0.1:41505_solr 
c:hdfsbackuprestore s:shard1_0 r:core_node7 
x:hdfsbackuprestore_shard1_0_replica_n5] o.a.s.u.UpdateLog Could not find max 
version in index or recent updates, using new clock 1614815175057080320
   [junit4]   2> 1213895 INFO  
(searcherExecutor-2358-thread-1-processing-n:127.0.0.1:41505_solr 
x:hdfsbackuprestore_shard1_0_replica_n5 c:hdfsbackuprestore s:shard1_0 
r:core_node7) [n:127.0.0.1:41505_solr c:hdfsbackuprestore s:shard1_0 
r:core_node7 x:hdfsbackuprestore_shard1_0_replica_n5] o.a.s.c.SolrCore 
[hdfsbackuprestore_shard1_0_replica_n5] Registered new searcher 
Searcher@54bbbd0[hdfsbackuprestore_shard1_0_replica_n5] 
main{ExitableDirectoryReader(UninvertingDirectoryReader())}
   [junit4]   2> 1213896 INFO  (qtp1719133957-5565) [n:127.0.0.1:41505_solr 
c:hdfsbackuprestore s:shard1_0 r:core_node7 
x:hdfsbackuprestore_shard1_0_replica_n5] o.a.s.u.UpdateLog Starting to buffer 
updates. FSUpdateLog{state=ACTIVE, tlog=null}
   [junit4]   2> 1213898 INFO  (qtp1719133957-5565) [n:127.0.0.1:41505_solr 
c:hdfsbackuprestore s:shard1_0 r:core_node7 
x:hdfsbackuprestore_shard1_0_replica_n5] o.a.s.c.ZkShardTerms Successful update 
of terms at /collections/hdfsbackuprestore/terms/shard1_0 to 
Terms{values={core_node7=0}, version=0}
   [junit4]   2> 1213903 INFO  (qtp1719133957-5565) [n:127.0.0.1:41505_solr 
c:hdfsbackuprestore s:shard1_0 r:core_node7 
x:hdfsbackuprestore_shard1_0_replica_n5] o.a.s.c.ShardLeaderElectionContext 
Enough replicas found to continue.
   [junit4]   2> 1213903 INFO  (qtp1719133957-5565) [n:127.0.0.1:41505_solr 
c:hdfsbackuprestore s:shard1_0 r:core_node7 
x:hdfsbackuprestore_shard1_0_replica_n5] o.a.s.c.ShardLeaderElectionContext I 
may be the new leader - try and sync
   [junit4]   2> 1213903 INFO  (qtp1719133957-5565) [n:127.0.0.1:41505_solr 
c:hdfsbackuprestore s:shard1_0 r:core_node7 
x:hdfsbackuprestore_shard1_0_replica_n5] o.a.s.c.SyncStrategy Sync replicas to 
https://127.0.0.1:41505/solr/hdfsbackuprestore_shard1_0_replica_n5/
   [junit4]   2> 1213903 INFO  (qtp1719133957-5565) [n:127.0.0.1:41505_solr 
c:hdfsbackuprestore s:shard1_0 r:core_node7 
x:hdfsbackuprestore_shard1_0_replica_n5] o.a.s.c.SyncStrategy Sync Success - 
now sync replicas to me
   [junit4]   2> 1213903 INFO  (qtp1719133957-5565) [n:127.0.0.1:41505_solr 
c:hdfsbackuprestore s:shard1_0 r:core_node7 
x:hdfsbackuprestore_shard1_0_replica_n5] o.a.s.c.SyncStrategy 
https://127.0.0.1:41505/solr/hdfsbackuprestore_shard1_0_replica_n5/ has no 
replicas
   [junit4]   2> 1213903 INFO  (qtp1719133957-5565) [n:127.0.0.1:41505_solr 
c:hdfsbackuprestore s:shard1_0 r:core_node7 
x:hdfsbackuprestore_shard1_0_replica_n5] o.a.s.c.ShardLeaderElectionContext 
Found all replicas participating in election, clear LIR
   [junit4]   2> 1213906 INFO  (qtp1719133957-5565) [n:127.0.0.1:41505_solr 
c:hdfsbackuprestore s:shard1_0 r:core_node7 
x:hdfsbackuprestore_shard1_0_replica_n5] o.a.s.c.ShardLeaderElectionContext I 
am the new leader: 
https://127.0.0.1:41505/solr/hdfsbackuprestore_shard1_0_replica_n5/ shard1_0
   [junit4]   2> 1214007 INFO  (zkCallback-1462-thread-1) [    ] 
o.a.s.c.c.ZkStateReader A cluster state change: [WatchedEvent 
state:SyncConnected type:NodeDataChanged 
path:/collections/hdfsbackuprestore/state.json] for collection 
[hdfsbackuprestore] has occurred - updating... (live nodes size: [2])
   [junit4]   2> 1214007 INFO  (zkCallback-1448-thread-2) [    ] 
o.a.s.c.c.ZkStateReader A cluster state change: [WatchedEvent 
state:SyncConnected type:NodeDataChanged 
path:/collections/hdfsbackuprestore/state.json] for collection 
[hdfsbackuprestore] has occurred - updating... (live nodes size: [2])
   [junit4]   2> 1214057 INFO  (qtp1719133957-5565) [n:127.0.0.1:41505_solr 
c:hdfsbackuprestore s:shard1_0 r:core_node7 
x:hdfsbackuprestore_shard1_0_replica_n5] o.a.s.c.ZkController I am the leader, 
no recovery necessary
   [junit4]   2> 1214059 INFO  (qtp1719133957-5565) [n:127.0.0.1:41505_solr 
c:hdfsbackuprestore s:shard1_0 r:core_node7 
x:hdfsbackuprestore_shard1_0_replica_n5] o.a.s.s.HttpSolrCall [admin] 
webapp=null path=/admin/cores 
params={qt=/admin/cores&coreNodeName=core_node7&collection.configName=conf1&name=hdfsbackuprestore_shard1_0_replica_n5&action=CREATE&collection=hdfsbackuprestore&shard=shard1_0&wt=javabin&version=2&replicaType=NRT}
 status=0 QTime=323
   [junit4]   2> 1214163 INFO  (zkCallback-1462-thread-1) [    ] 
o.a.s.c.c.ZkStateReader A cluster state change: [WatchedEvent 
state:SyncConnected type:NodeDataChanged 
path:/collections/hdfsbackuprestore/state.json] for collection 
[hdfsbackuprestore] has occurred - updating... (live nodes size: [2])
   [junit4]   2> 1214163 INFO  (zkCallback-1448-thread-1) [    ] 
o.a.s.c.c.ZkStateReader A cluster state change: [WatchedEvent 
state:SyncConnected type:NodeDataChanged 
path:/collections/hdfsbackuprestore/state.json] for collection 
[hdfsbackuprestore] has occurred - updating... (live nodes size: [2])
   [junit4]   2> 1215065 INFO  (qtp835890382-5578) [n:127.0.0.1:37546_solr    ] 
o.a.s.s.HttpSolrCall [admin] webapp=null path=/admin/metrics 
params={wt=javabin&version=2&key=solr.core.hdfsbackuprestore.shard2.replica_n2:INDEX.sizeInBytes}
 status=0 QTime=1
   [junit4]   2> 1215066 INFO  (qtp835890382-5579) [n:127.0.0.1:37546_solr    ] 
o.a.s.s.HttpSolrCall [admin] webapp=null path=/admin/metrics 
params={prefix=CONTAINER.fs.usableSpace,CONTAINER.fs.totalSpace,CORE.coreName&wt=javabin&version=2&group=solr.node,solr.core}
 status=0 QTime=0
   [junit4]   2> 1215068 INFO  (qtp1719133957-5571) [n:127.0.0.1:41505_solr    
] o.a.s.s.HttpSolrCall [admin] webapp=null path=/admin/metrics 
params={wt=javabin&version=2&key=solr.core.hdfsbackuprestore.shard1.replica_n1:INDEX.sizeInBytes&key=solr.core.hdfsbackuprestore.shard1_0.replica_n5:INDEX.sizeInBytes}
 status=0 QTime=1
   [junit4]   2> 1215069 INFO  (qtp1719133957-5565) [n:127.0.0.1:41505_solr    
] o.a.s.s.HttpSolrCall [admin] webapp=null path=/admin/metrics 
params={prefix=CONTAINER.fs.usableSpace,CONTAINER.fs.totalSpace,CORE.coreName&wt=javabin&version=2&group=solr.node,solr.core}
 status=0 QTime=1
   [junit4]   2> 1215070 INFO  
(OverseerThreadFactory-2339-thread-2-processing-n:127.0.0.1:37546_solr) 
[n:127.0.0.1:37546_solr c:hdfsbackuprestore s:shard1  ] 
o.a.s.c.a.c.AddReplicaCmd Node Identified 127.0.0.1:41505_solr for creating new 
replica of shard shard1_1 for collection hdfsbackuprestore
   [junit4]   2> 1215072 INFO  
(OverseerStateUpdate-73522006077341700-127.0.0.1:37546_solr-n_0000000000) 
[n:127.0.0.1:37546_solr    ] o.a.s.c.o.SliceMutator createReplica() {
   [junit4]   2>   "operation":"addreplica",
   [junit4]   2>   "collection":"hdfsbackuprestore",
   [junit4]   2>   "shard":"shard1_1",
   [junit4]   2>   "core":"hdfsbackuprestore_shard1_1_replica_n6",
   [junit4]   2>   "state":"down",
   [junit4]   2>   "base_url":"https://127.0.0.1:41505/solr";,
   [junit4]   2>   "node_name":"127.0.0.1:41505_solr",
   [junit4]   2>   "type":"NRT"} 
   [junit4]   2> 1215174 INFO  (zkCallback-1448-thread-1) [    ] 
o.a.s.c.c.ZkStateReader A cluster state change: [WatchedEvent 
state:SyncConnected type:NodeDataChanged 
path:/collections/hdfsbackuprestore/state.json] for collection 
[hdfsbackuprestore] has occurred - updating... (live nodes size: [2])
   [junit4]   2> 1215174 INFO  (zkCallback-1462-thread-1) [    ] 
o.a.s.c.c.ZkStateReader A cluster state change: [WatchedEvent 
state:SyncConnected type:NodeDataChanged 
path:/collections/hdfsbackuprestore/state.json] for collection 
[hdfsbackuprestore] has occurred - updating... (live nodes size: [2])
   [junit4]   2> 1215273 INFO  (qtp1719133957-5577) [n:127.0.0.1:41505_solr    
x:hdfsbackuprestore_shard1_1_replica_n6] o.a.s.h.a.CoreAdminOperation core 
create command 
qt=/admin/cores&coreNodeName=core_node8&collection.configName=conf1&name=hdfsbackuprestore_shard1_1_replica_n6&action=CREATE&collection=hdfsbackuprestore&shard=shard1_1&wt=javabin&version=2&replicaType=NRT
   [junit4]   2> 1215283 INFO  (qtp1719133957-5577) [n:127.0.0.1:41505_solr 
c:hdfsbackuprestore s:shard1_1 r:core_node8 
x:hdfsbackuprestore_shard1_1_replica_n6] o.a.s.c.SolrConfig Using Lucene 
MatchVersion: 7.6.0
   [junit4]   2> 1215289 INFO  (qtp1719133957-5577) [n:127.0.0.1:41505_solr 
c:hdfsbackuprestore s:shard1_1 r:core_node8 
x:hdfsbackuprestore_shard1_1_replica_n6] o.a.s.s.IndexSchema 
[hdfsbackuprestore_shard1_1_replica_n6] Schema name=minimal
   [junit4]   2> 1215291 INFO  (qtp1719133957-5577) [n:127.0.0.1:41505_solr 
c:hdfsbackuprestore s:shard1_1 r:core_node8 
x:hdfsbackuprestore_shard1_1_replica_n6] o.a.s.s.IndexSchema Loaded schema 
minimal/1.1 with uniqueid field id
   [junit4]   2> 1215292 INFO  (qtp1719133957-5577) [n:127.0.0.1:41505_solr 
c:hdfsbackuprestore s:shard1_1 r:core_node8 
x:hdfsbackuprestore_shard1_1_replica_n6] o.a.s.c.CoreContainer Creating 
SolrCore 'hdfsbackuprestore_shard1_1_replica_n6' using configuration from 
collection hdfsbackuprestore, trusted=true
   [junit4]   2> 1215292 INFO  (qtp1719133957-5577) [n:127.0.0.1:41505_solr 
c:hdfsbackuprestore s:shard1_1 r:core_node8 
x:hdfsbackuprestore_shard1_1_replica_n6] o.a.s.m.r.SolrJmxReporter JMX 
monitoring for 'solr.core.hdfsbackuprestore.shard1_1.replica_n6' (registry 
'solr.core.hdfsbackuprestore.shard1_1.replica_n6') enabled at server: 
com.sun.jmx.mbeanserver.JmxMBeanServer@661840da
   [junit4]   2> 1215292 INFO  (qtp1719133957-5577) [n:127.0.0.1:41505_solr 
c:hdfsbackuprestore s:shard1_1 r:core_node8 
x:hdfsbackuprestore_shard1_1_replica_n6] o.a.s.c.SolrCore 
[[hdfsbackuprestore_shard1_1_replica_n6] ] Opening new SolrCore at 
[/home/jenkins/jenkins-slave/workspace/Lucene-Solr-BadApples-Tests-7.x/solr/build/solr-core/test/J0/temp/solr.cloud.api.collections.TestHdfsCloudBackupRestore_8607C92A69D883BC-001/tempDir-002/node1/hdfsbackuprestore_shard1_1_replica_n6],
 
dataDir=[/home/jenkins/jenkins-slave/workspace/Lucene-Solr-BadApples-Tests-7.x/solr/build/solr-core/test/J0/temp/solr.cloud.api.collections.TestHdfsCloudBackupRestore_8607C92A69D883BC-001/tempDir-002/node1/./hdfsbackuprestore_shard1_1_replica_n6/data/]
   [junit4]   2> 1215347 INFO  (qtp1719133957-5577) [n:127.0.0.1:41505_solr 
c:hdfsbackuprestore s:shard1_1 r:core_node8 
x:hdfsbackuprestore_shard1_1_replica_n6] o.a.s.u.UpdateHandler Using UpdateLog 
implementation: org.apache.solr.update.UpdateLog
   [junit4]   2> 1215347 INFO  (qtp1719133957-5577) [n:127.0.0.1:41505_solr 
c:hdfsbackuprestore s:shard1_1 r:core_node8 
x:hdfsbackuprestore_shard1_1_replica_n6] o.a.s.u.UpdateLog Initializing 
UpdateLog: dataDir=null defaultSyncLevel=FLUSH numRecordsToKeep=100 
maxNumLogsToKeep=10 numVersionBuckets=65536
   [junit4]   2> 1215348 INFO  (qtp1719133957-5577) [n:127.0.0.1:41505_solr 
c:hdfsbackuprestore s:shard1_1 r:core_node8 
x:hdfsbackuprestore_shard1_1_replica_n6] o.a.s.u.CommitTracker Hard AutoCommit: 
disabled
   [junit4]   2> 1215348 INFO  (qtp1719133957-5577) [n:127.0.0.1:41505_solr 
c:hdfsbackuprestore s:shard1_1 r:core_node8 
x:hdfsbackuprestore_shard1_1_replica_n6] o.a.s.u.CommitTracker Soft AutoCommit: 
disabled
   [junit4]   2> 1215350 INFO  (qtp1719133957-5577) [n:127.0.0.1:41505_solr 
c:hdfsbackuprestore s:shard1_1 r:core_node8 
x:hdfsbackuprestore_shard1_1_replica_n6] o.a.s.s.SolrIndexSearcher Opening 
[Searcher@757be01[hdfsbackuprestore_shard1_1_replica_n6] main]
   [junit4]   2> 1215351 INFO  (qtp1719133957-5577) [n:127.0.0.1:41505_solr 
c:hdfsbackuprestore s:shard1_1 r:core_node8 
x:hdfsbackuprestore_shard1_1_replica_n6] o.a.s.r.ManagedResourceStorage 
Configured ZooKeeperStorageIO with znodeBase: /configs/conf1
   [junit4]   2> 1215352 INFO  (qtp1719133957-5577) [n:127.0.0.1:41505_solr 
c:hdfsbackuprestore s:shard1_1 r:core_node8 
x:hdfsbackuprestore_shard1_1_replica_n6] o.a.s.r.ManagedResourceStorage Loaded 
null at path _rest_managed.json using ZooKeeperStorageIO:path=/configs/conf1
   [junit4]   2> 1215352 INFO  (qtp1719133957-5577) [n:127.0.0.1:41505_solr 
c:hdfsbackuprestore s:shard1_1 r:core_node8 
x:hdfsbackuprestore_shard1_1_replica_n6] o.a.s.h.ReplicationHandler Commits 
will be reserved for 10000ms.
   [junit4]   2> 1215353 INFO  (qtp1719133957-5577) [n:127.0.0.1:41505_solr 
c:hdfsbackuprestore s:shard1_1 r:core_node8 
x:hdfsbackuprestore_shard1_1_replica_n6] o.a.s.u.UpdateLog Could not find max 
version in index or recent updates, using new clock 1614815176586952704
   [junit4]   2> 1215353 INFO  
(searcherExecutor-2363-thread-1-processing-n:127.0.0.1:41505_solr 
x:hdfsbackuprestore_shard1_1_replica_n6 c:hdfsbackuprestore s:shard1_1 
r:core_node8) [n:127.0.0.1:41505_solr c:hdfsbackuprestore s:shard1_1 
r:core_node8 x:hdfsbackuprestore_shard1_1_replica_n6] o.a.s.c.SolrCore 
[hdfsbackuprestore_shard1_1_replica_n6] Registered new searcher 
Searcher@757be01[hdfsbackuprestore_shard1_1_replica_n6] 
main{ExitableDirectoryReader(UninvertingDirectoryReader())}
   [junit4]   2> 1215354 INFO  (qtp1719133957-5577) [n:127.0.0.1:41505_solr 
c:hdfsbackuprestore s:shard1_1 r:core_node8 
x:hdfsbackuprestore_shard1_1_replica_n6] o.a.s.u.UpdateLog Starting to buffer 
updates. FSUpdateLog{state=ACTIVE, tlog=null}
   [junit4]   2> 1215357 INFO  (qtp1719133957-5577) [n:127.0.0.1:41505_solr 
c:hdfsbackuprestore s:shard1_1 r:core_node8 
x:hdfsbackuprestore_shard1_1_replica_n6] o.a.s.c.ZkShardTerms Successful update 
of terms at /collections/hdfsbackuprestore/terms/shard1_1 to 
Terms{values={core_node8=0}, version=0}
   [junit4]   2> 1215359 INFO  (qtp1719133957-5577) [n:127.0.0.1:41505_solr 
c:hdfsbackuprestore s:shard1_1 r:core_node8 
x:hdfsbackuprestore_shard1_1_replica_n6] o.a.s.c.ShardLeaderElectionContext 
Enough replicas found to continue.
   [junit4]   2> 1215359 INFO  (qtp1719133957-5577) [n:127.0.0.1:41505_solr 
c:hdfsbackuprestore s:shard1_1 r:core_node8 
x:hdfsbackuprestore_shard1_1_replica_n6] o.a.s.c.ShardLeaderElectionContext I 
may be the new leader - try and sync
   [junit4]   2> 1215359 INFO  (qtp1719133957-5577) [n:127.0.0.1:41505_solr 
c:hdfsbackuprestore s:shard1_1 r:core_node8 
x:hdfsbackuprestore_shard1_1_replica_n6] o.a.s.c.SyncStrategy Sync replicas to 
https://127.0.0.1:41505/solr/hdfsbackuprestore_shard1_1_replica_n6/
   [junit4]   2> 1215359 INFO  (qtp1719133957-5577) [n:127.0.0.1:41505_solr 
c:hdfsbackuprestore s:shard1_1 r:core_node8 
x:hdfsbackuprestore_shard1_1_replica_n6] o.a.s.c.SyncStrategy Sync Success - 
now sync replicas to me
   [junit4]   2> 1215359 INFO  (qtp1719133957-5577) [n:127.0.0.1:41505_solr 
c:hdfsbackuprestore s:shard1_1 r:core_node8 
x:hdfsbackuprestore_shard1_1_replica_n6] o.a.s.c.SyncStrategy 
https://127.0.0.1:41505/solr/hdfsbackuprestore_shard1_1_replica_n6/ has no 
replicas
   [junit4]   2> 1215359 INFO  (qtp1719133957-5577) [n:127.0.0.1:41505_solr 
c:hdfsbackuprestore s:shard1_1 r:core_node8 
x:hdfsbackuprestore_shard1_1_replica_n6] o.a.s.c.ShardLeaderElectionContext 
Found all replicas participating in election, clear LIR
   [junit4]   2> 1215362 INFO  (qtp1719133957-5577) [n:127.0.0.1:41505_solr 
c:hdfsbackuprestore s:shard1_1 r:core_node8 
x:hdfsbackuprestore_shard1_1_replica_n6] o.a.s.c.ShardLeaderElectionContext I 
am the new leader: 
https://127.0.0.1:41505/solr/hdfsbackuprestore_shard1_1_replica_n6/ shard1_1
   [junit4]   2> 1215464 INFO  (zkCallback-1462-thread-1) [    ] 
o.a.s.c.c.ZkStateReader A cluster state change: [WatchedEvent 
state:SyncConnected type:NodeDataChanged 
path:/collections/hdfsbackuprestore/state.json] for collection 
[hdfsbackuprestore] has occurred - updating... (live nodes size: [2])
   [junit4]   2> 1215464 INFO  (zkCallback-1448-thread-2) [    ] 
o.a.s.c.c.ZkStateReader A cluster state change: [WatchedEvent 
state:SyncConnected type:NodeDataChanged 
path:/collections/hdfsbackuprestore/state.json] for collection 
[hdfsbackuprestore] has occurred - updating... (live nodes size: [2])
   [junit4]   2> 1215513 INFO  (qtp1719133957-5577) [n:127.0.0.1:41505_solr 
c:hdfsbackuprestore s:shard1_1 r:core_node8 
x:hdfsbackuprestore_shard1_1_replica_n6] o.a.s.c.ZkController I am the leader, 
no recovery necessary
   [junit4]   2> 1215516 INFO  (qtp1719133957-5577) [n:127.0.0.1:41505_solr 
c:hdfsbackuprestore s:shard1_1 r:core_node8 
x:hdfsbackuprestore_shard1_1_replica_n6] o.a.s.s.HttpSolrCall [admin] 
webapp=null path=/admin/cores 
params={qt=/admin/cores&coreNodeName=core_node8&collection.configName=conf1&name=hdfsbackuprestore_shard1_1_replica_n6&action=CREATE&collection=hdfsbackuprestore&shard=shard1_1&wt=javabin&version=2&replicaType=NRT}
 status=0 QTime=242
   [junit4]   2> 1215517 INFO  (qtp1719133957-5565) [n:127.0.0.1:41505_solr    
x:hdfsbackuprestore_shard1_0_replica_n5] o.a.s.h.a.PrepRecoveryOp Going to wait 
for coreNodeName: core_node7, state: active, checkLive: true, onlyIfLeader: 
true, onlyIfLeaderActive: null, maxTime: 183 s
   [junit4]   2> 1215517 INFO  (qtp1719133957-5565) [n:127.0.0.1:41505_solr    
x:hdfsbackuprestore_shard1_0_replica_n5] o.a.s.h.a.PrepRecoveryOp In 
WaitForState(active): collection=hdfsbackuprestore, shard=shard1_0, 
thisCore=hdfsbackuprestore_shard1_0_replica_n5, 
leaderDoesNotNeedRecovery=false, isLeader? true, live=true, checkLive=true, 
currentState=active, localState=active, nodeName=127.0.0.1:41505_solr, 
coreNodeName=core_node7, onlyIfActiveCheckResult=false, nodeProps: 
core_node7:{"core":"hdfsbackuprestore_shard1_0_replica_n5","base_url":"https://127.0.0.1:41505/solr","node_name":"127.0.0.1:41505_solr","state":"active","type":"NRT","force_set_state":"false","leader":"true"}
   [junit4]   2> 1215517 INFO  (qtp1719133957-5565) [n:127.0.0.1:41505_solr    
x:hdfsbackuprestore_shard1_0_replica_n5] o.a.s.h.a.PrepRecoveryOp Waited 
coreNodeName: core_node7, state: active, checkLive: true, onlyIfLeader: true 
for: 0 seconds.
   [junit4]   2> 1215517 INFO  (qtp1719133957-5565) [n:127.0.0.1:41505_solr    
x:hdfsbackuprestore_shard1_0_replica_n5] o.a.s.s.HttpSolrCall [admin] 
webapp=null path=/admin/cores 
params={nodeName=127.0.0.1:41505_solr&core=hdfsbackuprestore_shard1_0_replica_n5&qt=/admin/cores&coreNodeName=core_node7&action=PREPRECOVERY&checkLive=true&state=active&onlyIfLeader=true&wt=javabin&version=2}
 status=0 QTime=0
   [junit4]   2> 1215519 INFO  (qtp1719133957-5565) [n:127.0.0.1:41505_solr    
x:hd

[...truncated too long message...]

 
/home/jenkins/jenkins-slave/workspace/Lucene-Solr-BadApples-Tests-7.x/lucene/top-level-ivy-settings.xml

resolve:

ivy-availability-check:
[loadresource] Do not set property disallowed.ivy.jars.list as its length is 0.

-ivy-fail-disallowed-ivy-version:

ivy-fail:

ivy-configure:
[ivy:configure] :: loading settings :: file = /home/jenkins/jenkins-slave/workspace/Lucene-Solr-BadApples-Tests-7.x/lucene/top-level-ivy-settings.xml

resolve:

jar-checksums:
    [mkdir] Created dir: /home/jenkins/jenkins-slave/workspace/Lucene-Solr-BadApples-Tests-7.x/solr/null250017862
     [copy] Copying 241 files to /home/jenkins/jenkins-slave/workspace/Lucene-Solr-BadApples-Tests-7.x/solr/null250017862
   [delete] Deleting directory /home/jenkins/jenkins-slave/workspace/Lucene-Solr-BadApples-Tests-7.x/solr/null250017862

check-working-copy:
[ivy:cachepath] :: resolving dependencies :: org.eclipse.jgit#org.eclipse.jgit-caller;working
[ivy:cachepath]         confs: [default]
[ivy:cachepath]         found org.eclipse.jgit#org.eclipse.jgit;4.6.0.201612231935-r in public
[ivy:cachepath]         found com.jcraft#jsch;0.1.53 in public
[ivy:cachepath]         found com.googlecode.javaewah#JavaEWAH;1.1.6 in public
[ivy:cachepath]         found org.apache.httpcomponents#httpclient;4.3.6 in public
[ivy:cachepath]         found org.apache.httpcomponents#httpcore;4.3.3 in public
[ivy:cachepath]         found commons-logging#commons-logging;1.1.3 in public
[ivy:cachepath]         found commons-codec#commons-codec;1.6 in public
[ivy:cachepath]         found org.slf4j#slf4j-api;1.7.2 in public
[ivy:cachepath] :: resolution report :: resolve 32ms :: artifacts dl 4ms
        ---------------------------------------------------------------------
        |                  |            modules            ||   artifacts   |
        |       conf       | number| search|dwnlded|evicted|| number|dwnlded|
        ---------------------------------------------------------------------
        |      default     |   8   |   0   |   0   |   0   ||   8   |   0   |
        ---------------------------------------------------------------------
[wc-checker] Initializing working copy...
[wc-checker] SLF4J: Failed to load class "org.slf4j.impl.StaticLoggerBinder".
[wc-checker] SLF4J: Defaulting to no-operation (NOP) logger implementation
[wc-checker] SLF4J: See http://www.slf4j.org/codes.html#StaticLoggerBinder for further details.
[wc-checker] Checking working copy status...

-jenkins-base:

BUILD SUCCESSFUL
Total time: 230 minutes 13 seconds
Archiving artifacts
java.lang.InterruptedException: no matches found within 10000
        at hudson.FilePath$34.hasMatch(FilePath.java:2678)
        at hudson.FilePath$34.invoke(FilePath.java:2557)
        at hudson.FilePath$34.invoke(FilePath.java:2547)
        at hudson.FilePath$FileCallableWrapper.call(FilePath.java:2918)
Also:   hudson.remoting.Channel$CallSiteStackTrace: Remote call to lucene2
                at hudson.remoting.Channel.attachCallSiteStackTrace(Channel.java:1741)
                at hudson.remoting.UserRequest$ExceptionResponse.retrieve(UserRequest.java:357)
                at hudson.remoting.Channel.call(Channel.java:955)
                at hudson.FilePath.act(FilePath.java:1036)
                at hudson.FilePath.act(FilePath.java:1025)
                at hudson.FilePath.validateAntFileMask(FilePath.java:2547)
                at hudson.tasks.ArtifactArchiver.perform(ArtifactArchiver.java:243)
                at hudson.tasks.BuildStepCompatibilityLayer.perform(BuildStepCompatibilityLayer.java:81)
                at hudson.tasks.BuildStepMonitor$1.perform(BuildStepMonitor.java:20)
                at hudson.model.AbstractBuild$AbstractBuildExecution.perform(AbstractBuild.java:744)
                at hudson.model.AbstractBuild$AbstractBuildExecution.performAllBuildSteps(AbstractBuild.java:690)
                at hudson.model.Build$BuildExecution.post2(Build.java:186)
                at hudson.model.AbstractBuild$AbstractBuildExecution.post(AbstractBuild.java:635)
                at hudson.model.Run.execute(Run.java:1819)
                at hudson.model.FreeStyleBuild.run(FreeStyleBuild.java:43)
                at hudson.model.ResourceController.execute(ResourceController.java:97)
                at hudson.model.Executor.run(Executor.java:429)
Caused: hudson.FilePath$TunneledInterruptedException
        at hudson.FilePath$FileCallableWrapper.call(FilePath.java:2920)
        at hudson.remoting.UserRequest.perform(UserRequest.java:212)
        at hudson.remoting.UserRequest.perform(UserRequest.java:54)
        at hudson.remoting.Request$2.run(Request.java:369)
        at hudson.remoting.InterceptingExecutorService$1.call(InterceptingExecutorService.java:72)
        at java.util.concurrent.FutureTask.run(FutureTask.java:266)
        at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142)
        at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617)
        at java.lang.Thread.run(Thread.java:748)
Caused: java.lang.InterruptedException: java.lang.InterruptedException: no matches found within 10000
        at hudson.FilePath.act(FilePath.java:1038)
        at hudson.FilePath.act(FilePath.java:1025)
        at hudson.FilePath.validateAntFileMask(FilePath.java:2547)
        at hudson.tasks.ArtifactArchiver.perform(ArtifactArchiver.java:243)
        at hudson.tasks.BuildStepCompatibilityLayer.perform(BuildStepCompatibilityLayer.java:81)
        at hudson.tasks.BuildStepMonitor$1.perform(BuildStepMonitor.java:20)
        at hudson.model.AbstractBuild$AbstractBuildExecution.perform(AbstractBuild.java:744)
        at hudson.model.AbstractBuild$AbstractBuildExecution.performAllBuildSteps(AbstractBuild.java:690)
        at hudson.model.Build$BuildExecution.post2(Build.java:186)
        at hudson.model.AbstractBuild$AbstractBuildExecution.post(AbstractBuild.java:635)
        at hudson.model.Run.execute(Run.java:1819)
        at hudson.model.FreeStyleBuild.run(FreeStyleBuild.java:43)
        at hudson.model.ResourceController.execute(ResourceController.java:97)
        at hudson.model.Executor.run(Executor.java:429)
No artifacts found that match the file pattern "**/*.events,heapdumps/**,**/hs_err_pid*". Configuration error?
Recording test results
Build step 'Publish JUnit test result report' changed build result to UNSTABLE
Email was triggered for: Unstable (Test Failures)
Sending email for trigger: Unstable (Test Failures)
---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]
