Build: https://builds.apache.org/job/Lucene-Solr-NightlyTests-master/1695/

4 tests failed.
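
The seeds embedded in the traces below (for example [632D3BDB9FC82BE0]) come from the randomized-testing framework and can usually be replayed locally. A typical invocation, assuming the standard Lucene/Solr Ant test targets (this nightly job may additionally set multiplier or nightly flags), looks like:

    ant test -Dtestcase=HdfsRestartWhileUpdatingTest -Dtests.method=test -Dtests.seed=632D3BDB9FC82BE0

Replaying the seed only fixes the randomized choices; the timing-dependent failures below may still not reproduce on a faster machine.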
FAILED:  org.apache.solr.cloud.hdfs.HdfsRestartWhileUpdatingTest.test

Error Message:
There are still nodes recoverying - waited for 320 seconds

Stack Trace:
java.lang.AssertionError: There are still nodes recoverying - waited for 320 seconds
        at 
__randomizedtesting.SeedInfo.seed([632D3BDB9FC82BE0:EB79040131344618]:0)
        at org.junit.Assert.fail(Assert.java:93)
        at 
org.apache.solr.cloud.AbstractDistribZkTestBase.waitForRecoveriesToFinish(AbstractDistribZkTestBase.java:185)
        at 
org.apache.solr.cloud.AbstractFullDistribZkTestBase.waitForRecoveriesToFinish(AbstractFullDistribZkTestBase.java:920)
        at 
org.apache.solr.cloud.AbstractFullDistribZkTestBase.waitForThingsToLevelOut(AbstractFullDistribZkTestBase.java:1477)
        at 
org.apache.solr.cloud.RestartWhileUpdatingTest.test(RestartWhileUpdatingTest.java:145)
        at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
        at 
sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)
        at 
sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
        at java.lang.reflect.Method.invoke(Method.java:498)
        at 
com.carrotsearch.randomizedtesting.RandomizedRunner.invoke(RandomizedRunner.java:1742)
        at 
com.carrotsearch.randomizedtesting.RandomizedRunner$8.evaluate(RandomizedRunner.java:935)
        at 
com.carrotsearch.randomizedtesting.RandomizedRunner$9.evaluate(RandomizedRunner.java:971)
        at 
com.carrotsearch.randomizedtesting.RandomizedRunner$10.evaluate(RandomizedRunner.java:985)
        at 
org.apache.solr.BaseDistributedSearchTestCase$ShardsRepeatRule$ShardsFixedStatement.callStatement(BaseDistributedSearchTestCase.java:1010)
        at 
org.apache.solr.BaseDistributedSearchTestCase$ShardsRepeatRule$ShardsStatement.evaluate(BaseDistributedSearchTestCase.java:985)
        at 
com.carrotsearch.randomizedtesting.rules.SystemPropertiesRestoreRule$1.evaluate(SystemPropertiesRestoreRule.java:57)
        at 
org.apache.lucene.util.TestRuleSetupTeardownChained$1.evaluate(TestRuleSetupTeardownChained.java:49)
        at 
org.apache.lucene.util.AbstractBeforeAfterRule$1.evaluate(AbstractBeforeAfterRule.java:45)
        at 
org.apache.lucene.util.TestRuleThreadAndTestName$1.evaluate(TestRuleThreadAndTestName.java:48)
        at 
org.apache.lucene.util.TestRuleIgnoreAfterMaxFailures$1.evaluate(TestRuleIgnoreAfterMaxFailures.java:64)
        at 
org.apache.lucene.util.TestRuleMarkFailure$1.evaluate(TestRuleMarkFailure.java:47)
        at 
com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
        at 
com.carrotsearch.randomizedtesting.ThreadLeakControl$StatementRunner.run(ThreadLeakControl.java:368)
        at 
com.carrotsearch.randomizedtesting.ThreadLeakControl.forkTimeoutingTask(ThreadLeakControl.java:817)
        at 
com.carrotsearch.randomizedtesting.ThreadLeakControl$3.evaluate(ThreadLeakControl.java:468)
        at 
com.carrotsearch.randomizedtesting.RandomizedRunner.runSingleTest(RandomizedRunner.java:944)
        at 
com.carrotsearch.randomizedtesting.RandomizedRunner$5.evaluate(RandomizedRunner.java:830)
        at 
com.carrotsearch.randomizedtesting.RandomizedRunner$6.evaluate(RandomizedRunner.java:880)
        at 
com.carrotsearch.randomizedtesting.RandomizedRunner$7.evaluate(RandomizedRunner.java:891)
        at 
com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
        at 
com.carrotsearch.randomizedtesting.rules.SystemPropertiesRestoreRule$1.evaluate(SystemPropertiesRestoreRule.java:57)
        at 
org.apache.lucene.util.AbstractBeforeAfterRule$1.evaluate(AbstractBeforeAfterRule.java:45)
        at 
com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
        at 
org.apache.lucene.util.TestRuleStoreClassName$1.evaluate(TestRuleStoreClassName.java:41)
        at 
com.carrotsearch.randomizedtesting.rules.NoShadowingOrOverridesOnMethodsRule$1.evaluate(NoShadowingOrOverridesOnMethodsRule.java:40)
        at 
com.carrotsearch.randomizedtesting.rules.NoShadowingOrOverridesOnMethodsRule$1.evaluate(NoShadowingOrOverridesOnMethodsRule.java:40)
        at 
com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
        at 
com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
        at 
com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
        at 
org.apache.lucene.util.TestRuleAssertionsRequired$1.evaluate(TestRuleAssertionsRequired.java:53)
        at 
org.apache.lucene.util.TestRuleMarkFailure$1.evaluate(TestRuleMarkFailure.java:47)
        at 
org.apache.lucene.util.TestRuleIgnoreAfterMaxFailures$1.evaluate(TestRuleIgnoreAfterMaxFailures.java:64)
        at 
org.apache.lucene.util.TestRuleIgnoreTestSuites$1.evaluate(TestRuleIgnoreTestSuites.java:54)
        at 
com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
        at 
com.carrotsearch.randomizedtesting.ThreadLeakControl$StatementRunner.run(ThreadLeakControl.java:368)
        at java.lang.Thread.run(Thread.java:748)
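
The failure above comes from AbstractDistribZkTestBase.waitForRecoveriesToFinish (line 185 in the trace), which effectively polls the cluster state until no replica is still recovering and fails once a fixed timeout elapses (320 seconds here). A minimal sketch of that polling pattern, with a hypothetical isAnyReplicaRecovering() standing in for the real ZooKeeper-backed cluster-state check:

    import java.util.concurrent.TimeUnit;

    final class RecoveryWaitSketch {
      // Hypothetical check; the real test reads replica states from ZooKeeper.
      static boolean isAnyReplicaRecovering() { return false; }

      static void waitForRecoveriesToFinish(int timeoutSeconds) throws InterruptedException {
        long deadline = System.nanoTime() + TimeUnit.SECONDS.toNanos(timeoutSeconds);
        while (isAnyReplicaRecovering()) {
          if (System.nanoTime() > deadline) {
            // This is the branch that produced the failure above.
            throw new AssertionError(
                "There are still nodes recoverying - waited for " + timeoutSeconds + " seconds");
          }
          Thread.sleep(1000); // poll roughly once per second
        }
      }
    }

A replica that never leaves recovery within the window (slow HDFS directories, a stalled leader election, or simply an overloaded nightly machine) can trip this assertion without any functional bug in the code under test.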


FAILED:  junit.framework.TestSuite.org.apache.solr.cloud.hdfs.HdfsRestartWhileUpdatingTest

Error Message:
7 threads leaked from SUITE scope at org.apache.solr.cloud.hdfs.HdfsRestartWhileUpdatingTest:
   1) Thread[id=28018, name=searcherExecutor-8809-thread-1, state=WAITING, group=TGRP-HdfsRestartWhileUpdatingTest]
   2) Thread[id=28099, name=searcherExecutor-8837-thread-1, state=WAITING, group=TGRP-HdfsRestartWhileUpdatingTest]
   3) Thread[id=27976, name=searcherExecutor-8795-thread-1, state=WAITING, group=TGRP-HdfsRestartWhileUpdatingTest]
   4) Thread[id=28216, name=searcherExecutor-8871-thread-1, state=WAITING, group=TGRP-HdfsRestartWhileUpdatingTest]
   5) Thread[id=28055, name=searcherExecutor-8823-thread-1, state=WAITING, group=TGRP-HdfsRestartWhileUpdatingTest]
   6) Thread[id=28251, name=searcherExecutor-8885-thread-1, state=WAITING, group=TGRP-HdfsRestartWhileUpdatingTest]
   7) Thread[id=28181, name=searcherExecutor-8857-thread-1, state=WAITING, group=TGRP-HdfsRestartWhileUpdatingTest]
   All seven threads are parked in the identical stack:
        at sun.misc.Unsafe.park(Native Method)
        at java.util.concurrent.locks.LockSupport.park(LockSupport.java:175)
        at java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:2039)
        at java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:442)
        at java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1074)
        at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1134)
        at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
        at java.lang.Thread.run(Thread.java:748)

Stack Trace:
com.carrotsearch.randomizedtesting.ThreadLeakError: 7 threads leaked from SUITE 
scope at org.apache.solr.cloud.hdfs.HdfsRestartWhileUpdatingTest: 
   1) Thread[id=28018, name=searcherExecutor-8809-thread-1, state=WAITING, 
group=TGRP-HdfsRestartWhileUpdatingTest]
        at sun.misc.Unsafe.park(Native Method)
        at java.util.concurrent.locks.LockSupport.park(LockSupport.java:175)
        at 
java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:2039)
        at 
java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:442)
        at 
java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1074)
        at 
java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1134)
        at 
java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
        at java.lang.Thread.run(Thread.java:748)
   2) Thread[id=28099, name=searcherExecutor-8837-thread-1, state=WAITING, 
group=TGRP-HdfsRestartWhileUpdatingTest]
        at sun.misc.Unsafe.park(Native Method)
        at java.util.concurrent.locks.LockSupport.park(LockSupport.java:175)
        at 
java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:2039)
        at 
java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:442)
        at 
java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1074)
        at 
java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1134)
        at 
java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
        at java.lang.Thread.run(Thread.java:748)
   3) Thread[id=27976, name=searcherExecutor-8795-thread-1, state=WAITING, 
group=TGRP-HdfsRestartWhileUpdatingTest]
        at sun.misc.Unsafe.park(Native Method)
        at java.util.concurrent.locks.LockSupport.park(LockSupport.java:175)
        at 
java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:2039)
        at 
java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:442)
        at 
java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1074)
        at 
java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1134)
        at 
java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
        at java.lang.Thread.run(Thread.java:748)
   4) Thread[id=28216, name=searcherExecutor-8871-thread-1, state=WAITING, 
group=TGRP-HdfsRestartWhileUpdatingTest]
        at sun.misc.Unsafe.park(Native Method)
        at java.util.concurrent.locks.LockSupport.park(LockSupport.java:175)
        at 
java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:2039)
        at 
java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:442)
        at 
java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1074)
        at 
java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1134)
        at 
java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
        at java.lang.Thread.run(Thread.java:748)
   5) Thread[id=28055, name=searcherExecutor-8823-thread-1, state=WAITING, 
group=TGRP-HdfsRestartWhileUpdatingTest]
        at sun.misc.Unsafe.park(Native Method)
        at java.util.concurrent.locks.LockSupport.park(LockSupport.java:175)
        at 
java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:2039)
        at 
java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:442)
        at 
java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1074)
        at 
java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1134)
        at 
java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
        at java.lang.Thread.run(Thread.java:748)
   6) Thread[id=28251, name=searcherExecutor-8885-thread-1, state=WAITING, 
group=TGRP-HdfsRestartWhileUpdatingTest]
        at sun.misc.Unsafe.park(Native Method)
        at java.util.concurrent.locks.LockSupport.park(LockSupport.java:175)
        at 
java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:2039)
        at 
java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:442)
        at 
java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1074)
        at 
java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1134)
        at 
java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
        at java.lang.Thread.run(Thread.java:748)
   7) Thread[id=28181, name=searcherExecutor-8857-thread-1, state=WAITING, 
group=TGRP-HdfsRestartWhileUpdatingTest]
        at sun.misc.Unsafe.park(Native Method)
        at java.util.concurrent.locks.LockSupport.park(LockSupport.java:175)
        at 
java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:2039)
        at 
java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:442)
        at 
java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1074)
        at 
java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1134)
        at 
java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
        at java.lang.Thread.run(Thread.java:748)
        at __randomizedtesting.SeedInfo.seed([632D3BDB9FC82BE0]:0)
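
Every leaked thread here is an idle searcherExecutor pool worker: the stack (LinkedBlockingQueue.take inside ThreadPoolExecutor.getTask) is a worker thread waiting for its next task, which strongly suggests the owning executor was not shut down, or had not finished shutting down, when the suite ended. A minimal sketch of how such a leak arises and how it is released, using only java.util.concurrent (the executor name below is illustrative, not Solr's actual wiring):

    import java.util.concurrent.ExecutorService;
    import java.util.concurrent.Executors;
    import java.util.concurrent.TimeUnit;

    public class SearcherExecutorLeakSketch {
      public static void main(String[] args) throws InterruptedException {
        // A single-thread pool whose worker, once started, parks in
        // LinkedBlockingQueue.take() while it waits for more work, which is
        // exactly the stack reported for the leaked threads above.
        ExecutorService searcherExecutor =
            Executors.newSingleThreadExecutor(r -> new Thread(r, "searcherExecutor-demo-thread-1"));
        searcherExecutor.submit(() -> { /* warm a searcher, etc. */ });

        // Without the two lines below, the worker thread outlives the test and
        // the randomized-testing framework reports it as leaked.
        searcherExecutor.shutdown();
        searcherExecutor.awaitTermination(10, TimeUnit.SECONDS);
      }
    }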


FAILED:  junit.framework.TestSuite.org.apache.solr.cloud.hdfs.HdfsRestartWhileUpdatingTest

Error Message:
There are still zombie threads that couldn't be terminated:
   1) Thread[id=28018, name=searcherExecutor-8809-thread-1, state=WAITING, group=TGRP-HdfsRestartWhileUpdatingTest]
   2) Thread[id=28099, name=searcherExecutor-8837-thread-1, state=WAITING, group=TGRP-HdfsRestartWhileUpdatingTest]
   3) Thread[id=27976, name=searcherExecutor-8795-thread-1, state=WAITING, group=TGRP-HdfsRestartWhileUpdatingTest]
   4) Thread[id=28216, name=searcherExecutor-8871-thread-1, state=WAITING, group=TGRP-HdfsRestartWhileUpdatingTest]
   5) Thread[id=28055, name=searcherExecutor-8823-thread-1, state=WAITING, group=TGRP-HdfsRestartWhileUpdatingTest]
   6) Thread[id=28251, name=searcherExecutor-8885-thread-1, state=WAITING, group=TGRP-HdfsRestartWhileUpdatingTest]
   7) Thread[id=28181, name=searcherExecutor-8857-thread-1, state=WAITING, group=TGRP-HdfsRestartWhileUpdatingTest]
   All seven threads are parked in the identical stack:
        at sun.misc.Unsafe.park(Native Method)
        at java.util.concurrent.locks.LockSupport.park(LockSupport.java:175)
        at java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:2039)
        at java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:442)
        at java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1074)
        at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1134)
        at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
        at java.lang.Thread.run(Thread.java:748)

Stack Trace:
com.carrotsearch.randomizedtesting.ThreadLeakError: There are still zombie 
threads that couldn't be terminated:
   1) Thread[id=28018, name=searcherExecutor-8809-thread-1, state=WAITING, 
group=TGRP-HdfsRestartWhileUpdatingTest]
        at sun.misc.Unsafe.park(Native Method)
        at java.util.concurrent.locks.LockSupport.park(LockSupport.java:175)
        at 
java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:2039)
        at 
java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:442)
        at 
java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1074)
        at 
java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1134)
        at 
java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
        at java.lang.Thread.run(Thread.java:748)
   2) Thread[id=28099, name=searcherExecutor-8837-thread-1, state=WAITING, 
group=TGRP-HdfsRestartWhileUpdatingTest]
        at sun.misc.Unsafe.park(Native Method)
        at java.util.concurrent.locks.LockSupport.park(LockSupport.java:175)
        at 
java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:2039)
        at 
java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:442)
        at 
java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1074)
        at 
java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1134)
        at 
java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
        at java.lang.Thread.run(Thread.java:748)
   3) Thread[id=27976, name=searcherExecutor-8795-thread-1, state=WAITING, 
group=TGRP-HdfsRestartWhileUpdatingTest]
        at sun.misc.Unsafe.park(Native Method)
        at java.util.concurrent.locks.LockSupport.park(LockSupport.java:175)
        at 
java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:2039)
        at 
java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:442)
        at 
java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1074)
        at 
java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1134)
        at 
java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
        at java.lang.Thread.run(Thread.java:748)
   4) Thread[id=28216, name=searcherExecutor-8871-thread-1, state=WAITING, 
group=TGRP-HdfsRestartWhileUpdatingTest]
        at sun.misc.Unsafe.park(Native Method)
        at java.util.concurrent.locks.LockSupport.park(LockSupport.java:175)
        at 
java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:2039)
        at 
java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:442)
        at 
java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1074)
        at 
java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1134)
        at 
java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
        at java.lang.Thread.run(Thread.java:748)
   5) Thread[id=28055, name=searcherExecutor-8823-thread-1, state=WAITING, 
group=TGRP-HdfsRestartWhileUpdatingTest]
        at sun.misc.Unsafe.park(Native Method)
        at java.util.concurrent.locks.LockSupport.park(LockSupport.java:175)
        at 
java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:2039)
        at 
java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:442)
        at 
java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1074)
        at 
java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1134)
        at 
java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
        at java.lang.Thread.run(Thread.java:748)
   6) Thread[id=28251, name=searcherExecutor-8885-thread-1, state=WAITING, 
group=TGRP-HdfsRestartWhileUpdatingTest]
        at sun.misc.Unsafe.park(Native Method)
        at java.util.concurrent.locks.LockSupport.park(LockSupport.java:175)
        at 
java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:2039)
        at 
java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:442)
        at 
java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1074)
        at 
java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1134)
        at 
java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
        at java.lang.Thread.run(Thread.java:748)
   7) Thread[id=28181, name=searcherExecutor-8857-thread-1, state=WAITING, 
group=TGRP-HdfsRestartWhileUpdatingTest]
        at sun.misc.Unsafe.park(Native Method)
        at java.util.concurrent.locks.LockSupport.park(LockSupport.java:175)
        at 
java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:2039)
        at 
java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:442)
        at 
java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1074)
        at 
java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1134)
        at 
java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
        at java.lang.Thread.run(Thread.java:748)
        at __randomizedtesting.SeedInfo.seed([632D3BDB9FC82BE0]:0)


FAILED:  org.apache.solr.client.solrj.io.stream.MathExpressionTest.testGammaDistribution

Error Message:


Stack Trace:
java.lang.AssertionError
        at 
__randomizedtesting.SeedInfo.seed([39D97E942341B2E:3EE7BC47614CB139]:0)
        at org.junit.Assert.fail(Assert.java:92)
        at org.junit.Assert.assertTrue(Assert.java:43)
        at org.junit.Assert.assertTrue(Assert.java:54)
        at 
org.apache.solr.client.solrj.io.stream.MathExpressionTest.testGammaDistribution(MathExpressionTest.java:4372)
        at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
        at 
sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)
        at 
sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
        at java.lang.reflect.Method.invoke(Method.java:498)
        at 
com.carrotsearch.randomizedtesting.RandomizedRunner.invoke(RandomizedRunner.java:1742)
        at 
com.carrotsearch.randomizedtesting.RandomizedRunner$8.evaluate(RandomizedRunner.java:935)
        at 
com.carrotsearch.randomizedtesting.RandomizedRunner$9.evaluate(RandomizedRunner.java:971)
        at 
com.carrotsearch.randomizedtesting.RandomizedRunner$10.evaluate(RandomizedRunner.java:985)
        at 
com.carrotsearch.randomizedtesting.rules.SystemPropertiesRestoreRule$1.evaluate(SystemPropertiesRestoreRule.java:57)
        at 
org.apache.lucene.util.TestRuleSetupTeardownChained$1.evaluate(TestRuleSetupTeardownChained.java:49)
        at 
org.apache.lucene.util.AbstractBeforeAfterRule$1.evaluate(AbstractBeforeAfterRule.java:45)
        at 
org.apache.lucene.util.TestRuleThreadAndTestName$1.evaluate(TestRuleThreadAndTestName.java:48)
        at 
org.apache.lucene.util.TestRuleIgnoreAfterMaxFailures$1.evaluate(TestRuleIgnoreAfterMaxFailures.java:64)
        at 
org.apache.lucene.util.TestRuleMarkFailure$1.evaluate(TestRuleMarkFailure.java:47)
        at 
com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
        at 
com.carrotsearch.randomizedtesting.ThreadLeakControl$StatementRunner.run(ThreadLeakControl.java:368)
        at 
com.carrotsearch.randomizedtesting.ThreadLeakControl.forkTimeoutingTask(ThreadLeakControl.java:817)
        at 
com.carrotsearch.randomizedtesting.ThreadLeakControl$3.evaluate(ThreadLeakControl.java:468)
        at 
com.carrotsearch.randomizedtesting.RandomizedRunner.runSingleTest(RandomizedRunner.java:944)
        at 
com.carrotsearch.randomizedtesting.RandomizedRunner$5.evaluate(RandomizedRunner.java:830)
        at 
com.carrotsearch.randomizedtesting.RandomizedRunner$6.evaluate(RandomizedRunner.java:880)
        at 
com.carrotsearch.randomizedtesting.RandomizedRunner$7.evaluate(RandomizedRunner.java:891)
        at 
com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
        at 
com.carrotsearch.randomizedtesting.rules.SystemPropertiesRestoreRule$1.evaluate(SystemPropertiesRestoreRule.java:57)
        at 
org.apache.lucene.util.AbstractBeforeAfterRule$1.evaluate(AbstractBeforeAfterRule.java:45)
        at 
com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
        at 
org.apache.lucene.util.TestRuleStoreClassName$1.evaluate(TestRuleStoreClassName.java:41)
        at 
com.carrotsearch.randomizedtesting.rules.NoShadowingOrOverridesOnMethodsRule$1.evaluate(NoShadowingOrOverridesOnMethodsRule.java:40)
        at 
com.carrotsearch.randomizedtesting.rules.NoShadowingOrOverridesOnMethodsRule$1.evaluate(NoShadowingOrOverridesOnMethodsRule.java:40)
        at 
com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
        at 
com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
        at 
com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
        at 
org.apache.lucene.util.TestRuleAssertionsRequired$1.evaluate(TestRuleAssertionsRequired.java:53)
        at 
org.apache.lucene.util.TestRuleMarkFailure$1.evaluate(TestRuleMarkFailure.java:47)
        at 
org.apache.lucene.util.TestRuleIgnoreAfterMaxFailures$1.evaluate(TestRuleIgnoreAfterMaxFailures.java:64)
        at 
org.apache.lucene.util.TestRuleIgnoreTestSuites$1.evaluate(TestRuleIgnoreTestSuites.java:54)
        at 
com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
        at 
com.carrotsearch.randomizedtesting.ThreadLeakControl$StatementRunner.run(ThreadLeakControl.java:368)
        at java.lang.Thread.run(Thread.java:748)
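
The assertTrue at MathExpressionTest.java:4372 fails with no message, which is typical of a tolerance-based statistical check going slightly out of bounds: testGammaDistribution presumably samples the gammaDistribution() math expression and compares sample statistics against the theoretical ones. A rough illustration of that style of check using Apache Commons Math, which Solr's math expressions build on; the shape/scale values, sample size, and tolerance here are made up for the example:

    import org.apache.commons.math3.distribution.GammaDistribution;

    public class GammaSanityCheckSketch {
      public static void main(String[] args) {
        double shape = 2.0, scale = 3.0;                 // illustrative parameters
        GammaDistribution gamma = new GammaDistribution(shape, scale);

        // Theoretical mean of a gamma distribution is shape * scale.
        double expectedMean = gamma.getNumericalMean();  // 6.0 for these parameters

        int n = 10_000;
        double sum = 0;
        for (int i = 0; i < n; i++) {
          sum += gamma.sample();
        }
        double sampleMean = sum / n;

        // A tolerance-based assertion like this is inherently probabilistic:
        // with an unlucky random sequence it can fail even though nothing is broken.
        if (Math.abs(sampleMean - expectedMean) > 0.2) {
          throw new AssertionError("sample mean " + sampleMean + " too far from " + expectedMean);
        }
      }
    }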




Build Log:
[...truncated 15615 lines...]
   [junit4] Suite: org.apache.solr.cloud.hdfs.HdfsRestartWhileUpdatingTest
   [junit4]   2> 3225000 INFO  
(SUITE-HdfsRestartWhileUpdatingTest-seed#[632D3BDB9FC82BE0]-worker) [    ] 
o.a.s.SolrTestCaseJ4 SecureRandom sanity checks: 
test.solr.allowed.securerandom=null & java.security.egd=file:/dev/./urandom
   [junit4]   2> Creating dataDir: 
/x1/jenkins/jenkins-slave/workspace/Lucene-Solr-NightlyTests-master/checkout/solr/build/solr-core/test/J2/temp/solr.cloud.hdfs.HdfsRestartWhileUpdatingTest_632D3BDB9FC82BE0-001/init-core-data-001
   [junit4]   2> 3225001 INFO  
(SUITE-HdfsRestartWhileUpdatingTest-seed#[632D3BDB9FC82BE0]-worker) [    ] 
o.a.s.SolrTestCaseJ4 Using PointFields (NUMERIC_POINTS_SYSPROP=true) 
w/NUMERIC_DOCVALUES_SYSPROP=true
   [junit4]   2> 3225004 INFO  
(SUITE-HdfsRestartWhileUpdatingTest-seed#[632D3BDB9FC82BE0]-worker) [    ] 
o.a.s.SolrTestCaseJ4 Randomized ssl (true) and clientAuth (true) via: 
@org.apache.solr.util.RandomizeSSL(reason=, ssl=NaN, value=NaN, clientAuth=NaN)
   [junit4]   2> 3225005 INFO  
(SUITE-HdfsRestartWhileUpdatingTest-seed#[632D3BDB9FC82BE0]-worker) [    ] 
o.a.s.BaseDistributedSearchTestCase Setting hostContext system property: /
   [junit4]   1> Formatting using clusterid: testClusterID
   [junit4]   2> 3225059 WARN  
(SUITE-HdfsRestartWhileUpdatingTest-seed#[632D3BDB9FC82BE0]-worker) [    ] 
o.a.h.m.i.MetricsConfig Cannot locate configuration: tried 
hadoop-metrics2-namenode.properties,hadoop-metrics2.properties
   [junit4]   2> 3225066 WARN  
(SUITE-HdfsRestartWhileUpdatingTest-seed#[632D3BDB9FC82BE0]-worker) [    ] 
o.a.h.h.HttpRequestLog Jetty request log can only be enabled using Log4j
   [junit4]   2> 3225072 INFO  
(SUITE-HdfsRestartWhileUpdatingTest-seed#[632D3BDB9FC82BE0]-worker) [    ] 
o.m.log jetty-6.1.26
   [junit4]   2> 3225094 INFO  
(SUITE-HdfsRestartWhileUpdatingTest-seed#[632D3BDB9FC82BE0]-worker) [    ] 
o.m.log Extract 
jar:file:/x1/jenkins/.ivy2/cache/org.apache.hadoop/hadoop-hdfs/tests/hadoop-hdfs-2.7.4-tests.jar!/webapps/hdfs
 to ./temp/Jetty_localhost_38054_hdfs____1p4qus/webapp
   [junit4]   2> 3225502 INFO  
(SUITE-HdfsRestartWhileUpdatingTest-seed#[632D3BDB9FC82BE0]-worker) [    ] 
o.m.log Started 
HttpServer2$SelectChannelConnectorWithSafeStartup@localhost:38054
   [junit4]   2> 3225635 WARN  
(SUITE-HdfsRestartWhileUpdatingTest-seed#[632D3BDB9FC82BE0]-worker) [    ] 
o.a.h.h.HttpRequestLog Jetty request log can only be enabled using Log4j
   [junit4]   2> 3225637 INFO  
(SUITE-HdfsRestartWhileUpdatingTest-seed#[632D3BDB9FC82BE0]-worker) [    ] 
o.m.log jetty-6.1.26
   [junit4]   2> 3225660 INFO  
(SUITE-HdfsRestartWhileUpdatingTest-seed#[632D3BDB9FC82BE0]-worker) [    ] 
o.m.log Extract 
jar:file:/x1/jenkins/.ivy2/cache/org.apache.hadoop/hadoop-hdfs/tests/hadoop-hdfs-2.7.4-tests.jar!/webapps/datanode
 to ./temp/Jetty_localhost_40361_datanode____wx1995/webapp
   [junit4]   2> 3226101 INFO  
(SUITE-HdfsRestartWhileUpdatingTest-seed#[632D3BDB9FC82BE0]-worker) [    ] 
o.m.log Started 
HttpServer2$SelectChannelConnectorWithSafeStartup@localhost:40361
   [junit4]   2> 3226186 WARN  
(SUITE-HdfsRestartWhileUpdatingTest-seed#[632D3BDB9FC82BE0]-worker) [    ] 
o.a.h.h.HttpRequestLog Jetty request log can only be enabled using Log4j
   [junit4]   2> 3226188 INFO  
(SUITE-HdfsRestartWhileUpdatingTest-seed#[632D3BDB9FC82BE0]-worker) [    ] 
o.m.log jetty-6.1.26
   [junit4]   2> 3226218 INFO  
(SUITE-HdfsRestartWhileUpdatingTest-seed#[632D3BDB9FC82BE0]-worker) [    ] 
o.m.log Extract 
jar:file:/x1/jenkins/.ivy2/cache/org.apache.hadoop/hadoop-hdfs/tests/hadoop-hdfs-2.7.4-tests.jar!/webapps/datanode
 to ./temp/Jetty_localhost_39600_datanode____.6o3hqh/webapp
   [junit4]   2> 3226286 ERROR (DataNode: 
[[[DISK]file:/x1/jenkins/jenkins-slave/workspace/Lucene-Solr-NightlyTests-master/checkout/solr/build/solr-core/test/J2/temp/solr.cloud.hdfs.HdfsRestartWhileUpdatingTest_632D3BDB9FC82BE0-001/tempDir-001/hdfsBaseDir/data/data1/,
 
[DISK]file:/x1/jenkins/jenkins-slave/workspace/Lucene-Solr-NightlyTests-master/checkout/solr/build/solr-core/test/J2/temp/solr.cloud.hdfs.HdfsRestartWhileUpdatingTest_632D3BDB9FC82BE0-001/tempDir-001/hdfsBaseDir/data/data2/]]
  heartbeating to localhost/127.0.0.1:39350) [    ] 
o.a.h.h.s.d.DirectoryScanner 
dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value below 1 
ms/sec. Assuming default value of 1000
   [junit4]   2> 3226311 INFO  (Block report processor) [    ] BlockStateChange 
BLOCK* processReport 0xaf32f99a16c76: from storage 
DS-c4f9e6ac-a29a-4061-a3f7-44b02a16e4e2 node 
DatanodeRegistration(127.0.0.1:33373, 
datanodeUuid=f7c28b02-7907-4f10-9aa2-75b5ec23fedf, infoPort=37596, 
infoSecurePort=0, ipcPort=43199, 
storageInfo=lv=-56;cid=testClusterID;nsid=1911748525;c=0), blocks: 0, 
hasStaleStorage: true, processing time: 0 msecs
   [junit4]   2> 3226311 INFO  (Block report processor) [    ] BlockStateChange 
BLOCK* processReport 0xaf32f99a16c76: from storage 
DS-466220b3-7725-44b0-819b-b7d5538b5db0 node 
DatanodeRegistration(127.0.0.1:33373, 
datanodeUuid=f7c28b02-7907-4f10-9aa2-75b5ec23fedf, infoPort=37596, 
infoSecurePort=0, ipcPort=43199, 
storageInfo=lv=-56;cid=testClusterID;nsid=1911748525;c=0), blocks: 0, 
hasStaleStorage: false, processing time: 0 msecs
   [junit4]   2> 3226686 INFO  
(SUITE-HdfsRestartWhileUpdatingTest-seed#[632D3BDB9FC82BE0]-worker) [    ] 
o.m.log Started 
HttpServer2$SelectChannelConnectorWithSafeStartup@localhost:39600
   [junit4]   2> 3226833 ERROR (DataNode: 
[[[DISK]file:/x1/jenkins/jenkins-slave/workspace/Lucene-Solr-NightlyTests-master/checkout/solr/build/solr-core/test/J2/temp/solr.cloud.hdfs.HdfsRestartWhileUpdatingTest_632D3BDB9FC82BE0-001/tempDir-001/hdfsBaseDir/data/data3/,
 
[DISK]file:/x1/jenkins/jenkins-slave/workspace/Lucene-Solr-NightlyTests-master/checkout/solr/build/solr-core/test/J2/temp/solr.cloud.hdfs.HdfsRestartWhileUpdatingTest_632D3BDB9FC82BE0-001/tempDir-001/hdfsBaseDir/data/data4/]]
  heartbeating to localhost/127.0.0.1:39350) [    ] 
o.a.h.h.s.d.DirectoryScanner 
dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value below 1 
ms/sec. Assuming default value of 1000
   [junit4]   2> 3226842 INFO  (Block report processor) [    ] BlockStateChange 
BLOCK* processReport 0xaf32fb941d1c2: from storage 
DS-9c7ec864-b623-4b0e-ab18-ae2240bb00e2 node 
DatanodeRegistration(127.0.0.1:41285, 
datanodeUuid=b5b22557-5901-468c-b32e-246ec9684c1b, infoPort=42938, 
infoSecurePort=0, ipcPort=43065, 
storageInfo=lv=-56;cid=testClusterID;nsid=1911748525;c=0), blocks: 0, 
hasStaleStorage: true, processing time: 0 msecs
   [junit4]   2> 3226842 INFO  (Block report processor) [    ] BlockStateChange 
BLOCK* processReport 0xaf32fb941d1c2: from storage 
DS-f86b725c-f545-48ef-a34c-d620bafaa237 node 
DatanodeRegistration(127.0.0.1:41285, 
datanodeUuid=b5b22557-5901-468c-b32e-246ec9684c1b, infoPort=42938, 
infoSecurePort=0, ipcPort=43065, 
storageInfo=lv=-56;cid=testClusterID;nsid=1911748525;c=0), blocks: 0, 
hasStaleStorage: false, processing time: 0 msecs
   [junit4]   2> 3227176 INFO  
(TEST-HdfsRestartWhileUpdatingTest.test-seed#[632D3BDB9FC82BE0]) [    ] 
o.a.s.c.ZkTestServer STARTING ZK TEST SERVER
   [junit4]   2> 3227177 INFO  (Thread-6492) [    ] o.a.s.c.ZkTestServer client 
port:0.0.0.0/0.0.0.0:0
   [junit4]   2> 3227177 INFO  (Thread-6492) [    ] o.a.s.c.ZkTestServer 
Starting server
   [junit4]   2> 3227185 ERROR (Thread-6492) [    ] o.a.z.s.ZooKeeperServer 
ZKShutdownHandler is not registered, so ZooKeeper server won't take any action 
on ERROR or SHUTDOWN server state changes
   [junit4]   2> 3227277 INFO  
(TEST-HdfsRestartWhileUpdatingTest.test-seed#[632D3BDB9FC82BE0]) [    ] 
o.a.s.c.ZkTestServer start zk server on port:42319
   [junit4]   2> 3227288 INFO  (zkConnectionManagerCallback-9082-thread-1) [    
] o.a.s.c.c.ConnectionManager zkClient has connected
   [junit4]   2> 3227301 INFO  (zkConnectionManagerCallback-9084-thread-1) [    
] o.a.s.c.c.ConnectionManager zkClient has connected
   [junit4]   2> 3227308 INFO  
(TEST-HdfsRestartWhileUpdatingTest.test-seed#[632D3BDB9FC82BE0]) [    ] 
o.a.s.c.AbstractZkTestCase put 
/x1/jenkins/jenkins-slave/workspace/Lucene-Solr-NightlyTests-master/checkout/solr/core/src/test-files/solr/collection1/conf/solrconfig-tlog.xml
 to /configs/conf1/solrconfig.xml
   [junit4]   2> 3227310 INFO  
(TEST-HdfsRestartWhileUpdatingTest.test-seed#[632D3BDB9FC82BE0]) [    ] 
o.a.s.c.AbstractZkTestCase put 
/x1/jenkins/jenkins-slave/workspace/Lucene-Solr-NightlyTests-master/checkout/solr/core/src/test-files/solr/collection1/conf/schema15.xml
 to /configs/conf1/schema.xml
   [junit4]   2> 3227312 INFO  
(TEST-HdfsRestartWhileUpdatingTest.test-seed#[632D3BDB9FC82BE0]) [    ] 
o.a.s.c.AbstractZkTestCase put 
/x1/jenkins/jenkins-slave/workspace/Lucene-Solr-NightlyTests-master/checkout/solr/core/src/test-files/solr/collection1/conf/solrconfig.snippet.randomindexconfig.xml
 to /configs/conf1/solrconfig.snippet.randomindexconfig.xml
   [junit4]   2> 3227314 INFO  
(TEST-HdfsRestartWhileUpdatingTest.test-seed#[632D3BDB9FC82BE0]) [    ] 
o.a.s.c.AbstractZkTestCase put 
/x1/jenkins/jenkins-slave/workspace/Lucene-Solr-NightlyTests-master/checkout/solr/core/src/test-files/solr/collection1/conf/stopwords.txt
 to /configs/conf1/stopwords.txt
   [junit4]   2> 3227316 INFO  
(TEST-HdfsRestartWhileUpdatingTest.test-seed#[632D3BDB9FC82BE0]) [    ] 
o.a.s.c.AbstractZkTestCase put 
/x1/jenkins/jenkins-slave/workspace/Lucene-Solr-NightlyTests-master/checkout/solr/core/src/test-files/solr/collection1/conf/protwords.txt
 to /configs/conf1/protwords.txt
   [junit4]   2> 3227317 INFO  
(TEST-HdfsRestartWhileUpdatingTest.test-seed#[632D3BDB9FC82BE0]) [    ] 
o.a.s.c.AbstractZkTestCase put 
/x1/jenkins/jenkins-slave/workspace/Lucene-Solr-NightlyTests-master/checkout/solr/core/src/test-files/solr/collection1/conf/currency.xml
 to /configs/conf1/currency.xml
   [junit4]   2> 3227319 INFO  
(TEST-HdfsRestartWhileUpdatingTest.test-seed#[632D3BDB9FC82BE0]) [    ] 
o.a.s.c.AbstractZkTestCase put 
/x1/jenkins/jenkins-slave/workspace/Lucene-Solr-NightlyTests-master/checkout/solr/core/src/test-files/solr/collection1/conf/enumsConfig.xml
 to /configs/conf1/enumsConfig.xml
   [junit4]   2> 3227321 INFO  
(TEST-HdfsRestartWhileUpdatingTest.test-seed#[632D3BDB9FC82BE0]) [    ] 
o.a.s.c.AbstractZkTestCase put 
/x1/jenkins/jenkins-slave/workspace/Lucene-Solr-NightlyTests-master/checkout/solr/core/src/test-files/solr/collection1/conf/open-exchange-rates.json
 to /configs/conf1/open-exchange-rates.json
   [junit4]   2> 3227322 INFO  
(TEST-HdfsRestartWhileUpdatingTest.test-seed#[632D3BDB9FC82BE0]) [    ] 
o.a.s.c.AbstractZkTestCase put 
/x1/jenkins/jenkins-slave/workspace/Lucene-Solr-NightlyTests-master/checkout/solr/core/src/test-files/solr/collection1/conf/mapping-ISOLatin1Accent.txt
 to /configs/conf1/mapping-ISOLatin1Accent.txt
   [junit4]   2> 3227324 INFO  
(TEST-HdfsRestartWhileUpdatingTest.test-seed#[632D3BDB9FC82BE0]) [    ] 
o.a.s.c.AbstractZkTestCase put 
/x1/jenkins/jenkins-slave/workspace/Lucene-Solr-NightlyTests-master/checkout/solr/core/src/test-files/solr/collection1/conf/old_synonyms.txt
 to /configs/conf1/old_synonyms.txt
   [junit4]   2> 3227325 INFO  
(TEST-HdfsRestartWhileUpdatingTest.test-seed#[632D3BDB9FC82BE0]) [    ] 
o.a.s.c.AbstractZkTestCase put 
/x1/jenkins/jenkins-slave/workspace/Lucene-Solr-NightlyTests-master/checkout/solr/core/src/test-files/solr/collection1/conf/synonyms.txt
 to /configs/conf1/synonyms.txt
   [junit4]   2> 3227329 INFO  (zkConnectionManagerCallback-9088-thread-1) [    
] o.a.s.c.c.ConnectionManager zkClient has connected
   [junit4]   2> 3227331 INFO  
(TEST-HdfsRestartWhileUpdatingTest.test-seed#[632D3BDB9FC82BE0]) [    ] 
o.a.s.c.AbstractFullDistribZkTestBase Will use NRT replicas unless explicitly 
asked otherwise
   [junit4]   2> 3227508 INFO  
(TEST-HdfsRestartWhileUpdatingTest.test-seed#[632D3BDB9FC82BE0]) [    ] 
o.e.j.s.Server jetty-9.4.11.v20180605; built: 2018-06-05T18:24:03.829Z; git: 
d5fc0523cfa96bfebfbda19606cad384d772f04c; jvm 1.8.0_191-b12
   [junit4]   2> 3227509 INFO  
(TEST-HdfsRestartWhileUpdatingTest.test-seed#[632D3BDB9FC82BE0]) [    ] 
o.e.j.s.session DefaultSessionIdManager workerName=node0
   [junit4]   2> 3227509 INFO  
(TEST-HdfsRestartWhileUpdatingTest.test-seed#[632D3BDB9FC82BE0]) [    ] 
o.e.j.s.session No SessionScavenger set, using defaults
   [junit4]   2> 3227509 INFO  
(TEST-HdfsRestartWhileUpdatingTest.test-seed#[632D3BDB9FC82BE0]) [    ] 
o.e.j.s.session node0 Scavenging every 600000ms
   [junit4]   2> 3227509 INFO  
(TEST-HdfsRestartWhileUpdatingTest.test-seed#[632D3BDB9FC82BE0]) [    ] 
o.e.j.s.h.ContextHandler Started 
o.e.j.s.ServletContextHandler@7202fd92{/,null,AVAILABLE}
   [junit4]   2> 3227510 INFO  
(TEST-HdfsRestartWhileUpdatingTest.test-seed#[632D3BDB9FC82BE0]) [    ] 
o.e.j.s.AbstractConnector Started ServerConnector@68524012{SSL,[ssl, 
http/1.1]}{127.0.0.1:38742}
   [junit4]   2> 3227510 INFO  
(TEST-HdfsRestartWhileUpdatingTest.test-seed#[632D3BDB9FC82BE0]) [    ] 
o.e.j.s.Server Started @3227569ms
   [junit4]   2> 3227510 INFO  
(TEST-HdfsRestartWhileUpdatingTest.test-seed#[632D3BDB9FC82BE0]) [    ] 
o.a.s.c.s.e.JettySolrRunner Jetty properties: 
{solr.data.dir=hdfs://localhost:39350/hdfs__localhost_39350__x1_jenkins_jenkins-slave_workspace_Lucene-Solr-NightlyTests-master_checkout_solr_build_solr-core_test_J2_temp_solr.cloud.hdfs.HdfsRestartWhileUpdatingTest_632D3BDB9FC82BE0-001_tempDir-002_control_data,
 replicaType=NRT, hostContext=/, hostPort=38742, 
coreRootDirectory=/x1/jenkins/jenkins-slave/workspace/Lucene-Solr-NightlyTests-master/checkout/solr/build/solr-core/test/J2/../../../../../../../../../../../x1/jenkins/jenkins-slave/workspace/Lucene-Solr-NightlyTests-master/checkout/solr/build/solr-core/test/J2/temp/solr.cloud.hdfs.HdfsRestartWhileUpdatingTest_632D3BDB9FC82BE0-001/control-001/cores}
   [junit4]   2> 3227510 ERROR 
(TEST-HdfsRestartWhileUpdatingTest.test-seed#[632D3BDB9FC82BE0]) [    ] 
o.a.s.u.StartupLoggingUtils Missing Java Option solr.log.dir. Logging may be 
missing or incomplete.
   [junit4]   2> 3227511 INFO  
(TEST-HdfsRestartWhileUpdatingTest.test-seed#[632D3BDB9FC82BE0]) [    ] 
o.a.s.s.SolrDispatchFilter Using logger factory 
org.apache.logging.slf4j.Log4jLoggerFactory
   [junit4]   2> 3227511 INFO  
(TEST-HdfsRestartWhileUpdatingTest.test-seed#[632D3BDB9FC82BE0]) [    ] 
o.a.s.s.SolrDispatchFilter  ___      _       Welcome to Apache Solr™ version 
8.0.0
   [junit4]   2> 3227511 INFO  
(TEST-HdfsRestartWhileUpdatingTest.test-seed#[632D3BDB9FC82BE0]) [    ] 
o.a.s.s.SolrDispatchFilter / __| ___| |_ _   Starting in cloud mode on port null
   [junit4]   2> 3227511 INFO  
(TEST-HdfsRestartWhileUpdatingTest.test-seed#[632D3BDB9FC82BE0]) [    ] 
o.a.s.s.SolrDispatchFilter \__ \/ _ \ | '_|  Install dir: null
   [junit4]   2> 3227511 INFO  
(TEST-HdfsRestartWhileUpdatingTest.test-seed#[632D3BDB9FC82BE0]) [    ] 
o.a.s.s.SolrDispatchFilter |___/\___/_|_|    Start time: 
2018-11-11T14:42:31.803Z
   [junit4]   2> 3227513 INFO  (zkConnectionManagerCallback-9090-thread-1) [    
] o.a.s.c.c.ConnectionManager zkClient has connected
   [junit4]   2> 3227514 INFO  
(TEST-HdfsRestartWhileUpdatingTest.test-seed#[632D3BDB9FC82BE0]) [    ] 
o.a.s.s.SolrDispatchFilter Loading solr.xml from SolrHome (not found in 
ZooKeeper)
   [junit4]   2> 3227514 INFO  
(TEST-HdfsRestartWhileUpdatingTest.test-seed#[632D3BDB9FC82BE0]) [    ] 
o.a.s.c.SolrXmlConfig Loading container configuration from 
/x1/jenkins/jenkins-slave/workspace/Lucene-Solr-NightlyTests-master/checkout/solr/build/solr-core/test/J2/temp/solr.cloud.hdfs.HdfsRestartWhileUpdatingTest_632D3BDB9FC82BE0-001/control-001/solr.xml
   [junit4]   2> 3227520 INFO  
(TEST-HdfsRestartWhileUpdatingTest.test-seed#[632D3BDB9FC82BE0]) [    ] 
o.a.s.c.SolrXmlConfig Configuration parameter autoReplicaFailoverWorkLoopDelay 
is ignored
   [junit4]   2> 3227520 INFO  
(TEST-HdfsRestartWhileUpdatingTest.test-seed#[632D3BDB9FC82BE0]) [    ] 
o.a.s.c.SolrXmlConfig Configuration parameter 
autoReplicaFailoverBadNodeExpiration is ignored
   [junit4]   2> 3227522 INFO  
(TEST-HdfsRestartWhileUpdatingTest.test-seed#[632D3BDB9FC82BE0]) [    ] 
o.a.s.c.SolrXmlConfig MBean server found: 
com.sun.jmx.mbeanserver.JmxMBeanServer@16b9a5e6, but no JMX reporters were 
configured - adding default JMX reporter.
   [junit4]   2> 3227568 INFO  
(TEST-HdfsRestartWhileUpdatingTest.test-seed#[632D3BDB9FC82BE0]) [    ] 
o.a.s.c.ZkContainer Zookeeper client=127.0.0.1:42319/solr
   [junit4]   2> 3227598 INFO  (zkConnectionManagerCallback-9094-thread-1) [    
] o.a.s.c.c.ConnectionManager zkClient has connected
   [junit4]   2> 3227601 INFO  (zkConnectionManagerCallback-9096-thread-1) [    
] o.a.s.c.c.ConnectionManager zkClient has connected
   [junit4]   2> 3227715 INFO  
(TEST-HdfsRestartWhileUpdatingTest.test-seed#[632D3BDB9FC82BE0]) 
[n:127.0.0.1:38742_    ] o.a.s.c.OverseerElectionContext I am going to be the 
leader 127.0.0.1:38742_
   [junit4]   2> 3227716 INFO  
(TEST-HdfsRestartWhileUpdatingTest.test-seed#[632D3BDB9FC82BE0]) 
[n:127.0.0.1:38742_    ] o.a.s.c.Overseer Overseer 
(id=72259584929300485-127.0.0.1:38742_-n_0000000000) starting
   [junit4]   2> 3227724 INFO  (zkConnectionManagerCallback-9103-thread-1) [    
] o.a.s.c.c.ConnectionManager zkClient has connected
   [junit4]   2> 3227727 INFO  
(TEST-HdfsRestartWhileUpdatingTest.test-seed#[632D3BDB9FC82BE0]) 
[n:127.0.0.1:38742_    ] o.a.s.c.s.i.ZkClientClusterStateProvider Cluster at 
127.0.0.1:42319/solr ready
   [junit4]   2> 3227728 INFO  
(OverseerStateUpdate-72259584929300485-127.0.0.1:38742_-n_0000000000) 
[n:127.0.0.1:38742_    ] o.a.s.c.Overseer Starting to work on the main queue : 
127.0.0.1:38742_
   [junit4]   2> 3227729 INFO  
(TEST-HdfsRestartWhileUpdatingTest.test-seed#[632D3BDB9FC82BE0]) 
[n:127.0.0.1:38742_    ] o.a.s.c.ZkController Register node as live in 
ZooKeeper:/live_nodes/127.0.0.1:38742_
   [junit4]   2> 3227729 DEBUG 
(OverseerAutoScalingTriggerThread-72259584929300485-127.0.0.1:38742_-n_0000000000)
 [    ] o.a.s.c.a.OverseerTriggerThread Adding .auto_add_replicas and 
.scheduled_maintenance triggers
   [junit4]   2> 3227730 INFO  
(OverseerStateUpdate-72259584929300485-127.0.0.1:38742_-n_0000000000) 
[n:127.0.0.1:38742_    ] o.a.s.c.c.ZkStateReader Updated live nodes from 
ZooKeeper... (0) -> (1)
   [junit4]   2> 3227736 DEBUG 
(OverseerAutoScalingTriggerThread-72259584929300485-127.0.0.1:38742_-n_0000000000)
 [    ] o.a.s.c.a.OverseerTriggerThread Refreshing /autoscaling.json with znode 
version 1
   [junit4]   2> 3227736 INFO  (zkCallback-9102-thread-1) [    ] 
o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (0) -> (1)
   [junit4]   2> 3227736 DEBUG 
(OverseerAutoScalingTriggerThread-72259584929300485-127.0.0.1:38742_-n_0000000000)
 [    ] o.a.s.c.a.OverseerTriggerThread Current znodeVersion 1, 
lastZnodeVersion -1
   [junit4]   2> 3227736 DEBUG 
(OverseerAutoScalingTriggerThread-72259584929300485-127.0.0.1:38742_-n_0000000000)
 [    ] o.a.s.c.a.OverseerTriggerThread Processed trigger updates upto 
znodeVersion 1
   [junit4]   2> 3227742 DEBUG 
(OverseerAutoScalingTriggerThread-72259584929300485-127.0.0.1:38742_-n_0000000000)
 [    ] o.a.s.c.a.NodeLostTrigger NodeLostTrigger .auto_add_replicas - Initial 
livenodes: [127.0.0.1:38742_]
   [junit4]   2> 3227742 DEBUG 
(OverseerAutoScalingTriggerThread-72259584929300485-127.0.0.1:38742_-n_0000000000)
 [    ] o.a.s.c.a.OverseerTriggerThread -- clean old nodeAdded markers
   [junit4]   2> 3227742 DEBUG 
(OverseerAutoScalingTriggerThread-72259584929300485-127.0.0.1:38742_-n_0000000000)
 [    ] o.a.s.c.a.OverseerTriggerThread Current znodeVersion 1, 
lastZnodeVersion 1
   [junit4]   2> 3227743 DEBUG (ScheduledTrigger-8788-thread-1) [    ] 
o.a.s.c.a.NodeLostTrigger Running NodeLostTrigger: .auto_add_replicas with 
currently live nodes: 1
   [junit4]   2> 3227757 INFO  
(TEST-HdfsRestartWhileUpdatingTest.test-seed#[632D3BDB9FC82BE0]) 
[n:127.0.0.1:38742_    ] o.a.s.h.a.MetricsHistoryHandler No .system collection, 
keeping metrics history in memory.
   [junit4]   2> 3227792 INFO  
(TEST-HdfsRestartWhileUpdatingTest.test-seed#[632D3BDB9FC82BE0]) 
[n:127.0.0.1:38742_    ] o.a.s.m.r.SolrJmxReporter JMX monitoring for 
'solr.node' (registry 'solr.node') enabled at server: 
com.sun.jmx.mbeanserver.JmxMBeanServer@16b9a5e6
   [junit4]   2> 3227811 INFO  
(TEST-HdfsRestartWhileUpdatingTest.test-seed#[632D3BDB9FC82BE0]) 
[n:127.0.0.1:38742_    ] o.a.s.m.r.SolrJmxReporter JMX monitoring for 
'solr.jvm' (registry 'solr.jvm') enabled at server: 
com.sun.jmx.mbeanserver.JmxMBeanServer@16b9a5e6
   [junit4]   2> 3227811 INFO  
(TEST-HdfsRestartWhileUpdatingTest.test-seed#[632D3BDB9FC82BE0]) 
[n:127.0.0.1:38742_    ] o.a.s.m.r.SolrJmxReporter JMX monitoring for 
'solr.jetty' (registry 'solr.jetty') enabled at server: 
com.sun.jmx.mbeanserver.JmxMBeanServer@16b9a5e6
   [junit4]   2> 3227813 INFO  
(TEST-HdfsRestartWhileUpdatingTest.test-seed#[632D3BDB9FC82BE0]) 
[n:127.0.0.1:38742_    ] o.a.s.c.CorePropertiesLocator Found 0 core definitions 
underneath 
/x1/jenkins/jenkins-slave/workspace/Lucene-Solr-NightlyTests-master/checkout/solr/build/solr-core/test/J2/../../../../../../../../../../../x1/jenkins/jenkins-slave/workspace/Lucene-Solr-NightlyTests-master/checkout/solr/build/solr-core/test/J2/temp/solr.cloud.hdfs.HdfsRestartWhileUpdatingTest_632D3BDB9FC82BE0-001/control-001/cores
   [junit4]   2> 3227846 INFO  (zkConnectionManagerCallback-9109-thread-1) [    
] o.a.s.c.c.ConnectionManager zkClient has connected
   [junit4]   2> 3227848 INFO  
(TEST-HdfsRestartWhileUpdatingTest.test-seed#[632D3BDB9FC82BE0]) [    ] 
o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (0) -> (1)
   [junit4]   2> 3227849 INFO  
(TEST-HdfsRestartWhileUpdatingTest.test-seed#[632D3BDB9FC82BE0]) [    ] 
o.a.s.c.s.i.ZkClientClusterStateProvider Cluster at 127.0.0.1:42319/solr ready
   [junit4]   2> 3227898 INFO  (qtp316018699-27935) [n:127.0.0.1:38742_    ] 
o.a.s.h.a.CollectionsHandler Invoked Collection Action :create with params 
collection.configName=conf1&name=control_collection&nrtReplicas=1&action=CREATE&numShards=1&createNodeSet=127.0.0.1:38742_&wt=javabin&version=2
 and sendToOCPQueue=true
   [junit4]   2> 3227901 INFO  
(OverseerThreadFactory-8790-thread-1-processing-n:127.0.0.1:38742_) 
[n:127.0.0.1:38742_    ] o.a.s.c.a.c.CreateCollectionCmd Create collection 
control_collection
   [junit4]   2> 3227905 DEBUG 
(OverseerStateUpdate-72259584929300485-127.0.0.1:38742_-n_0000000000) 
[n:127.0.0.1:38742_    ] o.a.s.c.Overseer processMessage: queueSize: 1, message 
= {
   [junit4]   2>   "name":"control_collection",
   [junit4]   2>   "fromApi":"true",
   [junit4]   2>   "collection.configName":"conf1",
   [junit4]   2>   "numShards":"1",
   [junit4]   2>   "createNodeSet":"127.0.0.1:38742_",
   [junit4]   2>   "nrtReplicas":"1",
   [junit4]   2>   "stateFormat":"2",
   [junit4]   2>   "replicationFactor":"1",
   [junit4]   2>   "operation":"create"} current state version: 0
   [junit4]   2> 3228030 INFO  (qtp316018699-27938) [n:127.0.0.1:38742_    ] 
o.a.s.s.HttpSolrCall [admin] webapp=null path=/admin/metrics 
params={prefix=CONTAINER.fs.usableSpace,CONTAINER.fs.totalSpace,CORE.coreName&wt=javabin&version=2&group=solr.node,solr.core}
 status=0 QTime=0
   [junit4]   2> 3228037 INFO  (qtp316018699-27937) [n:127.0.0.1:38742_    
x:control_collection_shard1_replica_n1] o.a.s.h.a.CoreAdminOperation core 
create command 
qt=/admin/cores&collection.configName=conf1&newCollection=true&name=control_collection_shard1_replica_n1&action=CREATE&numShards=1&collection=control_collection&shard=shard1&wt=javabin&version=2&replicaType=NRT
   [junit4]   2> 3228037 INFO  (qtp316018699-27937) [n:127.0.0.1:38742_    
x:control_collection_shard1_replica_n1] o.a.s.c.TransientSolrCoreCacheDefault 
Allocating transient cache for 4 transient cores
   [junit4]   2> 3228040 DEBUG 
(OverseerStateUpdate-72259584929300485-127.0.0.1:38742_-n_0000000000) 
[n:127.0.0.1:38742_    ] o.a.s.c.Overseer processMessage: queueSize: 1, message 
= {
   [junit4]   2>   "core":"control_collection_shard1_replica_n1",
   [junit4]   2>   "roles":null,
   [junit4]   2>   "base_url":"https://127.0.0.1:38742";,
   [junit4]   2>   "node_name":"127.0.0.1:38742_",
   [junit4]   2>   "numShards":"1",
   [junit4]   2>   "state":"down",
   [junit4]   2>   "shard":"shard1",
   [junit4]   2>   "collection":"control_collection",
   [junit4]   2>   "type":"NRT",
   [junit4]   2>   "operation":"state"} current state version: 0
   [junit4]   2> 3228752 DEBUG (ScheduledTrigger-8788-thread-3) [    ] 
o.a.s.c.a.NodeLostTrigger Running NodeLostTrigger: .auto_add_replicas with 
currently live nodes: 1
   [junit4]   2> 3229062 INFO  (qtp316018699-27937) [n:127.0.0.1:38742_ 
c:control_collection s:shard1  x:control_collection_shard1_replica_n1] 
o.a.s.c.SolrConfig Using Lucene MatchVersion: 8.0.0
   [junit4]   2> 3229078 INFO  (qtp316018699-27937) [n:127.0.0.1:38742_ 
c:control_collection s:shard1  x:control_collection_shard1_replica_n1] 
o.a.s.s.IndexSchema [control_collection_shard1_replica_n1] Schema name=test
   [junit4]   2> 3229208 INFO  (qtp316018699-27937) [n:127.0.0.1:38742_ 
c:control_collection s:shard1  x:control_collection_shard1_replica_n1] 
o.a.s.s.IndexSchema Loaded schema test/1.6 with uniqueid field id
   [junit4]   2> 3229233 INFO  (qtp316018699-27937) [n:127.0.0.1:38742_ 
c:control_collection s:shard1  x:control_collection_shard1_replica_n1] 
o.a.s.c.CoreContainer Creating SolrCore 'control_collection_shard1_replica_n1' 
using configuration from collection control_collection, trusted=true
   [junit4]   2> 3229234 INFO  (qtp316018699-27937) [n:127.0.0.1:38742_ 
c:control_collection s:shard1  x:control_collection_shard1_replica_n1] 
o.a.s.m.r.SolrJmxReporter JMX monitoring for 
'solr.core.control_collection.shard1.replica_n1' (registry 
'solr.core.control_collection.shard1.replica_n1') enabled at server: 
com.sun.jmx.mbeanserver.JmxMBeanServer@16b9a5e6
   [junit4]   2> 3229234 INFO  (qtp316018699-27937) [n:127.0.0.1:38742_ 
c:control_collection s:shard1  x:control_collection_shard1_replica_n1] 
o.a.s.c.SolrCore [[control_collection_shard1_replica_n1] ] Opening new SolrCore 
at 
[/x1/jenkins/jenkins-slave/workspace/Lucene-Solr-NightlyTests-master/checkout/solr/build/solr-core/test/J2/temp/solr.cloud.hdfs.HdfsRestartWhileUpdatingTest_632D3BDB9FC82BE0-001/control-001/cores/control_collection_shard1_replica_n1],
 
dataDir=[/x1/jenkins/jenkins-slave/workspace/Lucene-Solr-NightlyTests-master/checkout/solr/build/solr-core/test/J2/../../../../../../../../../../../x1/jenkins/jenkins-slave/workspace/Lucene-Solr-NightlyTests-master/checkout/solr/build/solr-core/test/J2/temp/solr.cloud.hdfs.HdfsRestartWhileUpdatingTest_632D3BDB9FC82BE0-001/control-001/cores/control_collection_shard1_replica_n1/data/]
   [junit4]   2> 3229241 INFO  (qtp316018699-27937) [n:127.0.0.1:38742_ 
c:control_collection s:shard1  x:control_collection_shard1_replica_n1] 
o.a.s.u.RandomMergePolicy RandomMergePolicy wrapping class 
org.apache.lucene.index.TieredMergePolicy: [TieredMergePolicy: 
maxMergeAtOnce=21, maxMergeAtOnceExplicit=33, maxMergedSegmentMB=90.9609375, 
floorSegmentMB=1.1337890625, forceMergeDeletesPctAllowed=5.110120031139312, 
segmentsPerTier=17.0, maxCFSSegmentSizeMB=8.796093022207999E12, 
noCFSRatio=0.5329788485702183, deletesPctAllowed=33.27283212168082
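
For reference: the RandomMergePolicy above wraps a Lucene TieredMergePolicy whose randomized parameters are dumped in that log line. A minimal Lucene sketch of setting those same knobs through the public API follows; the values are copied from the log, the class name is made up, and this is illustrative only, not the test's own code.

    import org.apache.lucene.index.IndexWriterConfig;
    import org.apache.lucene.index.TieredMergePolicy;

    public class MergePolicyFromLog {
      // Returns the given IndexWriterConfig with a TieredMergePolicy configured
      // with the parameters reported in the log line above.
      public static IndexWriterConfig configure(IndexWriterConfig iwc) {
        TieredMergePolicy tmp = new TieredMergePolicy();
        tmp.setMaxMergeAtOnce(21);
        tmp.setMaxMergedSegmentMB(90.9609375);
        tmp.setFloorSegmentMB(1.1337890625);
        tmp.setForceMergeDeletesPctAllowed(5.110120031139312);
        tmp.setSegmentsPerTier(17.0);
        tmp.setMaxCFSSegmentSizeMB(8.796093022207999E12);
        tmp.setNoCFSRatio(0.5329788485702183);
        tmp.setDeletesPctAllowed(33.27283212168082);
        return iwc.setMergePolicy(tmp);
      }
    }
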
   [junit4]   2> 3229256 WARN  (qtp316018699-27937) [n:127.0.0.1:38742_ 
c:control_collection s:shard1  x:control_collection_shard1_replica_n1] 
o.a.s.c.RequestHandlers INVALID paramSet a in requestHandler {type = 
requestHandler,name = /dump,class = DumpRequestHandler,attributes = 
{initParams=a, name=/dump, class=DumpRequestHandler},args = 
{defaults={a=A,b=B}}}
   [junit4]   2> 3229355 INFO  (qtp316018699-27937) [n:127.0.0.1:38742_ 
c:control_collection s:shard1  x:control_collection_shard1_replica_n1] 
o.a.s.u.UpdateHandler Using UpdateLog implementation: 
org.apache.solr.update.UpdateLog
   [junit4]   2> 3229355 INFO  (qtp316018699-27937) [n:127.0.0.1:38742_ 
c:control_collection s:shard1  x:control_collection_shard1_replica_n1] 
o.a.s.u.UpdateLog Initializing UpdateLog: dataDir= defaultSyncLevel=FLUSH 
numRecordsToKeep=100 maxNumLogsToKeep=10 numVersionBuckets=65536
   [junit4]   2> 3229356 INFO  (qtp316018699-27937) [n:127.0.0.1:38742_ 
c:control_collection s:shard1  x:control_collection_shard1_replica_n1] 
o.a.s.u.CommitTracker Hard AutoCommit: if uncommitted for 30000ms; 
   [junit4]   2> 3229356 INFO  (qtp316018699-27937) [n:127.0.0.1:38742_ 
c:control_collection s:shard1  x:control_collection_shard1_replica_n1] 
o.a.s.u.CommitTracker Soft AutoCommit: if uncommitted for 3000ms; 
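
For reference: the CommitTracker lines above show a 30000ms hard auto-commit and a 3000ms soft auto-commit window coming from the test's solrconfig. A hedged SolrJ sketch of the corresponding explicit client-side commits (not the test's own code; the class name is made up, and the ZooKeeper address and collection name are copied from the log):

    import java.util.Collections;
    import java.util.Optional;
    import org.apache.solr.client.solrj.impl.CloudSolrClient;

    public class ExplicitCommits {
      public static void main(String[] args) throws Exception {
        try (CloudSolrClient client = new CloudSolrClient.Builder(
                Collections.singletonList("127.0.0.1:42319"), Optional.of("/solr")).build()) {
          // Hard commit: flush and fsync the index, open a new searcher.
          client.commit("control_collection", true, true, false);
          // Soft commit: make recent updates visible without an fsync,
          // which is what the 3000ms soft auto-commit window does periodically.
          client.commit("control_collection", true, true, true);
        }
      }
    }
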
   [junit4]   2> 3229358 INFO  (qtp316018699-27937) [n:127.0.0.1:38742_ 
c:control_collection s:shard1  x:control_collection_shard1_replica_n1] 
o.a.s.u.RandomMergePolicy RandomMergePolicy wrapping class 
org.apache.lucene.index.MockRandomMergePolicy: 
org.apache.lucene.index.MockRandomMergePolicy@2f8f00e3
   [junit4]   2> 3229358 INFO  (qtp316018699-27937) [n:127.0.0.1:38742_ 
c:control_collection s:shard1  x:control_collection_shard1_replica_n1] 
o.a.s.s.SolrIndexSearcher Opening 
[Searcher@60713cab[control_collection_shard1_replica_n1] main]
   [junit4]   2> 3229360 INFO  (qtp316018699-27937) [n:127.0.0.1:38742_ 
c:control_collection s:shard1  x:control_collection_shard1_replica_n1] 
o.a.s.r.ManagedResourceStorage Configured ZooKeeperStorageIO with znodeBase: 
/configs/conf1
   [junit4]   2> 3229361 INFO  (qtp316018699-27937) [n:127.0.0.1:38742_ 
c:control_collection s:shard1  x:control_collection_shard1_replica_n1] 
o.a.s.r.ManagedResourceStorage Loaded null at path _rest_managed.json using 
ZooKeeperStorageIO:path=/configs/conf1
   [junit4]   2> 3229362 INFO  (qtp316018699-27937) [n:127.0.0.1:38742_ 
c:control_collection s:shard1  x:control_collection_shard1_replica_n1] 
o.a.s.h.ReplicationHandler Commits will be reserved for 10000ms.
   [junit4]   2> 3229362 INFO  (qtp316018699-27937) [n:127.0.0.1:38742_ 
c:control_collection s:shard1  x:control_collection_shard1_replica_n1] 
o.a.s.u.UpdateLog Could not find max version in index or recent updates, using 
new clock 1616848988305096704
   [junit4]   2> 3229363 INFO  
(searcherExecutor-8795-thread-1-processing-n:127.0.0.1:38742_ 
x:control_collection_shard1_replica_n1 c:control_collection s:shard1) 
[n:127.0.0.1:38742_ c:control_collection s:shard1  
x:control_collection_shard1_replica_n1] o.a.s.c.SolrCore 
[control_collection_shard1_replica_n1] Registered new searcher 
Searcher@60713cab[control_collection_shard1_replica_n1] 
main{ExitableDirectoryReader(UninvertingDirectoryReader())}
   [junit4]   2> 3229370 INFO  (qtp316018699-27937) [n:127.0.0.1:38742_ 
c:control_collection s:shard1  x:control_collection_shard1_replica_n1] 
o.a.s.c.ZkShardTerms Successful update of terms at 
/collections/control_collection/terms/shard1 to Terms{values={core_node2=0}, 
version=0}
   [junit4]   2> 3229374 INFO  (qtp316018699-27937) [n:127.0.0.1:38742_ 
c:control_collection s:shard1  x:control_collection_shard1_replica_n1] 
o.a.s.c.ShardLeaderElectionContext Enough replicas found to continue.
   [junit4]   2> 3229374 INFO  (qtp316018699-27937) [n:127.0.0.1:38742_ 
c:control_collection s:shard1  x:control_collection_shard1_replica_n1] 
o.a.s.c.ShardLeaderElectionContext I may be the new leader - try and sync
   [junit4]   2> 3229374 INFO  (qtp316018699-27937) [n:127.0.0.1:38742_ 
c:control_collection s:shard1  x:control_collection_shard1_replica_n1] 
o.a.s.c.SyncStrategy Sync replicas to 
https://127.0.0.1:38742/control_collection_shard1_replica_n1/
   [junit4]   2> 3229374 INFO  (qtp316018699-27937) [n:127.0.0.1:38742_ 
c:control_collection s:shard1  x:control_collection_shard1_replica_n1] 
o.a.s.c.SyncStrategy Sync Success - now sync replicas to me
   [junit4]   2> 3229375 INFO  (qtp316018699-27937) [n:127.0.0.1:38742_ 
c:control_collection s:shard1  x:control_collection_shard1_replica_n1] 
o.a.s.c.SyncStrategy 
https://127.0.0.1:38742/control_collection_shard1_replica_n1/ has no replicas
   [junit4]   2> 3229380 INFO  (qtp316018699-27937) [n:127.0.0.1:38742_ 
c:control_collection s:shard1  x:control_collection_shard1_replica_n1] 
o.a.s.c.ShardLeaderElectionContext I am the new leader: 
https://127.0.0.1:38742/control_collection_shard1_replica_n1/ shard1
   [junit4]   2> 3229380 DEBUG 
(OverseerStateUpdate-72259584929300485-127.0.0.1:38742_-n_0000000000) 
[n:127.0.0.1:38742_    ] o.a.s.c.Overseer processMessage: queueSize: 1, message 
= {
   [junit4]   2>   "operation":"leader",
   [junit4]   2>   "shard":"shard1",
   [junit4]   2>   "collection":"control_collection",
   [junit4]   2>   "base_url":"https://127.0.0.1:38742";,
   [junit4]   2>   "core":"control_collection_shard1_replica_n1",
   [junit4]   2>   "state":"active"} current state version: 0
   [junit4]   2> 3229482 INFO  (qtp316018699-27937) [n:127.0.0.1:38742_ 
c:control_collection s:shard1  x:control_collection_shard1_replica_n1] 
o.a.s.c.ZkController I am the leader, no recovery necessary
   [junit4]   2> 3229484 DEBUG 
(OverseerStateUpdate-72259584929300485-127.0.0.1:38742_-n_0000000000) 
[n:127.0.0.1:38742_    ] o.a.s.c.Overseer processMessage: queueSize: 1, message 
= {
   [junit4]   2>   "core":"control_collection_shard1_replica_n1",
   [junit4]   2>   "core_node_name":"core_node2",
   [junit4]   2>   "roles":null,
   [junit4]   2>   "base_url":"https://127.0.0.1:38742";,
   [junit4]   2>   "node_name":"127.0.0.1:38742_",
   [junit4]   2>   "numShards":"1",
   [junit4]   2>   "state":"active",
   [junit4]   2>   "shard":"shard1",
   [junit4]   2>   "collection":"control_collection",
   [junit4]   2>   "type":"NRT",
   [junit4]   2>   "operation":"state"} current state version: 0
   [junit4]   2> 3229485 INFO  (qtp316018699-27937) [n:127.0.0.1:38742_ 
c:control_collection s:shard1  x:control_collection_shard1_replica_n1] 
o.a.s.s.HttpSolrCall [admin] webapp=null path=/admin/cores 
params={qt=/admin/cores&collection.configName=conf1&newCollection=true&name=control_collection_shard1_replica_n1&action=CREATE&numShards=1&collection=control_collection&shard=shard1&wt=javabin&version=2&replicaType=NRT}
 status=0 QTime=1448
   [junit4]   2> 3229491 INFO  (qtp316018699-27935) [n:127.0.0.1:38742_    ] 
o.a.s.h.a.CollectionsHandler Wait for new collection to be active for at most 
30 seconds. Check all shard replicas
   [junit4]   2> 3229585 INFO  (zkCallback-9095-thread-1) [    ] 
o.a.s.c.c.ZkStateReader A cluster state change: [WatchedEvent 
state:SyncConnected type:NodeDataChanged 
path:/collections/control_collection/state.json] for collection 
[control_collection] has occurred - updating... (live nodes size: [1])
   [junit4]   2> 3229755 DEBUG (ScheduledTrigger-8788-thread-4) [    ] 
o.a.s.c.a.NodeLostTrigger Running NodeLostTrigger: .auto_add_replicas with 
currently live nodes: 1
   [junit4]   2> 3229903 INFO  
(OverseerCollectionConfigSetProcessor-72259584929300485-127.0.0.1:38742_-n_0000000000)
 [n:127.0.0.1:38742_    ] o.a.s.c.OverseerTaskQueue Response ZK path: 
/overseer/collection-queue-work/qnr-0000000000 doesn't exist.  Requestor may 
have disconnected from ZooKeeper
   [junit4]   2> 3230492 INFO  (qtp316018699-27935) [n:127.0.0.1:38742_    ] 
o.a.s.s.HttpSolrCall [admin] webapp=null path=/admin/collections 
params={collection.configName=conf1&name=control_collection&nrtReplicas=1&action=CREATE&numShards=1&createNodeSet=127.0.0.1:38742_&wt=javabin&version=2}
 status=0 QTime=2593
   [junit4]   2> 3230501 INFO  (zkConnectionManagerCallback-9114-thread-1) [    
] o.a.s.c.c.ConnectionManager zkClient has connected
   [junit4]   2> 3230503 INFO  
(TEST-HdfsRestartWhileUpdatingTest.test-seed#[632D3BDB9FC82BE0]) [    ] 
o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (0) -> (1)
   [junit4]   2> 3230504 INFO  
(TEST-HdfsRestartWhileUpdatingTest.test-seed#[632D3BDB9FC82BE0]) [    ] 
o.a.s.c.s.i.ZkClientClusterStateProvider Cluster at 127.0.0.1:42319/solr ready
   [junit4]   2> 3230504 INFO  
(TEST-HdfsRestartWhileUpdatingTest.test-seed#[632D3BDB9FC82BE0]) [    ] 
o.a.s.c.ChaosMonkey monkey: init - expire sessions:false cause connection 
loss:false
   [junit4]   2> 3230509 INFO  (qtp316018699-27936) [n:127.0.0.1:38742_    ] 
o.a.s.h.a.CollectionsHandler Invoked Collection Action :create with params 
collection.configName=conf1&name=collection1&nrtReplicas=1&action=CREATE&numShards=1&createNodeSet=&stateFormat=2&wt=javabin&version=2
 and sendToOCPQueue=true
   [junit4]   2> 3230512 INFO  
(OverseerThreadFactory-8790-thread-2-processing-n:127.0.0.1:38742_) 
[n:127.0.0.1:38742_    ] o.a.s.c.a.c.CreateCollectionCmd Create collection 
collection1
   [junit4]   2> 3230516 DEBUG 
(OverseerStateUpdate-72259584929300485-127.0.0.1:38742_-n_0000000000) 
[n:127.0.0.1:38742_    ] o.a.s.c.Overseer processMessage: queueSize: 1, message 
= {
   [junit4]   2>   "name":"collection1",
   [junit4]   2>   "fromApi":"true",
   [junit4]   2>   "collection.configName":"conf1",
   [junit4]   2>   "numShards":"1",
   [junit4]   2>   "createNodeSet":"",
   [junit4]   2>   "stateFormat":"2",
   [junit4]   2>   "nrtReplicas":"1",
   [junit4]   2>   "replicationFactor":"1",
   [junit4]   2>   "operation":"create"} current state version: 0
   [junit4]   2> 3230717 WARN  
(OverseerThreadFactory-8790-thread-2-processing-n:127.0.0.1:38742_) 
[n:127.0.0.1:38742_    ] o.a.s.c.a.c.CreateCollectionCmd It is unusual to 
create a collection (collection1) without cores.
   [junit4]   2> 3230723 INFO  (qtp316018699-27936) [n:127.0.0.1:38742_    ] 
o.a.s.h.a.CollectionsHandler Wait for new collection to be active for at most 
30 seconds. Check all shard replicas
   [junit4]   2> 3230724 INFO  (qtp316018699-27936) [n:127.0.0.1:38742_    ] 
o.a.s.s.HttpSolrCall [admin] webapp=null path=/admin/collections 
params={collection.configName=conf1&name=collection1&nrtReplicas=1&action=CREATE&numShards=1&createNodeSet=&stateFormat=2&wt=javabin&version=2}
 status=0 QTime=215
   [junit4]   2> 3230755 DEBUG (ScheduledTrigger-8788-thread-3) [    ] 
o.a.s.c.a.NodeLostTrigger Running NodeLostTrigger: .auto_add_replicas with 
currently live nodes: 1
   [junit4]   2> 3230898 INFO  
(TEST-HdfsRestartWhileUpdatingTest.test-seed#[632D3BDB9FC82BE0]) [    ] 
o.a.s.c.AbstractFullDistribZkTestBase create jetty 1 in directory 
/x1/jenkins/jenkins-slave/workspace/Lucene-Solr-NightlyTests-master/checkout/solr/build/solr-core/test/J2/temp/solr.cloud.hdfs.HdfsRestartWhileUpdatingTest_632D3BDB9FC82BE0-001/shard-1-001
 of type NRT
   [junit4]   2> 3230900 INFO  
(TEST-HdfsRestartWhileUpdatingTest.test-seed#[632D3BDB9FC82BE0]) [    ] 
o.e.j.s.Server jetty-9.4.11.v20180605; built: 2018-06-05T18:24:03.829Z; git: 
d5fc0523cfa96bfebfbda19606cad384d772f04c; jvm 1.8.0_191-b12
   [junit4]   2> 3230901 INFO  
(TEST-HdfsRestartWhileUpdatingTest.test-seed#[632D3BDB9FC82BE0]) [    ] 
o.e.j.s.session DefaultSessionIdManager workerName=node0
   [junit4]   2> 3230901 INFO  
(TEST-HdfsRestartWhileUpdatingTest.test-seed#[632D3BDB9FC82BE0]) [    ] 
o.e.j.s.session No SessionScavenger set, using defaults
   [junit4]   2> 3230901 INFO  
(TEST-HdfsRestartWhileUpdatingTest.test-seed#[632D3BDB9FC82BE0]) [    ] 
o.e.j.s.session node0 Scavenging every 600000ms
   [junit4]   2> 3230902 INFO  
(TEST-HdfsRestartWhileUpdatingTest.test-seed#[632D3BDB9FC82BE0]) [    ] 
o.e.j.s.h.ContextHandler Started 
o.e.j.s.ServletContextHandler@57d27535{/,null,AVAILABLE}
   [junit4]   2> 3230902 INFO  
(TEST-HdfsRestartWhileUpdatingTest.test-seed#[632D3BDB9FC82BE0]) [    ] 
o.e.j.s.AbstractConnector Started ServerConnector@ec5d759{SSL,[ssl, 
http/1.1]}{127.0.0.1:37840}
   [junit4]   2> 3230902 INFO  
(TEST-HdfsRestartWhileUpdatingTest.test-seed#[632D3BDB9FC82BE0]) [    ] 
o.e.j.s.Server Started @3230961ms
   [junit4]   2> 3230902 INFO  
(TEST-HdfsRestartWhileUpdatingTest.test-seed#[632D3BDB9FC82BE0]) [    ] 
o.a.s.c.s.e.JettySolrRunner Jetty properties: 
{solr.data.dir=hdfs://localhost:39350/hdfs__localhost_39350__x1_jenkins_jenkins-slave_workspace_Lucene-Solr-NightlyTests-master_checkout_solr_build_solr-core_test_J2_temp_solr.cloud.hdfs.HdfsRestartWhileUpdatingTest_632D3BDB9FC82BE0-001_tempDir-002_jetty1,
 solrconfig=solrconfig.xml, hostContext=/, hostPort=37840, 
coreRootDirectory=/x1/jenkins/jenkins-slave/workspace/Lucene-Solr-NightlyTests-master/checkout/solr/build/solr-core/test/J2/temp/solr.cloud.hdfs.HdfsRestartWhileUpdatingTest_632D3BDB9FC82BE0-001/shard-1-001/cores}
   [junit4]   2> 3230903 ERROR 
(TEST-HdfsRestartWhileUpdatingTest.test-seed#[632D3BDB9FC82BE0]) [    ] 
o.a.s.u.StartupLoggingUtils Missing Java Option solr.log.dir. Logging may be 
missing or incomplete.
   [junit4]   2> 3230903 INFO  
(TEST-HdfsRestartWhileUpdatingTest.test-seed#[632D3BDB9FC82BE0]) [    ] 
o.a.s.s.SolrDispatchFilter Using logger factory 
org.apache.logging.slf4j.Log4jLoggerFactory
   [junit4]   2> 3230903 INFO  
(TEST-HdfsRestartWhileUpdatingTest.test-seed#[632D3BDB9FC82BE0]) [    ] 
o.a.s.s.SolrDispatchFilter  ___      _       Welcome to Apache Solr™ version 
8.0.0
   [junit4]   2> 3230903 INFO  
(TEST-HdfsRestartWhileUpdatingTest.test-seed#[632D3BDB9FC82BE0]) [    ] 
o.a.s.s.SolrDispatchFilter / __| ___| |_ _   Starting in cloud mode on port null
   [junit4]   2> 3230903 INFO  
(TEST-HdfsRestartWhileUpdatingTest.test-seed#[632D3BDB9FC82BE0]) [    ] 
o.a.s.s.SolrDispatchFilter \__ \/ _ \ | '_|  Install dir: null
   [junit4]   2> 3230903 INFO  
(TEST-HdfsRestartWhileUpdatingTest.test-seed#[632D3BDB9FC82BE0]) [    ] 
o.a.s.s.SolrDispatchFilter |___/\___/_|_|    Start time: 
2018-11-11T14:42:35.195Z
   [junit4]   2> 3230905 INFO  (zkConnectionManagerCallback-9116-thread-1) [    
] o.a.s.c.c.ConnectionManager zkClient has connected
   [junit4]   2> 3230906 INFO  
(TEST-HdfsRestartWhileUpdatingTest.test-seed#[632D3BDB9FC82BE0]) [    ] 
o.a.s.s.SolrDispatchFilter Loading solr.xml from SolrHome (not found in 
ZooKeeper)
   [junit4]   2> 3230906 INFO  
(TEST-HdfsRestartWhileUpdatingTest.test-seed#[632D3BDB9FC82BE0]) [    ] 
o.a.s.c.SolrXmlConfig Loading container configuration from 
/x1/jenkins/jenkins-slave/workspace/Lucene-Solr-NightlyTests-master/checkout/solr/build/solr-core/test/J2/temp/solr.cloud.hdfs.HdfsRestartWhileUpdatingTest_632D3BDB9FC82BE0-001/shard-1-001/solr.xml
   [junit4]   2> 3230911 INFO  
(TEST-HdfsRestartWhileUpdatingTest.test-seed#[632D3BDB9FC82BE0]) [    ] 
o.a.s.c.SolrXmlConfig Configuration parameter autoReplicaFailoverWorkLoopDelay 
is ignored
   [junit4]   2> 3230911 INFO  
(TEST-HdfsRestartWhileUpdatingTest.test-seed#[632D3BDB9FC82BE0]) [    ] 
o.a.s.c.SolrXmlConfig Configuration parameter 
autoReplicaFailoverBadNodeExpiration is ignored
   [junit4]   2> 3230913 INFO  
(TEST-HdfsRestartWhileUpdatingTest.test-seed#[632D3BDB9FC82BE0]) [    ] 
o.a.s.c.SolrXmlConfig MBean server found: 
com.sun.jmx.mbeanserver.JmxMBeanServer@16b9a5e6, but no JMX reporters were 
configured - adding default JMX reporter.
   [junit4]   2> 3231107 INFO  
(TEST-HdfsRestartWhileUpdatingTest.test-seed#[632D3BDB9FC82BE0]) [    ] 
o.a.s.c.ZkContainer Zookeeper client=127.0.0.1:42319/solr
   [junit4]   2> 3231110 INFO  (zkConnectionManagerCallback-9120-thread-1) [    
] o.a.s.c.c.ConnectionManager zkClient has connected
   [junit4]   2> 3231118 INFO  (zkConnectionManagerCallback-9122-thread-1) [    
] o.a.s.c.c.ConnectionManager zkClient has connected
   [junit4]   2> 3231131 INFO  
(TEST-HdfsRestartWhileUpdatingTest.test-seed#[632D3BDB9FC82BE0]) [    ] 
o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (0) -> (1)
   [junit4]   2> 3231139 INFO  
(TEST-HdfsRestartWhileUpdatingTest.test-seed#[632D3BDB9FC82BE0]) [    ] 
o.a.s.c.TransientSolrCoreCacheDefault Allocating transient cache for 4 
transient cores
   [junit4]   2> 3231139 INFO  
(TEST-HdfsRestartWhileUpdatingTest.test-seed#[632D3BDB9FC82BE0]) [    ] 
o.a.s.c.ZkController Register node as live in 
ZooKeeper:/live_nodes/127.0.0.1:37840_
   [junit4]   2> 3231140 DEBUG 
(OverseerStateUpdate-72259584929300485-127.0.0.1:38742_-n_0000000000) 
[n:127.0.0.1:38742_    ] o.a.s.c.Overseer processMessage: queueSize: 1, message 
= {
   [junit4]   2>   "operation":"downnode",
   [junit4]   2>   "node_name":"127.0.0.1:37840_"} current state version: 0
   [junit4]   2> 3231142 INFO  (zkCallback-9095-thread-2) [    ] 
o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (1) -> (2)
   [junit4]   2> 3231143 INFO  (zkCallback-9102-thread-1) [    ] 
o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (1) -> (2)
   [junit4]   2> 3231143 INFO  (zkCallback-9121-thread-1) [    ] 
o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (1) -> (2)
   [junit4]   2> 3231146 INFO  (zkCallback-9113-thread-1) [    ] 
o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (1) -> (2)
   [junit4]   2> 3231171 INFO  (zkConnectionManagerCallback-9129-thread-1) [    
] o.a.s.c.c.ConnectionManager zkClient has connected
   [junit4]   2> 3231172 INFO  
(TEST-HdfsRestartWhileUpdatingTest.test-seed#[632D3BDB9FC82BE0]) [    ] 
o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (0) -> (2)
   [junit4]   2> 3231174 INFO  
(TEST-HdfsRestartWhileUpdatingTest.test-seed#[632D3BDB9FC82BE0]) [    ] 
o.a.s.c.s.i.ZkClientClusterStateProvider Cluster at 127.0.0.1:42319/solr ready
   [junit4]   2> 3231174 INFO  
(TEST-HdfsRestartWhileUpdatingTest.test-seed#[632D3BDB9FC82BE0]) [    ] 
o.a.s.h.a.MetricsHistoryHandler No .system collection, keeping metrics history 
in memory.
   [junit4]   2> 3231203 INFO  
(TEST-HdfsRestartWhileUpdatingTest.test-seed#[632D3BDB9FC82BE0]) [    ] 
o.a.s.m.r.SolrJmxReporter JMX monitoring for 'solr.node' (registry 'solr.node') 
enabled at server: com.sun.jmx.mbeanserver.JmxMBeanServer@16b9a5e6
   [junit4]   2> 3231220 INFO  
(TEST-HdfsRestartWhileUpdatingTest.test-seed#[632D3BDB9FC82BE0]) [    ] 
o.a.s.m.r.SolrJmxReporter JMX monitoring for 'solr.jvm' (registry 'solr.jvm') 
enabled at server: com.sun.jmx.mbeanserver.JmxMBeanServer@16b9a5e6
   [junit4]   2> 3231220 INFO  
(TEST-HdfsRestartWhileUpdatingTest.test-seed#[632D3BDB9FC82BE0]) [    ] 
o.a.s.m.r.SolrJmxReporter JMX monitoring for 'solr.jetty' (registry 
'solr.jetty') enabled at server: com.sun.jmx.mbeanserver.JmxMBeanServer@16b9a5e6
   [junit4]   2> 3231223 INFO  
(TEST-HdfsRestartWhileUpdatingTest.test-seed#[632D3BDB9FC82BE0]) [    ] 
o.a.s.c.CorePropertiesLocator Found 0 core definitions underneath 
/x1/jenkins/jenkins-slave/workspace/Lucene-Solr-NightlyTests-master/checkout/solr/build/solr-core/test/J2/temp/solr.cloud.hdfs.HdfsRestartWhileUpdatingTest_632D3BDB9FC82BE0-001/shard-1-001/cores
   [junit4]   2> 3231325 INFO  (qtp1625444062-27989) [n:127.0.0.1:37840_    ] 
o.a.s.h.a.CollectionsHandler Invoked Collection Action :addreplica with params 
node=127.0.0.1:37840_&action=ADDREPLICA&collection=collection1&shard=shard1&type=NRT&wt=javabin&version=2
 and sendToOCPQueue=true
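
For reference: the ADDREPLICA parameters above map onto the Collections API; a minimal SolrJ sketch of an equivalent request follows (illustrative only, not the test's own code; the class name is made up, and the collection, shard, node and ZooKeeper address are copied from the log).

    import java.util.Collections;
    import java.util.Optional;
    import org.apache.solr.client.solrj.impl.CloudSolrClient;
    import org.apache.solr.client.solrj.request.CollectionAdminRequest;

    public class AddReplicaToShard1 {
      public static void main(String[] args) throws Exception {
        try (CloudSolrClient client = new CloudSolrClient.Builder(
                Collections.singletonList("127.0.0.1:42319"), Optional.of("/solr")).build()) {
          CollectionAdminRequest.AddReplica addReplica =
              CollectionAdminRequest.addReplicaToShard("collection1", "shard1");
          addReplica.setNode("127.0.0.1:37840_");  // place the new NRT replica on the jetty 1 node
          addReplica.process(client);              // same as action=ADDREPLICA&... in the log line above
        }
      }
    }
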
   [junit4]   2> 3231328 INFO  
(OverseerCollectionConfigSetProcessor-72259584929300485-127.0.0.1:38742_-n_0000000000)
 [n:127.0.0.1:38742_    ] o.a.s.c.OverseerTaskQueue Response ZK path: 
/overseer/collection-queue-work/qnr-0000000002 doesn't exist.  Requestor may 
have disconnected from ZooKeeper
   [junit4]   2> 3231351 INFO  (qtp1625444062-27992) [n:127.0.0.1:37840_    ] 
o.a.s.s.HttpSolrCall [admin] webapp=null path=/admin/metrics 
params={prefix=CONTAINER.fs.usableSpace,CONTAINER.fs.totalSpace,CORE.coreName&wt=javabin&version=2&group=solr.node,solr.core}
 status=0 QTime=0
   [junit4]   2> 3231358 INFO  (qtp316018699-27935) [n:127.0.0.1:38742_    ] 
o.a.s.s.HttpSolrCall [admin] webapp=null path=/admin/metrics 
params={wt=javabin&version=2&key=solr.core.control_collection.shard1.replica_n1:INDEX.sizeInBytes}
 status=0 QTime=4
   [junit4]   2> 3231359 INFO  (qtp316018699-27936) [n:127.0.0.1:38742_    ] 
o.a.s.s.HttpSolrCall [admin] webapp=null path=/admin/metrics 
params={prefix=CONTAINER.fs.usableSpace,CONTAINER.fs.totalSpace,CORE.coreName&wt=javabin&version=2&group=solr.node,solr.core}
 status=0 QTime=0
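
For reference: the /admin/metrics requests above (usable/total filesystem space and core names, grouped by solr.node and solr.core) can be reproduced from SolrJ with a generic request. A hedged sketch with the parameters copied from the logged request (illustrative only, not the test's own code; the class name is made up):

    import java.util.Collections;
    import java.util.Optional;
    import org.apache.solr.client.solrj.SolrRequest;
    import org.apache.solr.client.solrj.impl.CloudSolrClient;
    import org.apache.solr.client.solrj.request.GenericSolrRequest;
    import org.apache.solr.client.solrj.response.SimpleSolrResponse;
    import org.apache.solr.common.params.ModifiableSolrParams;

    public class NodeMetricsProbe {
      public static void main(String[] args) throws Exception {
        try (CloudSolrClient client = new CloudSolrClient.Builder(
                Collections.singletonList("127.0.0.1:42319"), Optional.of("/solr")).build()) {
          ModifiableSolrParams params = new ModifiableSolrParams();
          params.set("group", "solr.node,solr.core");
          params.set("prefix", "CONTAINER.fs.usableSpace,CONTAINER.fs.totalSpace,CORE.coreName");
          SimpleSolrResponse rsp =
              new GenericSolrRequest(SolrRequest.METHOD.GET, "/admin/metrics", params).process(client);
          System.out.println(rsp.getResponse());  // NamedList of the reported metrics
        }
      }
    }
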
   [junit4]   2> 3231361 INFO  
(OverseerThreadFactory-8790-thread-3-processing-n:127.0.0.1:38742_) 
[n:127.0.0.1:38742_ c:collection1 s:shard1  ] o.a.s.c.a.c.AddReplicaCmd Node 
Identified 127.0.0.1:37840_ for creating new replica of shard shard1 for 
collection collection1
   [junit4]   2> 3231366 INFO  (qtp1625444062-27991) [n:127.0.0.1:37840_    
x:collection1_shard1_replica_n21] o.a.s.h.a.CoreAdminOperation core create 
command 
qt=/admin/cores&collection.configName=conf1&name=collection1_shard1_replica_n21&action=CREATE&collection=collection1&shard=shard1&wt=javabin&version=2&replicaType=NRT
   [junit4]   2> 3231369 DEBUG 
(OverseerStateUpdate-72259584929300485-127.0.0.1:38742_-n_0000000000) 
[n:127.0.0.1:38742_    ] o.a.s.c.Overseer processMessage: queueSize: 1, message 
= {
   [junit4]   2>   "core":"collection1_shard1_replica_n21",
   [junit4]   2>   "roles":null,
   [junit4]   2>   "base_url":"https://127.0.0.1:37840";,
   [junit4]   2>   "node_name":"127.0.0.1:37840_",
   [junit4]   2>   "numShards":"1",
   [junit4]   2>   "state":"down",
   [junit4]   2>   "shard":"shard1",
   [junit4]   2>   "collection":"collection1",
   [junit4]   2>   "type":"NRT",
   [junit4]   2>   "operation":"state"} current state version: 0
   [junit4]   2> 3231756 DEBUG (ScheduledTrigger-8788-thread-4) [    ] 
o.a.s.c.a.NodeLostTrigger Running NodeLostTrigger: .auto_add_replicas with 
currently live nodes: 2
   [junit4]   2> 3232390 INFO  (qtp1625444062-27991) [n:127.0.0.1:37840_ 
c:collection1 s:shard1  x:collection1_shard1_replica_n21] o.a.s.c.SolrConfig 
Using Lucene MatchVersion: 8.0.0
   [junit4]   2> 3232405 INFO  (qtp1625444062-27991) [n:127.0.0.1:37840_ 
c:collection1 s:shard1  x:collection1_shard1_replica_n21] o.a.s.s.IndexSchema 
[collection1_shard1_replica_n21] Schema name=test
   [junit4]   2> 3232510 INFO  (qtp1625444062-27991) [n:127.0.0.1:37840_ 
c:collection1 s:shard1  x:collection1_shard1_replica_n21] o.a.s.s.IndexSchema 
Loaded schema test/1.6 with uniqueid field id
   [junit4]   2> 3232530 INFO  (qtp1625444062-27991) [n:127.0.0.1:37840_ 
c:collection1 s:shard1  x:collection1_shard1_replica_n21] o.a.s.c.CoreContainer 
Creating SolrCore 'collection1_shard1_replica_n21' using configuration from 
collection collection1, trusted=true
   [junit4]   2> 3232530 INFO  (qtp1625444062-27991) [n:127.0.0.1:37840_ 
c:collection1 s:shard1  x:collection1_shard1_replica_n21] 
o.a.s.m.r.SolrJmxReporter JMX monitoring for 
'solr.core.collection1.shard1.replica_n21' (registry 
'solr.core.collection1.shard1.replica_n21') enabled at server: 
com.sun.jmx.mbeanserver.JmxMBeanServer@16b9a5e6
   [junit4]   2> 3232531 INFO  (qtp1625444062-27991) [n:127.0.0.1:37840_ 
c:collection1 s:shard1  x:collection1_shard1_replica_n21] o.a.s.c.SolrCore 
[[collection1_shard1_replica_n21] ] Opening new SolrCore at 
[/x1/jenkins/jenkins-slave/workspace/Lucene-Solr-NightlyTests-master/checkout/solr/build/solr-core/test/J2/temp/solr.cloud.hdfs.HdfsRestartWhileUpdatingTest_632D3BDB9FC82BE0-001/shard-1-001/cores/collection1_shard1_replica_n21],
 
dataDir=[/x1/jenkins/jenkins-slave/workspace/Lucene-Solr-NightlyTests-master/checkout/solr/build/solr-core/test/J2/temp/solr.cloud.hdfs.HdfsRestartWhileUpdatingTest_632D3BDB9FC82BE0-001/shard-1-001/cores/collection1_shard1_replica_n21/data/]
   [junit4]   2> 3232535 INFO  (qtp1625444062-27991) [n:127.0.0.1:37840_ 
c:collection1 s:shard1  x:collection1_shard1_replica_n21] 
o.a.s.u.RandomMergePolicy RandomMergePolicy wrapping class 
org.apache.lucene.index.TieredMergePolicy: [TieredMergePolicy: 
maxMergeAtOnce=21, maxMergeAtOnceExplicit=33, maxMergedSegmentMB=90.9609375, 
floorSegmentMB=1.1337890625, forceMergeDeletesPctAllowed=5.110120031139312, 
segmentsPerTier=17.0, maxCFSSegmentSizeMB=8.796093022207999E12, 
noCFSRatio=0.5329788485702183, deletesPctAllowed=33.27283212168082
   [junit4]   2> 3232545 WARN  (qtp1625444062-27991) [n:127.0.0.1:37840_ 
c:collection1 s:shard1  x:collection1_shard1_replica_n21] 
o.a.s.c.RequestHandlers INVALID paramSet a in requestHandler {type = 
requestHandler,name = /dump,class = DumpRequestHandler,attributes = 
{initParams=a, name=/dump, class=DumpRequestHandler},args = 
{defaults={a=A,b=B}}}
   [junit4]   2> 3232607 INFO  (qtp1625444062-27991) [n:127.0.0.1:37840_ 
c:collection1 s:shard1  x:collection1_shard1_replica_n21] o.a.s.u.UpdateHandler 
Using UpdateLog implementation: org.apache.solr.update.UpdateLog
   [junit4]   2> 3232607 INFO  (qtp1625444062-27991) [n:127.0.0.1:37840_ 
c:collection1 s:shard1  x:collection1_shard1_replica_n21] o.a.s.u.UpdateLog 
Initializing UpdateLog: dataDir= defaultSyncLevel=FLUSH numRecordsToKeep=100 
maxNumLogsToKeep=10 numVersionBuckets=65536
   [junit4]   2> 3232608 INFO  (qtp1625444062-27991) [n:127.0.0.1:37840_ 
c:collection1 s:shard1  x:collection1_shard1_replica_n21] o.a.s.u.CommitTracker 
Hard AutoCommit: if uncommitted for 30000ms; 
   [junit4]   2> 3232608 INFO  (qtp1625444062-27991) [n:127.0.0.1:37840_ 
c:collection1 s:shard1  x:collection1_shard1_replica_n21] o.a.s.u.CommitTracker 
Soft AutoCommit: if uncommitted for 3000ms; 
   [junit4]   2> 3232610 INFO  (qtp1625444062-27991) [n:127.0.0.1:37840_ 
c:collection1 s:shard1  x:collection1_shard1_replica_n21] 
o.a.s.u.RandomMergePolicy RandomMergePolicy wrapping class 
org.apache.lucene.index.MockRandomMergePolicy: 
org.apache.lucene.index.MockRandomMergePolicy@5c6232c
   [junit4]   2> 3232611 INFO  (qtp1625444062-27991) [n:127.0.0.1:37840_ 
c:collection1 s:shard1  x:collection1_shard1_replica_n21] 
o.a.s.s.SolrIndexSearcher Opening 
[Searcher@292cfd2c[collection1_shard1_replica_n21] main]
   [junit4]   2> 3232612 INFO  (qtp1625444062-27991) [n:127.0.0.1:37840_ 
c:collection1 s:shard1  x:collection1_shard1_replica_n21] 
o.a.s.r.ManagedResourceStorage Configured ZooKeeperStorageIO with znodeBase: 
/configs/conf1
   [junit4]   2> 3232613 INFO  (qtp1625444062-27991) [n:127.0.0.1:37840_ 
c:collection1 s:shard1  x:collection1_shard1_replica_n21] 
o.a.s.r.ManagedResourceStorage Loaded null at path _rest_managed.json using 
ZooKeeperStorageIO:path=/configs/conf1
   [junit4]   2> 3232614 INFO  (qtp1625444062-27991) [n:127.0.0.1:37840_ 
c:collection1 s:shard1  x:collection1_shard1_replica_n21] 
o.a.s.h.ReplicationHandler Commits will be reserved for 10000ms.
   [junit4]   2> 3232614 INFO  (qtp1625444062-27991) [n:127.0.0.1:37840_ 
c:collection1 s:shard1  x:collection1_shard1_replica_n21] o.a.s.u.UpdateLog 
Could not find max version in index or recent updates, using new clock 
1616848991715065856
   [junit4]   2> 3232615 INFO  
(searcherExecutor-8809-thread-1-processing-n:127.0.0.1:37840_ 
x:collection1_shard1_replica_n21 c:collection1 s:shard1) [n:127.0.0.1:37840_ 
c:collection1 s:shard1  x:collection1_shard1_replica_n21] o.a.s.c.SolrCore 
[collection1_shard1_replica_n21] Registered new searcher 
Searcher@292cfd2c[collection1_shard1_replica_n21] 
main{ExitableDirectoryReader(UninvertingDirectoryReader())}
   [junit4]   2> 3232623 INFO  (qtp1625444062-27991) [n:127.0.0.1:37840_ 
c:collection1 s:shard1  x:collection1_shard1_replica_n21] o.a.s.c.ZkShardTerms 
Successful update of terms at /collections/collection1/terms/shard1 to 
Terms{values={core_node22=0}, version=0}
   [junit4]   2> 3232628 INFO  (qtp1625444062-27991) [n:127.0.0.1:37840_ 
c:collection1 s:shard1  x:collection1_shard1_replica_n21] 
o.a.s.c.ShardLeaderElectionContext Enough replicas found to continue.
   [junit4]   2> 3232628 INFO  (qtp1625444062-27991) [n:127.0.0.1:37840_ 
c:collection1 s:shard1  x:collection1_shard1_replica_n21] 
o.a.s.c.ShardLeaderElectionContext I may be the new leader - try and sync
   [junit4]   2> 3232628 INFO  (qtp1625444062-27991) [n:127.0.0.1:37840_ 
c:collection1 s:shard1  x:collection1_shard1_replica_n21] o.a.s.c.SyncStrategy 
Sync replicas to https://127.0.0.1:37840/collection1_shard1_replica_n21/
   [junit4]   2> 3232628 INFO  (qtp1625444062-27991) [n:127.0.0.1:37840_ 
c:collection1 s:shard1  x:collection1_shard1_replica_n21] o.a.s.c.SyncStrategy 
Sync Success - now sync replicas to me
   [junit4]   2> 3232629 INFO  (qtp1625444062-27991) [n:127.0.0.1:37840_ 
c:collection1 s:shard1  x:collection1_shard1_replica_n21] o.a.s.c.SyncStrategy 
https://127.0.0.1:37840/collection1_shard1_replica_n21/ has no replicas
   [junit4]   2> 3232633 INFO  (qtp1625444062-27991) [n:127.0.0.1:37840_ 
c:collection1 s:shard1  x:collection1_shard1_replica_n21] 
o.a.s.c.ShardLeaderElectionContext I am the new leader: 
https://127.0.0.1:37840/collection1_shard1_replica_n21/ shard1
   [junit4]   2> 3232634 DEBUG 
(OverseerStateUpdate-72259584929300485-127.0.0.1:38742_-n_0000000000) 
[n:127.0.0.1:38742_    ] o.a.s.c.Overseer processMessage: queueSize: 1, message 
= {
   [junit4]   2>   "operation":"leader",
   [junit4]   2>   "shard":"shard1",
   [junit4]   2>   "collection":"collection1",
   [junit4]   2>   "base_url":"https://127.0.0.1:37840";,
   [junit4]   2>   "core":"collection1_shard1_replica_n21",
   [junit4]   2>   "state":"active"} current state version: 0
   [junit4]   2> 3232757 DEBUG (ScheduledTrigger-8788-thread-3) [    ] 
o.a.s.c.a.NodeLostTrigger Running NodeLostTrigger: .auto_add_replicas with 
currently live nodes: 2
   [junit4]   2> 3232787 INFO  (qtp1625444062-27991) [n:127.0.0.1:37840_ 
c:collection1 s:shard1  x:collection1_shard1_replica_n21] o.a.s.c.ZkController 
I am the leader, no recovery necessary
   [junit4]   2> 3232788 DEBUG 
(OverseerStateUpdate-72259584929300485-127.0.0.1:38742_-n_0000000000) 
[n:127.0.0.1:38742_    ] o.a.s.c.Overseer processMessage: queueSize: 1, message 
= {
   [junit4]   2>   "core":"collection1_shard1_replica_n21",
   [junit4]   2>   "core_node_name":"core_node22",
   [junit4]   2>   "roles":null,
   [junit4]   2>   "base_url":"https://127.0.0.1:37840";,
   [junit4]   2>   "node_name":"127.0.0.1:37840_",
   [junit4]   2>   "numShards":"1",
   [junit4]   2>   "state":"active",
   [junit4]   2>   "shard":"shard1",
   [junit4]   2>   "collection":"collection1",
   [junit4]   2>   "type":"NRT",
   [junit4]   2>   "operation":"state"} current state version: 0
   [junit4]   2> 3232789 INFO  (qtp1625444062-27991) [n:127.0.0.1:37840_ 
c:collection1 s:shard1  x:collection1_shard1_replica_n21] o.a.s.s.HttpSolrCall 
[admin] webapp=null path=/admin/cores 
params={qt=/admin/cores&collection.configName=conf1&name=collection1_shard1_replica_n21&action=CREATE&collection=collection1&shard=shard1&wt=javabin&version=2&replicaType=NRT}
 status=0 QTime=1423
   [junit4]   2> 3232792 INFO  (qtp1625444062-27989) [n:127.0.0.1:37840_ 
c:collection1   ] o.a.s.s.HttpSolrCall [admin] webapp=null 
path=/admin/collections 
params={node=127.0.0.1:37840_&action=ADDREPLICA&collection=collection1&shard=shard1&type=NRT&wt=javabin&version=2}
 status=0 QTime=1467
   [junit4]   2> 3232890 INFO  (zkCallback-9121-thread-1) [    ] 
o.a.s.c.c.ZkStateReader A cluster state change:

[...truncated too long message...]

ve:

ivy-availability-check:
[loadresource] Do not set property disallowed.ivy.jars.list as its length is 0.

-ivy-fail-disallowed-ivy-version:

ivy-fail:

ivy-configure:
[ivy:configure] :: loading settings :: file = 
/x1/jenkins/jenkins-slave/workspace/Lucene-Solr-NightlyTests-master/checkout/lucene/top-level-ivy-settings.xml

resolve:

ivy-availability-check:
[loadresource] Do not set property disallowed.ivy.jars.list as its length is 0.

-ivy-fail-disallowed-ivy-version:

ivy-fail:

ivy-fail:

ivy-configure:
[ivy:configure] :: loading settings :: file = 
/x1/jenkins/jenkins-slave/workspace/Lucene-Solr-NightlyTests-master/checkout/lucene/top-level-ivy-settings.xml

resolve:

ivy-availability-check:
[loadresource] Do not set property disallowed.ivy.jars.list as its length is 0.

-ivy-fail-disallowed-ivy-version:

ivy-fail:

ivy-configure:
[ivy:configure] :: loading settings :: file = 
/x1/jenkins/jenkins-slave/workspace/Lucene-Solr-NightlyTests-master/checkout/lucene/top-level-ivy-settings.xml

resolve:

ivy-availability-check:
[loadresource] Do not set property disallowed.ivy.jars.list as its length is 0.

-ivy-fail-disallowed-ivy-version:

ivy-fail:

ivy-fail:

ivy-configure:
[ivy:configure] :: loading settings :: file = 
/x1/jenkins/jenkins-slave/workspace/Lucene-Solr-NightlyTests-master/checkout/lucene/top-level-ivy-settings.xml

resolve:

ivy-availability-check:
[loadresource] Do not set property disallowed.ivy.jars.list as its length is 0.

-ivy-fail-disallowed-ivy-version:

ivy-fail:

ivy-fail:

ivy-configure:
[ivy:configure] :: loading settings :: file = 
/x1/jenkins/jenkins-slave/workspace/Lucene-Solr-NightlyTests-master/checkout/lucene/top-level-ivy-settings.xml

resolve:

ivy-availability-check:
[loadresource] Do not set property disallowed.ivy.jars.list as its length is 0.

-ivy-fail-disallowed-ivy-version:

ivy-fail:

ivy-fail:

ivy-configure:
[ivy:configure] :: loading settings :: file = 
/x1/jenkins/jenkins-slave/workspace/Lucene-Solr-NightlyTests-master/checkout/lucene/top-level-ivy-settings.xml

resolve:

ivy-availability-check:
[loadresource] Do not set property disallowed.ivy.jars.list as its length is 0.

-ivy-fail-disallowed-ivy-version:

ivy-fail:

ivy-fail:

ivy-configure:
[ivy:configure] :: loading settings :: file = 
/x1/jenkins/jenkins-slave/workspace/Lucene-Solr-NightlyTests-master/checkout/lucene/top-level-ivy-settings.xml

resolve:

ivy-availability-check:
[loadresource] Do not set property disallowed.ivy.jars.list as its length is 0.

-ivy-fail-disallowed-ivy-version:

ivy-fail:

ivy-fail:

ivy-configure:
[ivy:configure] :: loading settings :: file = 
/x1/jenkins/jenkins-slave/workspace/Lucene-Solr-NightlyTests-master/checkout/lucene/top-level-ivy-settings.xml

resolve:

ivy-availability-check:
[loadresource] Do not set property disallowed.ivy.jars.list as its length is 0.

-ivy-fail-disallowed-ivy-version:

ivy-fail:

ivy-fail:

ivy-configure:
[ivy:configure] :: loading settings :: file = 
/x1/jenkins/jenkins-slave/workspace/Lucene-Solr-NightlyTests-master/checkout/lucene/top-level-ivy-settings.xml

resolve:

ivy-availability-check:
[loadresource] Do not set property disallowed.ivy.jars.list as its length is 0.

-ivy-fail-disallowed-ivy-version:

ivy-fail:

ivy-fail:

ivy-configure:
[ivy:configure] :: loading settings :: file = 
/x1/jenkins/jenkins-slave/workspace/Lucene-Solr-NightlyTests-master/checkout/lucene/top-level-ivy-settings.xml

resolve:

ivy-availability-check:
[loadresource] Do not set property disallowed.ivy.jars.list as its length is 0.

-ivy-fail-disallowed-ivy-version:

ivy-fail:

ivy-fail:

ivy-configure:
[ivy:configure] :: loading settings :: file = 
/x1/jenkins/jenkins-slave/workspace/Lucene-Solr-NightlyTests-master/checkout/lucene/top-level-ivy-settings.xml

resolve:

ivy-availability-check:
[loadresource] Do not set property disallowed.ivy.jars.list as its length is 0.

-ivy-fail-disallowed-ivy-version:

ivy-fail:

ivy-fail:

ivy-configure:
[ivy:configure] :: loading settings :: file = 
/x1/jenkins/jenkins-slave/workspace/Lucene-Solr-NightlyTests-master/checkout/lucene/top-level-ivy-settings.xml

resolve:

ivy-availability-check:
[loadresource] Do not set property disallowed.ivy.jars.list as its length is 0.

-ivy-fail-disallowed-ivy-version:

ivy-fail:

ivy-fail:

ivy-configure:
[ivy:configure] :: loading settings :: file = 
/x1/jenkins/jenkins-slave/workspace/Lucene-Solr-NightlyTests-master/checkout/lucene/top-level-ivy-settings.xml

resolve:

jar-checksums:
    [mkdir] Created dir: 
/x1/jenkins/jenkins-slave/workspace/Lucene-Solr-NightlyTests-master/checkout/solr/null692644586
     [copy] Copying 240 files to 
/x1/jenkins/jenkins-slave/workspace/Lucene-Solr-NightlyTests-master/checkout/solr/null692644586
   [delete] Deleting directory 
/x1/jenkins/jenkins-slave/workspace/Lucene-Solr-NightlyTests-master/checkout/solr/null692644586

check-working-copy:
[ivy:cachepath] :: resolving dependencies :: 
org.eclipse.jgit#org.eclipse.jgit-caller;working
[ivy:cachepath]         confs: [default]
[ivy:cachepath]         found 
org.eclipse.jgit#org.eclipse.jgit;4.6.0.201612231935-r in public
[ivy:cachepath]         found com.jcraft#jsch;0.1.53 in public
[ivy:cachepath]         found com.googlecode.javaewah#JavaEWAH;1.1.6 in public
[ivy:cachepath]         found org.apache.httpcomponents#httpclient;4.3.6 in 
public
[ivy:cachepath]         found org.apache.httpcomponents#httpcore;4.3.3 in public
[ivy:cachepath]         found commons-logging#commons-logging;1.1.3 in public
[ivy:cachepath]         found commons-codec#commons-codec;1.6 in public
[ivy:cachepath]         found org.slf4j#slf4j-api;1.7.2 in public
[ivy:cachepath] :: resolution report :: resolve 63ms :: artifacts dl 6ms
        ---------------------------------------------------------------------
        |                  |            modules            ||   artifacts   |
        |       conf       | number| search|dwnlded|evicted|| number|dwnlded|
        ---------------------------------------------------------------------
        |      default     |   8   |   0   |   0   |   0   ||   8   |   0   |
        ---------------------------------------------------------------------
[wc-checker] Initializing working copy...
[wc-checker] SLF4J: Failed to load class "org.slf4j.impl.StaticLoggerBinder".
[wc-checker] SLF4J: Defaulting to no-operation (NOP) logger implementation
[wc-checker] SLF4J: See http://www.slf4j.org/codes.html#StaticLoggerBinder for 
further details.
[wc-checker] Checking working copy status...

-jenkins-base:

BUILD SUCCESSFUL
Total time: 312 minutes 28 seconds
Archiving artifacts
java.lang.InterruptedException: no matches found within 10000
        at hudson.FilePath$34.hasMatch(FilePath.java:2678)
        at hudson.FilePath$34.invoke(FilePath.java:2557)
        at hudson.FilePath$34.invoke(FilePath.java:2547)
        at hudson.FilePath$FileCallableWrapper.call(FilePath.java:2918)
Also:   hudson.remoting.Channel$CallSiteStackTrace: Remote call to lucene
                at 
hudson.remoting.Channel.attachCallSiteStackTrace(Channel.java:1741)
                at 
hudson.remoting.UserRequest$ExceptionResponse.retrieve(UserRequest.java:357)
                at hudson.remoting.Channel.call(Channel.java:955)
                at hudson.FilePath.act(FilePath.java:1036)
                at hudson.FilePath.act(FilePath.java:1025)
                at hudson.FilePath.validateAntFileMask(FilePath.java:2547)
                at 
hudson.tasks.ArtifactArchiver.perform(ArtifactArchiver.java:243)
                at 
hudson.tasks.BuildStepCompatibilityLayer.perform(BuildStepCompatibilityLayer.java:81)
                at 
hudson.tasks.BuildStepMonitor$1.perform(BuildStepMonitor.java:20)
                at 
hudson.model.AbstractBuild$AbstractBuildExecution.perform(AbstractBuild.java:744)
                at 
hudson.model.AbstractBuild$AbstractBuildExecution.performAllBuildSteps(AbstractBuild.java:690)
                at hudson.model.Build$BuildExecution.post2(Build.java:186)
                at 
hudson.model.AbstractBuild$AbstractBuildExecution.post(AbstractBuild.java:635)
                at hudson.model.Run.execute(Run.java:1819)
                at hudson.model.FreeStyleBuild.run(FreeStyleBuild.java:43)
                at 
hudson.model.ResourceController.execute(ResourceController.java:97)
                at hudson.model.Executor.run(Executor.java:429)
Caused: hudson.FilePath$TunneledInterruptedException
        at hudson.FilePath$FileCallableWrapper.call(FilePath.java:2920)
        at hudson.remoting.UserRequest.perform(UserRequest.java:212)
        at hudson.remoting.UserRequest.perform(UserRequest.java:54)
        at hudson.remoting.Request$2.run(Request.java:369)
        at 
hudson.remoting.InterceptingExecutorService$1.call(InterceptingExecutorService.java:72)
        at java.util.concurrent.FutureTask.run(FutureTask.java:266)
        at 
java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142)
        at 
java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617)
        at java.lang.Thread.run(Thread.java:748)
Caused: java.lang.InterruptedException: java.lang.InterruptedException: no 
matches found within 10000
        at hudson.FilePath.act(FilePath.java:1038)
        at hudson.FilePath.act(FilePath.java:1025)
        at hudson.FilePath.validateAntFileMask(FilePath.java:2547)
        at hudson.tasks.ArtifactArchiver.perform(ArtifactArchiver.java:243)
        at 
hudson.tasks.BuildStepCompatibilityLayer.perform(BuildStepCompatibilityLayer.java:81)
        at hudson.tasks.BuildStepMonitor$1.perform(BuildStepMonitor.java:20)
        at 
hudson.model.AbstractBuild$AbstractBuildExecution.perform(AbstractBuild.java:744)
        at 
hudson.model.AbstractBuild$AbstractBuildExecution.performAllBuildSteps(AbstractBuild.java:690)
        at hudson.model.Build$BuildExecution.post2(Build.java:186)
        at 
hudson.model.AbstractBuild$AbstractBuildExecution.post(AbstractBuild.java:635)
        at hudson.model.Run.execute(Run.java:1819)
        at hudson.model.FreeStyleBuild.run(FreeStyleBuild.java:43)
        at hudson.model.ResourceController.execute(ResourceController.java:97)
        at hudson.model.Executor.run(Executor.java:429)
No artifacts found that match the file pattern 
"**/*.events,heapdumps/**,**/hs_err_pid*". Configuration error?
Recording test results
Build step 'Publish JUnit test result report' changed build result to UNSTABLE
Email was triggered for: Unstable (Test Failures)
Sending email for trigger: Unstable (Test Failures)
---------------------------------------------------------------------
To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org
For additional commands, e-mail: dev-h...@lucene.apache.org
