Build: https://builds.apache.org/job/Lucene-Solr-NightlyTests-master/1836/
3 tests failed.
FAILED:  org.apache.solr.cloud.api.collections.ShardSplitTest.testSplitWithChaosMonkey
Error Message:
Address already in use
Stack Trace:
java.net.BindException: Address already in use
    at __randomizedtesting.SeedInfo.seed([EECF44E6B8D77083:65E89737F9D1DB07]:0)
    at java.base/sun.nio.ch.Net.bind0(Native Method)
    at java.base/sun.nio.ch.Net.bind(Net.java:461)
    at java.base/sun.nio.ch.Net.bind(Net.java:453)
    at java.base/sun.nio.ch.ServerSocketChannelImpl.bind(ServerSocketChannelImpl.java:227)
    at java.base/sun.nio.ch.ServerSocketAdaptor.bind(ServerSocketAdaptor.java:80)
    at org.eclipse.jetty.server.ServerConnector.openAcceptChannel(ServerConnector.java:342)
    at org.eclipse.jetty.server.ServerConnector.open(ServerConnector.java:308)
    at org.eclipse.jetty.server.AbstractNetworkConnector.doStart(AbstractNetworkConnector.java:80)
    at org.eclipse.jetty.server.ServerConnector.doStart(ServerConnector.java:236)
    at org.eclipse.jetty.util.component.AbstractLifeCycle.start(AbstractLifeCycle.java:68)
    at org.eclipse.jetty.server.Server.doStart(Server.java:394)
    at org.eclipse.jetty.util.component.AbstractLifeCycle.start(AbstractLifeCycle.java:68)
    at org.apache.solr.client.solrj.embedded.JettySolrRunner.retryOnPortBindFailure(JettySolrRunner.java:558)
    at org.apache.solr.client.solrj.embedded.JettySolrRunner.start(JettySolrRunner.java:497)
    at org.apache.solr.client.solrj.embedded.JettySolrRunner.start(JettySolrRunner.java:465)
    at org.apache.solr.cloud.api.collections.ShardSplitTest.testSplitWithChaosMonkey(ShardSplitTest.java:499)
    at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
    at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)
    at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
    at java.base/java.lang.reflect.Method.invoke(Method.java:566)
    at com.carrotsearch.randomizedtesting.RandomizedRunner.invoke(RandomizedRunner.java:1750)
    at com.carrotsearch.randomizedtesting.RandomizedRunner$8.evaluate(RandomizedRunner.java:938)
    at com.carrotsearch.randomizedtesting.RandomizedRunner$9.evaluate(RandomizedRunner.java:974)
    at com.carrotsearch.randomizedtesting.RandomizedRunner$10.evaluate(RandomizedRunner.java:988)
    at org.apache.solr.BaseDistributedSearchTestCase$ShardsRepeatRule$ShardsFixedStatement.callStatement(BaseDistributedSearchTestCase.java:1082)
    at org.apache.solr.BaseDistributedSearchTestCase$ShardsRepeatRule$ShardsStatement.evaluate(BaseDistributedSearchTestCase.java:1054)
    at com.carrotsearch.randomizedtesting.rules.SystemPropertiesRestoreRule$1.evaluate(SystemPropertiesRestoreRule.java:57)
    at org.apache.lucene.util.TestRuleSetupTeardownChained$1.evaluate(TestRuleSetupTeardownChained.java:49)
    at org.apache.lucene.util.AbstractBeforeAfterRule$1.evaluate(AbstractBeforeAfterRule.java:45)
    at org.apache.lucene.util.TestRuleThreadAndTestName$1.evaluate(TestRuleThreadAndTestName.java:48)
    at org.apache.lucene.util.TestRuleIgnoreAfterMaxFailures$1.evaluate(TestRuleIgnoreAfterMaxFailures.java:64)
    at org.apache.lucene.util.TestRuleMarkFailure$1.evaluate(TestRuleMarkFailure.java:47)
    at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
    at com.carrotsearch.randomizedtesting.ThreadLeakControl$StatementRunner.run(ThreadLeakControl.java:368)
    at com.carrotsearch.randomizedtesting.ThreadLeakControl.forkTimeoutingTask(ThreadLeakControl.java:817)
    at com.carrotsearch.randomizedtesting.ThreadLeakControl$3.evaluate(ThreadLeakControl.java:468)
    at com.carrotsearch.randomizedtesting.RandomizedRunner.runSingleTest(RandomizedRunner.java:947)
    at com.carrotsearch.randomizedtesting.RandomizedRunner$5.evaluate(RandomizedRunner.java:832)
    at com.carrotsearch.randomizedtesting.RandomizedRunner$6.evaluate(RandomizedRunner.java:883)
    at com.carrotsearch.randomizedtesting.RandomizedRunner$7.evaluate(RandomizedRunner.java:894)
    at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
    at com.carrotsearch.randomizedtesting.rules.SystemPropertiesRestoreRule$1.evaluate(SystemPropertiesRestoreRule.java:57)
    at org.apache.lucene.util.AbstractBeforeAfterRule$1.evaluate(AbstractBeforeAfterRule.java:45)
    at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
    at org.apache.lucene.util.TestRuleStoreClassName$1.evaluate(TestRuleStoreClassName.java:41)
    at com.carrotsearch.randomizedtesting.rules.NoShadowingOrOverridesOnMethodsRule$1.evaluate(NoShadowingOrOverridesOnMethodsRule.java:40)
    at com.carrotsearch.randomizedtesting.rules.NoShadowingOrOverridesOnMethodsRule$1.evaluate(NoShadowingOrOverridesOnMethodsRule.java:40)
    at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
    at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
    at org.apache.lucene.util.TestRuleAssertionsRequired$1.evaluate(TestRuleAssertionsRequired.java:53)
    at org.apache.lucene.util.TestRuleMarkFailure$1.evaluate(TestRuleMarkFailure.java:47)
    at org.apache.lucene.util.TestRuleIgnoreAfterMaxFailures$1.evaluate(TestRuleIgnoreAfterMaxFailures.java:64)
    at org.apache.lucene.util.TestRuleIgnoreTestSuites$1.evaluate(TestRuleIgnoreTestSuites.java:54)
    at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
    at com.carrotsearch.randomizedtesting.ThreadLeakControl$StatementRunner.run(ThreadLeakControl.java:368)
    at java.base/java.lang.Thread.run(Thread.java:834)
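
The trace shows the chaos-monkey restart dying inside JettySolrRunner.retryOnPortBindFailure: the node is brought back on the port it held before, and even after retrying the bind still fails, i.e. the old socket has not been released yet. A minimal sketch of a bind-with-retry loop of this shape (class and helper names are made up for illustration; this is not Solr's actual implementation):

    import java.io.IOException;
    import java.net.BindException;
    import java.net.InetSocketAddress;
    import java.net.ServerSocket;

    public class BindRetryDemo {
        // Hypothetical helper: try to bind a fixed port, retrying while the
        // previous owner (e.g. a server that was just stopped) releases it.
        static ServerSocket bindWithRetry(int port, int attempts, long waitMs)
                throws IOException, InterruptedException {
            BindException last = null;
            for (int i = 1; i <= attempts; i++) {
                ServerSocket ss = new ServerSocket();
                try {
                    ss.bind(new InetSocketAddress(port));
                    return ss; // bound successfully
                } catch (BindException e) {
                    ss.close();
                    last = e;  // "Address already in use": someone still owns the port
                    Thread.sleep(waitMs);
                }
            }
            throw last; // all retries exhausted, as in the failure above
        }

        public static void main(String[] args) throws Exception {
            try (ServerSocket first = new ServerSocket(0)) { // grab an ephemeral port
                int port = first.getLocalPort();
                // Binding the same port again fails until `first` is closed.
                try {
                    bindWithRetry(port, 3, 100).close();
                } catch (BindException expected) {
                    System.out.println("still in use: " + expected.getMessage());
                }
            }
        }
    }

Without SO_REUSEPORT, the second bind cannot succeed until the first socket is fully closed, which is why a slow-to-die node can exhaust even a retry loop like the one in the trace.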
FAILED:  junit.framework.TestSuite.org.apache.solr.cloud.api.collections.ShardSplitTest
Error Message:
10 threads leaked from SUITE scope at org.apache.solr.cloud.api.collections.ShardSplitTest (full thread dump repeated verbatim in the stack trace below)
Stack Trace:
com.carrotsearch.randomizedtesting.ThreadLeakError: 10 threads leaked from SUITE scope at org.apache.solr.cloud.api.collections.ShardSplitTest:
   1) Thread[id=10373, name=qtp1341992107-10373, state=RUNNABLE, group=TGRP-ShardSplitTest]
        at java.base@11.0.1/sun.nio.ch.EPoll.wait(Native Method)
        at java.base@11.0.1/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:120)
        at java.base@11.0.1/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:124)
        at java.base@11.0.1/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141)
        at app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:423)
        at app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:360)
        at app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:357)
        at app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:181)
        at app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:168)
        at app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:132)
        at app//org.eclipse.jetty.io.ManagedSelector$$Lambda$157/0x0000000100312c40.run(Unknown Source)
        at app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:765)
        at app//org.eclipse.jetty.util.thread.QueuedThreadPool$2.run(QueuedThreadPool.java:683)
        at java.base@11.0.1/java.lang.Thread.run(Thread.java:834)
   2) Thread[id=10377, name=qtp1341992107-10377, state=TIMED_WAITING, group=TGRP-ShardSplitTest]
        at java.base@11.0.1/jdk.internal.misc.Unsafe.park(Native Method)
        at java.base@11.0.1/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:234)
        at java.base@11.0.1/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:2123)
        at app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:392)
        at app//org.eclipse.jetty.util.thread.QueuedThreadPool.idleJobPoll(QueuedThreadPool.java:656)
        at app//org.eclipse.jetty.util.thread.QueuedThreadPool.access$800(QueuedThreadPool.java:46)
        at app//org.eclipse.jetty.util.thread.QueuedThreadPool$2.run(QueuedThreadPool.java:720)
        at java.base@11.0.1/java.lang.Thread.run(Thread.java:834)
   3) Thread[id=10378, name=qtp1341992107-10378, state=TIMED_WAITING, group=TGRP-ShardSplitTest]
        at java.base@11.0.1/jdk.internal.misc.Unsafe.park(Native Method)
        at java.base@11.0.1/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:234)
        at java.base@11.0.1/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:2123)
        at app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:392)
        at app//org.eclipse.jetty.util.thread.QueuedThreadPool.idleJobPoll(QueuedThreadPool.java:656)
        at app//org.eclipse.jetty.util.thread.QueuedThreadPool.access$800(QueuedThreadPool.java:46)
        at app//org.eclipse.jetty.util.thread.QueuedThreadPool$2.run(QueuedThreadPool.java:720)
        at java.base@11.0.1/java.lang.Thread.run(Thread.java:834)
   4) Thread[id=10374, name=qtp1341992107-10374, state=RUNNABLE, group=TGRP-ShardSplitTest]
        at java.base@11.0.1/sun.nio.ch.EPoll.wait(Native Method)
        at java.base@11.0.1/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:120)
        at java.base@11.0.1/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:124)
        at java.base@11.0.1/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141)
        at app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:423)
        at app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:360)
        at app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:357)
        at app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:181)
        at app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:168)
        at app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:132)
        at app//org.eclipse.jetty.io.ManagedSelector$$Lambda$157/0x0000000100312c40.run(Unknown Source)
        at app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:765)
        at app//org.eclipse.jetty.util.thread.QueuedThreadPool$2.run(QueuedThreadPool.java:683)
        at java.base@11.0.1/java.lang.Thread.run(Thread.java:834)
   5) Thread[id=10379, name=qtp1341992107-10379, state=TIMED_WAITING, group=TGRP-ShardSplitTest]
        at java.base@11.0.1/jdk.internal.misc.Unsafe.park(Native Method)
        at java.base@11.0.1/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:234)
        at java.base@11.0.1/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:2123)
        at app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:392)
        at app//org.eclipse.jetty.util.thread.QueuedThreadPool.idleJobPoll(QueuedThreadPool.java:656)
        at app//org.eclipse.jetty.util.thread.QueuedThreadPool.access$800(QueuedThreadPool.java:46)
        at app//org.eclipse.jetty.util.thread.QueuedThreadPool$2.run(QueuedThreadPool.java:720)
        at java.base@11.0.1/java.lang.Thread.run(Thread.java:834)
   6) Thread[id=10381, name=Scheduler-1459405786, state=TIMED_WAITING, group=TGRP-ShardSplitTest]
        at java.base@11.0.1/jdk.internal.misc.Unsafe.park(Native Method)
        at java.base@11.0.1/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:234)
        at java.base@11.0.1/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:2123)
        at java.base@11.0.1/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182)
        at java.base@11.0.1/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
        at java.base@11.0.1/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1054)
        at java.base@11.0.1/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1114)
        at java.base@11.0.1/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:628)
        at java.base@11.0.1/java.lang.Thread.run(Thread.java:834)
   7) Thread[id=10380, name=qtp1341992107-10380, state=TIMED_WAITING, group=TGRP-ShardSplitTest]
        at java.base@11.0.1/jdk.internal.misc.Unsafe.park(Native Method)
        at java.base@11.0.1/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:234)
        at java.base@11.0.1/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:2123)
        at app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:392)
        at app//org.eclipse.jetty.util.thread.QueuedThreadPool.idleJobPoll(QueuedThreadPool.java:656)
        at app//org.eclipse.jetty.util.thread.QueuedThreadPool.access$800(QueuedThreadPool.java:46)
        at app//org.eclipse.jetty.util.thread.QueuedThreadPool$2.run(QueuedThreadPool.java:720)
        at java.base@11.0.1/java.lang.Thread.run(Thread.java:834)
   8) Thread[id=10506, name=Scheduler-1271050636, state=WAITING, group=TGRP-ShardSplitTest]
        at java.base@11.0.1/jdk.internal.misc.Unsafe.park(Native Method)
        at java.base@11.0.1/java.util.concurrent.locks.LockSupport.park(LockSupport.java:194)
        at java.base@11.0.1/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:2081)
        at java.base@11.0.1/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170)
        at java.base@11.0.1/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
        at java.base@11.0.1/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1054)
        at java.base@11.0.1/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1114)
        at java.base@11.0.1/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:628)
        at java.base@11.0.1/java.lang.Thread.run(Thread.java:834)
   9) Thread[id=10375, name=qtp1341992107-10375-acceptor-0@65be2018-ServerConnector@3aebc4d9{HTTP/1.1,[http/1.1, h2c]}{127.0.0.1:34962}, state=RUNNABLE, group=TGRP-ShardSplitTest]
        at java.base@11.0.1/sun.nio.ch.ServerSocketChannelImpl.accept0(Native Method)
        at java.base@11.0.1/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:533)
        at java.base@11.0.1/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:285)
        at app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:385)
        at app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:648)
        at app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:765)
        at app//org.eclipse.jetty.util.thread.QueuedThreadPool$2.run(QueuedThreadPool.java:683)
        at java.base@11.0.1/java.lang.Thread.run(Thread.java:834)
  10) Thread[id=10376, name=qtp1341992107-10376, state=TIMED_WAITING, group=TGRP-ShardSplitTest]
        at java.base@11.0.1/jdk.internal.misc.Unsafe.park(Native Method)
        at java.base@11.0.1/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:234)
        at java.base@11.0.1/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:2123)
        at app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:392)
        at app//org.eclipse.jetty.util.thread.QueuedThreadPool.idleJobPoll(QueuedThreadPool.java:656)
        at app//org.eclipse.jetty.util.thread.QueuedThreadPool.access$800(QueuedThreadPool.java:46)
        at app//org.eclipse.jetty.util.thread.QueuedThreadPool$2.run(QueuedThreadPool.java:720)
        at java.base@11.0.1/java.lang.Thread.run(Thread.java:834)
    at __randomizedtesting.SeedInfo.seed([EECF44E6B8D77083]:0)
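
All ten leaked threads belong to a single Jetty instance: one qtp1341992107 pool (selector threads, idle workers, and an acceptor still bound to 127.0.0.1:34962) plus two scheduler threads, which suggests a JettySolrRunner that was never stopped before the suite ended. A minimal sketch of the orderly shutdown that makes exactly these threads exit (assuming Jetty 9.4 on the classpath; not the test's actual teardown code):

    import org.eclipse.jetty.server.Server;

    public class JettyShutdownDemo {
        public static void main(String[] args) throws Exception {
            // Ephemeral port, as the harness does ("original configured port=0").
            Server server = new Server(0);
            server.start();
            // ... exercise the server ...
            // An orderly stop is what terminates the qtp selector, acceptor and
            // scheduler threads listed in the leak report; skipping it leaves
            // them RUNNABLE/TIMED_WAITING exactly as above.
            server.stop();
            server.join(); // wait for the thread pool to exit
        }
    }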
FAILED:  junit.framework.TestSuite.org.apache.solr.cloud.api.collections.ShardSplitTest
Error Message:
There are still zombie threads that couldn't be terminated (full thread dump repeated verbatim in the stack trace below)
Stack Trace:
com.carrotsearch.randomizedtesting.ThreadLeakError: There are still zombie threads that couldn't be terminated:
   1) Thread[id=10373, name=qtp1341992107-10373, state=RUNNABLE, group=TGRP-ShardSplitTest]
        at java.base@11.0.1/sun.nio.ch.EPoll.wait(Native Method)
        at java.base@11.0.1/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:120)
        at java.base@11.0.1/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:124)
        at java.base@11.0.1/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141)
        at app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:423)
        at app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:360)
        at app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:357)
        at app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:181)
        at app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:168)
        at app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:132)
        at app//org.eclipse.jetty.io.ManagedSelector$$Lambda$157/0x0000000100312c40.run(Unknown Source)
        at app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:765)
        at app//org.eclipse.jetty.util.thread.QueuedThreadPool$2.run(QueuedThreadPool.java:683)
        at java.base@11.0.1/java.lang.Thread.run(Thread.java:834)
   2) Thread[id=10374, name=qtp1341992107-10374, state=RUNNABLE, group=TGRP-ShardSplitTest]
        at java.base@11.0.1/sun.nio.ch.EPoll.wait(Native Method)
        at java.base@11.0.1/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:120)
        at java.base@11.0.1/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:124)
        at java.base@11.0.1/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141)
        at app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:423)
        at app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:360)
        at app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:357)
        at app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:181)
        at app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:168)
        at app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:132)
        at app//org.eclipse.jetty.io.ManagedSelector$$Lambda$157/0x0000000100312c40.run(Unknown Source)
        at app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:765)
        at app//org.eclipse.jetty.util.thread.QueuedThreadPool$2.run(QueuedThreadPool.java:683)
        at java.base@11.0.1/java.lang.Thread.run(Thread.java:834)
   3) Thread[id=10381, name=Scheduler-1459405786, state=TIMED_WAITING, group=TGRP-ShardSplitTest]
        at java.base@11.0.1/jdk.internal.misc.Unsafe.park(Native Method)
        at java.base@11.0.1/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:234)
        at java.base@11.0.1/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:2123)
        at java.base@11.0.1/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182)
        at java.base@11.0.1/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
        at java.base@11.0.1/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1054)
        at java.base@11.0.1/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1114)
        at java.base@11.0.1/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:628)
        at java.base@11.0.1/java.lang.Thread.run(Thread.java:834)
   4) Thread[id=10506, name=Scheduler-1271050636, state=WAITING, group=TGRP-ShardSplitTest]
        at java.base@11.0.1/jdk.internal.misc.Unsafe.park(Native Method)
        at java.base@11.0.1/java.util.concurrent.locks.LockSupport.park(LockSupport.java:194)
        at java.base@11.0.1/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:2081)
        at java.base@11.0.1/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170)
        at java.base@11.0.1/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
        at java.base@11.0.1/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1054)
        at java.base@11.0.1/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1114)
        at java.base@11.0.1/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:628)
        at java.base@11.0.1/java.lang.Thread.run(Thread.java:834)
    at __randomizedtesting.SeedInfo.seed([EECF44E6B8D77083]:0)
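
After reporting the leak, the test framework tries to terminate the leftover threads; the four listed here survived the attempt, so they are reported as zombies. Threads parked in native calls such as EPoll.wait, or pool workers that swallow interrupts and go back to waiting, cannot be killed from outside. A toy illustration of why interrupt-plus-join is not enough (this is not ThreadLeakControl's actual code):

    public class ZombieThreadDemo {
        public static void main(String[] args) throws Exception {
            Thread t = new Thread(() -> {
                // A loop that swallows interrupts, like a pool thread that
                // re-enters its wait call: interrupt() alone will not stop it.
                while (true) {
                    try {
                        Thread.sleep(1_000);
                    } catch (InterruptedException ignored) {
                        // swallowing the interrupt keeps the thread alive
                    }
                }
            }, "qtp-like-zombie");
            t.setDaemon(true);
            t.start();

            // Roughly what a leak detector can do from the outside:
            t.interrupt();
            t.join(500);                   // give it a chance to exit
            System.out.println(t.isAlive() // still true: a "zombie" thread
                    ? "zombie thread could not be terminated"
                    : "thread terminated");
        }
    }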
Build Log:
[...truncated 13689 lines...]
[junit4] Suite: org.apache.solr.cloud.api.collections.ShardSplitTest
[junit4] 2> 780600 INFO (SUITE-ShardSplitTest-seed#[EECF44E6B8D77083]-worker) [ ] o.a.s.SolrTestCaseJ4 SecureRandom sanity checks: test.solr.allowed.securerandom=null & java.security.egd=file:/dev/./urandom
[junit4] 2> Creating dataDir: /x1/jenkins/jenkins-slave/workspace/Lucene-Solr-NightlyTests-master/checkout/solr/build/solr-core/test/J1/temp/solr.cloud.api.collections.ShardSplitTest_EECF44E6B8D77083-001/init-core-data-001
[junit4] 2> 780608 WARN (SUITE-ShardSplitTest-seed#[EECF44E6B8D77083]-worker) [ ] o.a.s.SolrTestCaseJ4 startTrackingSearchers: numOpens=150 numCloses=150
[junit4] 2> 780608 INFO (SUITE-ShardSplitTest-seed#[EECF44E6B8D77083]-worker) [ ] o.a.s.SolrTestCaseJ4 Using PointFields (NUMERIC_POINTS_SYSPROP=true) w/NUMERIC_DOCVALUES_SYSPROP=true
[junit4] 2> 780609 INFO (SUITE-ShardSplitTest-seed#[EECF44E6B8D77083]-worker) [ ] o.a.s.SolrTestCaseJ4 Randomized ssl (false) and clientAuth (false) via: @org.apache.solr.SolrTestCaseJ4$SuppressSSL(bugUrl="https://issues.apache.org/jira/browse/SOLR-5776")
[junit4] 2> 780609 INFO (SUITE-ShardSplitTest-seed#[EECF44E6B8D77083]-worker) [ ] o.a.s.BaseDistributedSearchTestCase Setting hostContext system property: /
[junit4] 2> 780613 INFO (TEST-ShardSplitTest.testSplitShardWithRule-seed#[EECF44E6B8D77083]) [ ] o.a.s.c.ZkTestServer STARTING ZK TEST SERVER
[junit4] 2> 780626 INFO (ZkTestServer Run Thread) [ ] o.a.s.c.ZkTestServer client port:0.0.0.0/0.0.0.0:0
[junit4] 2> 780626 INFO (ZkTestServer Run Thread) [ ] o.a.s.c.ZkTestServer Starting server
[junit4] 2> 780726 INFO (TEST-ShardSplitTest.testSplitShardWithRule-seed#[EECF44E6B8D77083]) [ ] o.a.s.c.ZkTestServer start zk server on port:33255
[junit4] 2> 780726 INFO (TEST-ShardSplitTest.testSplitShardWithRule-seed#[EECF44E6B8D77083]) [ ] o.a.s.c.ZkTestServer parse host and port list: 127.0.0.1:33255
[junit4] 2> 780726 INFO (TEST-ShardSplitTest.testSplitShardWithRule-seed#[EECF44E6B8D77083]) [ ] o.a.s.c.ZkTestServer connecting to 127.0.0.1 33255
[junit4] 2> 780760 INFO (zkConnectionManagerCallback-2838-thread-1) [ ] o.a.s.c.c.ConnectionManager zkClient has connected
[junit4] 2> 780765 INFO (zkConnectionManagerCallback-2840-thread-1) [ ] o.a.s.c.c.ConnectionManager zkClient has connected
[junit4] 2> 780766 INFO (TEST-ShardSplitTest.testSplitShardWithRule-seed#[EECF44E6B8D77083]) [ ] o.a.s.c.ZkTestServer put /x1/jenkins/jenkins-slave/workspace/Lucene-Solr-NightlyTests-master/checkout/solr/core/src/test-files/solr/collection1/conf/solrconfig-tlog.xml to /configs/conf1/solrconfig.xml
[junit4] 2> 780768 INFO (TEST-ShardSplitTest.testSplitShardWithRule-seed#[EECF44E6B8D77083]) [ ] o.a.s.c.ZkTestServer put /x1/jenkins/jenkins-slave/workspace/Lucene-Solr-NightlyTests-master/checkout/solr/core/src/test-files/solr/collection1/conf/schema15.xml to /configs/conf1/schema.xml
[junit4] 2> 780769 INFO (TEST-ShardSplitTest.testSplitShardWithRule-seed#[EECF44E6B8D77083]) [ ] o.a.s.c.ZkTestServer put /x1/jenkins/jenkins-slave/workspace/Lucene-Solr-NightlyTests-master/checkout/solr/core/src/test-files/solr/collection1/conf/solrconfig.snippet.randomindexconfig.xml to /configs/conf1/solrconfig.snippet.randomindexconfig.xml
[junit4] 2> 780771 INFO (TEST-ShardSplitTest.testSplitShardWithRule-seed#[EECF44E6B8D77083]) [ ] o.a.s.c.ZkTestServer put /x1/jenkins/jenkins-slave/workspace/Lucene-Solr-NightlyTests-master/checkout/solr/core/src/test-files/solr/collection1/conf/stopwords.txt to /configs/conf1/stopwords.txt
[junit4] 2> 780772 INFO (TEST-ShardSplitTest.testSplitShardWithRule-seed#[EECF44E6B8D77083]) [ ] o.a.s.c.ZkTestServer put /x1/jenkins/jenkins-slave/workspace/Lucene-Solr-NightlyTests-master/checkout/solr/core/src/test-files/solr/collection1/conf/protwords.txt to /configs/conf1/protwords.txt
[junit4] 2> 780773 INFO (TEST-ShardSplitTest.testSplitShardWithRule-seed#[EECF44E6B8D77083]) [ ] o.a.s.c.ZkTestServer put /x1/jenkins/jenkins-slave/workspace/Lucene-Solr-NightlyTests-master/checkout/solr/core/src/test-files/solr/collection1/conf/currency.xml to /configs/conf1/currency.xml
[junit4] 2> 780775 INFO (TEST-ShardSplitTest.testSplitShardWithRule-seed#[EECF44E6B8D77083]) [ ] o.a.s.c.ZkTestServer put /x1/jenkins/jenkins-slave/workspace/Lucene-Solr-NightlyTests-master/checkout/solr/core/src/test-files/solr/collection1/conf/enumsConfig.xml to /configs/conf1/enumsConfig.xml
[junit4] 2> 780776 INFO (TEST-ShardSplitTest.testSplitShardWithRule-seed#[EECF44E6B8D77083]) [ ] o.a.s.c.ZkTestServer put /x1/jenkins/jenkins-slave/workspace/Lucene-Solr-NightlyTests-master/checkout/solr/core/src/test-files/solr/collection1/conf/open-exchange-rates.json to /configs/conf1/open-exchange-rates.json
[junit4] 2> 780777 INFO (TEST-ShardSplitTest.testSplitShardWithRule-seed#[EECF44E6B8D77083]) [ ] o.a.s.c.ZkTestServer put /x1/jenkins/jenkins-slave/workspace/Lucene-Solr-NightlyTests-master/checkout/solr/core/src/test-files/solr/collection1/conf/mapping-ISOLatin1Accent.txt to /configs/conf1/mapping-ISOLatin1Accent.txt
[junit4] 2> 780779 INFO (TEST-ShardSplitTest.testSplitShardWithRule-seed#[EECF44E6B8D77083]) [ ] o.a.s.c.ZkTestServer put /x1/jenkins/jenkins-slave/workspace/Lucene-Solr-NightlyTests-master/checkout/solr/core/src/test-files/solr/collection1/conf/old_synonyms.txt to /configs/conf1/old_synonyms.txt
[junit4] 2> 780780 INFO (TEST-ShardSplitTest.testSplitShardWithRule-seed#[EECF44E6B8D77083]) [ ] o.a.s.c.ZkTestServer put /x1/jenkins/jenkins-slave/workspace/Lucene-Solr-NightlyTests-master/checkout/solr/core/src/test-files/solr/collection1/conf/synonyms.txt to /configs/conf1/synonyms.txt
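
The "put ... to /configs/conf1/..." records above show the harness seeding ZooKeeper with the config set: each config file simply becomes the data of a znode under /configs/conf1, which the collection later references. Solr's tests use their own ZkTestServer helpers for this; a minimal sketch of the same idea with the plain ZooKeeper client (hypothetical class name; parent znodes assumed to already exist):

    import java.nio.file.Files;
    import java.nio.file.Path;
    import java.util.concurrent.CountDownLatch;
    import org.apache.zookeeper.CreateMode;
    import org.apache.zookeeper.ZooDefs;
    import org.apache.zookeeper.ZooKeeper;

    public class ConfigUploadSketch {
        public static void main(String[] args) throws Exception {
            // Connect to the test ZooKeeper (port 33255 in the log above).
            CountDownLatch connected = new CountDownLatch(1);
            ZooKeeper zk = new ZooKeeper("127.0.0.1:33255", 15000,
                    event -> connected.countDown());
            connected.await();

            // "put solrconfig-tlog.xml to /configs/conf1/solrconfig.xml":
            // the file's bytes become the data of a persistent znode.
            byte[] data = Files.readAllBytes(Path.of("solrconfig-tlog.xml"));
            zk.create("/configs/conf1/solrconfig.xml", data,
                    ZooDefs.Ids.OPEN_ACL_UNSAFE, CreateMode.PERSISTENT);
            zk.close();
        }
    }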
[junit4] 2> 780781 INFO (TEST-ShardSplitTest.testSplitShardWithRule-seed#[EECF44E6B8D77083]) [ ] o.a.s.c.AbstractFullDistribZkTestBase Will use NRT replicas unless explicitly asked otherwise
[junit4] 2> 780941 WARN (TEST-ShardSplitTest.testSplitShardWithRule-seed#[EECF44E6B8D77083]) [ ] o.e.j.s.AbstractConnector Ignoring deprecated socket close linger time
[junit4] 2> 780942 INFO (TEST-ShardSplitTest.testSplitShardWithRule-seed#[EECF44E6B8D77083]) [ ] o.a.s.c.s.e.JettySolrRunner Start Jetty (original configured port=0)
[junit4] 2> 780942 INFO (TEST-ShardSplitTest.testSplitShardWithRule-seed#[EECF44E6B8D77083]) [ ] o.a.s.c.s.e.JettySolrRunner Trying to start Jetty on port 0 try number 1 ...
[junit4] 2> 780942 INFO (TEST-ShardSplitTest.testSplitShardWithRule-seed#[EECF44E6B8D77083]) [ ] o.e.j.s.Server jetty-9.4.14.v20181114; built: 2018-11-14T21:20:31.478Z; git: c4550056e785fb5665914545889f21dc136ad9e6; jvm 11.0.1+13-LTS
[junit4] 2> 780943 INFO (TEST-ShardSplitTest.testSplitShardWithRule-seed#[EECF44E6B8D77083]) [ ] o.e.j.s.session DefaultSessionIdManager workerName=node0
[junit4] 2> 780943 INFO (TEST-ShardSplitTest.testSplitShardWithRule-seed#[EECF44E6B8D77083]) [ ] o.e.j.s.session No SessionScavenger set, using defaults
[junit4] 2> 780943 INFO (TEST-ShardSplitTest.testSplitShardWithRule-seed#[EECF44E6B8D77083]) [ ] o.e.j.s.session node0 Scavenging every 660000ms
[junit4] 2> 780944 INFO (TEST-ShardSplitTest.testSplitShardWithRule-seed#[EECF44E6B8D77083]) [ ] o.e.j.s.h.ContextHandler Started o.e.j.s.ServletContextHandler@413c725a{/,null,AVAILABLE}
[junit4] 2> 780946 INFO (TEST-ShardSplitTest.testSplitShardWithRule-seed#[EECF44E6B8D77083]) [ ] o.e.j.s.AbstractConnector Started ServerConnector@6740a9a0{HTTP/1.1,[http/1.1, h2c]}{127.0.0.1:46466}
[junit4] 2> 780946 INFO (TEST-ShardSplitTest.testSplitShardWithRule-seed#[EECF44E6B8D77083]) [ ] o.e.j.s.Server Started @781000ms
[junit4] 2> 780946 INFO (TEST-ShardSplitTest.testSplitShardWithRule-seed#[EECF44E6B8D77083]) [ ] o.a.s.c.s.e.JettySolrRunner Jetty properties: {hostContext=/, solr.data.dir=/x1/jenkins/jenkins-slave/workspace/Lucene-Solr-NightlyTests-master/checkout/solr/build/solr-core/test/J1/temp/solr.cloud.api.collections.ShardSplitTest_EECF44E6B8D77083-001/tempDir-001/control/data, hostPort=46466, coreRootDirectory=/x1/jenkins/jenkins-slave/workspace/Lucene-Solr-NightlyTests-master/checkout/solr/build/solr-core/test/J1/../../../../../../../../../../../x1/jenkins/jenkins-slave/workspace/Lucene-Solr-NightlyTests-master/checkout/solr/build/solr-core/test/J1/temp/solr.cloud.api.collections.ShardSplitTest_EECF44E6B8D77083-001/control-001/cores, replicaType=NRT}
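
Note the sequence above: Jetty is configured with port 0 ("original configured port=0"), the kernel assigns 46466, and the harness records it as hostPort for later restarts. A minimal sketch of reading an ephemeral port back from Jetty after start (assuming Jetty 9.4; not JettySolrRunner's actual code):

    import org.eclipse.jetty.server.Server;
    import org.eclipse.jetty.server.ServerConnector;

    public class EphemeralPortDemo {
        public static void main(String[] args) throws Exception {
            Server server = new Server(0);   // "original configured port=0"
            server.start();
            // After start, the connector knows the port the kernel picked
            // (46466 in the log above).
            int port = ((ServerConnector) server.getConnectors()[0]).getLocalPort();
            System.out.println("bound to port " + port);
            server.stop();
            server.join();
        }
    }

Restarting a node on the recorded fixed port rather than port 0 is what can produce the BindException in the first failure above if the old socket lingers.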
[junit4] 2> 780946 ERROR (TEST-ShardSplitTest.testSplitShardWithRule-seed#[EECF44E6B8D77083]) [ ] o.a.s.u.StartupLoggingUtils Missing Java Option solr.log.dir. Logging may be missing or incomplete.
[junit4] 2> 780946 INFO (TEST-ShardSplitTest.testSplitShardWithRule-seed#[EECF44E6B8D77083]) [ ] o.a.s.s.SolrDispatchFilter Using logger factory org.apache.logging.slf4j.Log4jLoggerFactory
[junit4] 2> 780947 INFO (TEST-ShardSplitTest.testSplitShardWithRule-seed#[EECF44E6B8D77083]) [ ] o.a.s.s.SolrDispatchFilter  ___      _       Welcome to Apache Solr™ version 9.0.0
[junit4] 2> 780947 INFO (TEST-ShardSplitTest.testSplitShardWithRule-seed#[EECF44E6B8D77083]) [ ] o.a.s.s.SolrDispatchFilter / __| ___| |_ _   Starting in cloud mode on port null
[junit4] 2> 780947 INFO (TEST-ShardSplitTest.testSplitShardWithRule-seed#[EECF44E6B8D77083]) [ ] o.a.s.s.SolrDispatchFilter \__ \/ _ \ | '_|  Install dir: null
[junit4] 2> 780947 INFO (TEST-ShardSplitTest.testSplitShardWithRule-seed#[EECF44E6B8D77083]) [ ] o.a.s.s.SolrDispatchFilter |___/\___/_|_|    Start time: 2019-04-30T12:08:30.256209Z
[junit4] 2> 780949 INFO (zkConnectionManagerCallback-2842-thread-1) [ ] o.a.s.c.c.ConnectionManager zkClient has connected
[junit4] 2> 780951 INFO (TEST-ShardSplitTest.testSplitShardWithRule-seed#[EECF44E6B8D77083]) [ ] o.a.s.s.SolrDispatchFilter Loading solr.xml from SolrHome (not found in ZooKeeper)
[junit4] 2> 780951 INFO (TEST-ShardSplitTest.testSplitShardWithRule-seed#[EECF44E6B8D77083]) [ ] o.a.s.c.SolrXmlConfig Loading container configuration from /x1/jenkins/jenkins-slave/workspace/Lucene-Solr-NightlyTests-master/checkout/solr/build/solr-core/test/J1/temp/solr.cloud.api.collections.ShardSplitTest_EECF44E6B8D77083-001/control-001/solr.xml
[junit4] 2> 780955 INFO (TEST-ShardSplitTest.testSplitShardWithRule-seed#[EECF44E6B8D77083]) [ ] o.a.s.c.SolrXmlConfig Configuration parameter autoReplicaFailoverWorkLoopDelay is ignored
[junit4] 2> 780955 INFO (TEST-ShardSplitTest.testSplitShardWithRule-seed#[EECF44E6B8D77083]) [ ] o.a.s.c.SolrXmlConfig Configuration parameter autoReplicaFailoverBadNodeExpiration is ignored
[junit4] 2> 780957 INFO (TEST-ShardSplitTest.testSplitShardWithRule-seed#[EECF44E6B8D77083]) [ ] o.a.s.c.SolrXmlConfig MBean server found: com.sun.jmx.mbeanserver.JmxMBeanServer@7eef652, but no JMX reporters were configured - adding default JMX reporter.
[junit4] 2> 781064 INFO (TEST-ShardSplitTest.testSplitShardWithRule-seed#[EECF44E6B8D77083]) [ ] o.a.s.h.c.HttpShardHandlerFactory Host whitelist initialized: WhitelistHostChecker [whitelistHosts=null, whitelistHostCheckingEnabled=false]
[junit4] 2> 781065 WARN (TEST-ShardSplitTest.testSplitShardWithRule-seed#[EECF44E6B8D77083]) [ ] o.e.j.u.s.S.config No Client EndPointIdentificationAlgorithm configured for SslContextFactory@6c76d357[provider=null,keyStore=null,trustStore=null]
[junit4] 2> 781069 WARN (TEST-ShardSplitTest.testSplitShardWithRule-seed#[EECF44E6B8D77083]) [ ] o.e.j.u.s.S.config No Client EndPointIdentificationAlgorithm configured for SslContextFactory@37c37bd0[provider=null,keyStore=null,trustStore=null]
[junit4] 2> 781070 INFO (TEST-ShardSplitTest.testSplitShardWithRule-seed#[EECF44E6B8D77083]) [ ] o.a.s.c.ZkContainer Zookeeper client=127.0.0.1:33255/solr
[junit4] 2> 781073 INFO (zkConnectionManagerCallback-2849-thread-1) [ ] o.a.s.c.c.ConnectionManager zkClient has connected
[junit4] 2> 781076 INFO (zkConnectionManagerCallback-2851-thread-1) [ ] o.a.s.c.c.ConnectionManager zkClient has connected
[junit4] 2> 781175 INFO (TEST-ShardSplitTest.testSplitShardWithRule-seed#[EECF44E6B8D77083]) [n:127.0.0.1:46466_ ] o.a.s.c.OverseerElectionContext I am going to be the leader 127.0.0.1:46466_
[junit4] 2> 781176 INFO (TEST-ShardSplitTest.testSplitShardWithRule-seed#[EECF44E6B8D77083]) [n:127.0.0.1:46466_ ] o.a.s.c.Overseer Overseer (id=73221569865187332-127.0.0.1:46466_-n_0000000000) starting
[junit4] 2> 781184 INFO (zkConnectionManagerCallback-2858-thread-1) [ ] o.a.s.c.c.ConnectionManager zkClient has connected
[junit4] 2> 781187 INFO (TEST-ShardSplitTest.testSplitShardWithRule-seed#[EECF44E6B8D77083]) [n:127.0.0.1:46466_ ] o.a.s.c.s.i.ZkClientClusterStateProvider Cluster at 127.0.0.1:33255/solr ready
[junit4] 2> 781188 DEBUG (OverseerCollectionConfigSetProcessor-73221569865187332-127.0.0.1:46466_-n_0000000000) [n:127.0.0.1:46466_ ] o.a.s.c.OverseerTaskProcessor Process current queue of overseer operations
[junit4] 2> 781188 INFO (TEST-ShardSplitTest.testSplitShardWithRule-seed#[EECF44E6B8D77083]) [n:127.0.0.1:46466_ ] o.a.s.c.ZkController Register node as live in ZooKeeper:/live_nodes/127.0.0.1:46466_
[junit4] 2> 781188 INFO (OverseerStateUpdate-73221569865187332-127.0.0.1:46466_-n_0000000000) [n:127.0.0.1:46466_ ] o.a.s.c.Overseer Starting to work on the main queue : 127.0.0.1:46466_
[junit4] 2> 781190 INFO (OverseerStateUpdate-73221569865187332-127.0.0.1:46466_-n_0000000000) [n:127.0.0.1:46466_ ] o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (0) -> (1)
[junit4] 2> 781190 INFO (zkCallback-2857-thread-1) [ ] o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (0) -> (1)
[junit4] 2> 781192 DEBUG (OverseerCollectionConfigSetProcessor-73221569865187332-127.0.0.1:46466_-n_0000000000) [n:127.0.0.1:46466_ ] o.a.s.c.OverseerTaskProcessor Cleaning up work-queue. #Running tasks: 0 #Completed tasks: 0
[junit4] 2> 781192 DEBUG (OverseerCollectionConfigSetProcessor-73221569865187332-127.0.0.1:46466_-n_0000000000) [n:127.0.0.1:46466_ ] o.a.s.c.OverseerTaskProcessor RunningTasks: []
[junit4] 2> 781192 DEBUG (OverseerCollectionConfigSetProcessor-73221569865187332-127.0.0.1:46466_-n_0000000000) [n:127.0.0.1:46466_ ] o.a.s.c.OverseerTaskProcessor BlockedTasks: []
[junit4] 2> 781192 DEBUG (OverseerCollectionConfigSetProcessor-73221569865187332-127.0.0.1:46466_-n_0000000000) [n:127.0.0.1:46466_ ] o.a.s.c.OverseerTaskProcessor CompletedTasks: []
[junit4] 2> 781192 INFO (OverseerCollectionConfigSetProcessor-73221569865187332-127.0.0.1:46466_-n_0000000000) [n:127.0.0.1:46466_ ] o.a.s.c.OverseerTaskProcessor RunningZKTasks: []
[junit4] 2> 781222 INFO (TEST-ShardSplitTest.testSplitShardWithRule-seed#[EECF44E6B8D77083]) [n:127.0.0.1:46466_ ] o.a.s.h.a.MetricsHistoryHandler No .system collection, keeping metrics history in memory.
[junit4] 2> 781256 INFO (TEST-ShardSplitTest.testSplitShardWithRule-seed#[EECF44E6B8D77083]) [n:127.0.0.1:46466_ ] o.a.s.m.r.SolrJmxReporter JMX monitoring for 'solr.node' (registry 'solr.node') enabled at server: com.sun.jmx.mbeanserver.JmxMBeanServer@7eef652
[junit4] 2> 781275 INFO (TEST-ShardSplitTest.testSplitShardWithRule-seed#[EECF44E6B8D77083]) [n:127.0.0.1:46466_ ] o.a.s.m.r.SolrJmxReporter JMX monitoring for 'solr.jvm' (registry 'solr.jvm') enabled at server: com.sun.jmx.mbeanserver.JmxMBeanServer@7eef652
[junit4] 2> 781275 INFO (TEST-ShardSplitTest.testSplitShardWithRule-seed#[EECF44E6B8D77083]) [n:127.0.0.1:46466_ ] o.a.s.m.r.SolrJmxReporter JMX monitoring for 'solr.jetty' (registry 'solr.jetty') enabled at server: com.sun.jmx.mbeanserver.JmxMBeanServer@7eef652
[junit4] 2> 781278 INFO (TEST-ShardSplitTest.testSplitShardWithRule-seed#[EECF44E6B8D77083]) [n:127.0.0.1:46466_ ] o.a.s.c.CorePropertiesLocator Found 0 core definitions underneath /x1/jenkins/jenkins-slave/workspace/Lucene-Solr-NightlyTests-master/checkout/solr/build/solr-core/test/J1/../../../../../../../../../../../x1/jenkins/jenkins-slave/workspace/Lucene-Solr-NightlyTests-master/checkout/solr/build/solr-core/test/J1/temp/solr.cloud.api.collections.ShardSplitTest_EECF44E6B8D77083-001/control-001/cores
[junit4] 2> 781308 INFO (zkConnectionManagerCallback-2864-thread-1) [ ] o.a.s.c.c.ConnectionManager zkClient has connected
[junit4] 2> 781310 INFO (TEST-ShardSplitTest.testSplitShardWithRule-seed#[EECF44E6B8D77083]) [ ] o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (0) -> (1)
[junit4] 2> 781311 INFO (TEST-ShardSplitTest.testSplitShardWithRule-seed#[EECF44E6B8D77083]) [ ] o.a.s.c.s.i.ZkClientClusterStateProvider Cluster at 127.0.0.1:33255/solr ready
[junit4] 2> 781315 INFO (qtp66998495-7800) [n:127.0.0.1:46466_ ] o.a.s.h.a.CollectionsHandler Invoked Collection Action :create with params collection.configName=conf1&name=control_collection&nrtReplicas=1&action=CREATE&numShards=1&createNodeSet=127.0.0.1:46466_&wt=javabin&version=2 and sendToOCPQueue=true
[junit4] 2> 781323 DEBUG (OverseerCollectionConfigSetProcessor-73221569865187332-127.0.0.1:46466_-n_0000000000) [n:127.0.0.1:46466_ ] o.a.s.c.OverseerTaskProcessor Got 1 tasks from work-queue : [[org.apache.solr.cloud.OverseerTaskQueue$QueueEvent@2e684ecf]]
[junit4] 2> 781323 DEBUG (OverseerCollectionConfigSetProcessor-73221569865187332-127.0.0.1:46466_-n_0000000000) [n:127.0.0.1:46466_ ] o.a.s.c.OverseerTaskProcessor Marked task [/overseer/collection-queue-work/qn-0000000000] as running
[junit4] 2> 781323 DEBUG (OverseerCollectionConfigSetProcessor-73221569865187332-127.0.0.1:46466_-n_0000000000) [n:127.0.0.1:46466_ ] o.a.s.c.OverseerTaskProcessor Overseer Collection Message Handler: Get the message id:/overseer/collection-queue-work/qn-0000000000 message:{
[junit4] 2>   "name":"control_collection",
[junit4] 2>   "fromApi":"true",
[junit4] 2>   "collection.configName":"conf1",
[junit4] 2>   "numShards":"1",
[junit4] 2>   "createNodeSet":"127.0.0.1:46466_",
[junit4] 2>   "nrtReplicas":"1",
[junit4] 2>   "stateFormat":"2",
[junit4] 2>   "replicationFactor":"1",
[junit4] 2>   "operation":"create"}
[junit4] 2> 781327 DEBUG (OverseerCollectionConfigSetProcessor-73221569865187332-127.0.0.1:46466_-n_0000000000) [n:127.0.0.1:46466_ ] o.a.s.c.OverseerTaskProcessor Cleaning up work-queue. #Running tasks: 1 #Completed tasks: 0
[junit4] 2> 781327 DEBUG (OverseerCollectionConfigSetProcessor-73221569865187332-127.0.0.1:46466_-n_0000000000) [n:127.0.0.1:46466_ ] o.a.s.c.OverseerTaskProcessor RunningTasks: [/overseer/collection-queue-work/qn-0000000000]
[junit4] 2> 781327 DEBUG (OverseerCollectionConfigSetProcessor-73221569865187332-127.0.0.1:46466_-n_0000000000) [n:127.0.0.1:46466_ ] o.a.s.c.OverseerTaskProcessor BlockedTasks: []
[junit4] 2> 781327 DEBUG (OverseerCollectionConfigSetProcessor-73221569865187332-127.0.0.1:46466_-n_0000000000) [n:127.0.0.1:46466_ ] o.a.s.c.OverseerTaskProcessor CompletedTasks: []
[junit4] 2> 781327 INFO (OverseerCollectionConfigSetProcessor-73221569865187332-127.0.0.1:46466_-n_0000000000) [n:127.0.0.1:46466_ ] o.a.s.c.OverseerTaskProcessor RunningZKTasks: [/overseer/collection-queue-work/qn-0000000000]
[junit4] 2> 781328 DEBUG (OverseerThreadFactory-2506-thread-1-processing-n:127.0.0.1:46466_) [n:127.0.0.1:46466_ ] o.a.s.c.OverseerTaskProcessor Runner processing /overseer/collection-queue-work/qn-0000000000
[junit4] 2> 781328 DEBUG (OverseerThreadFactory-2506-thread-1-processing-n:127.0.0.1:46466_) [n:127.0.0.1:46466_ ] o.a.s.c.a.c.OverseerCollectionMessageHandler OverseerCollectionMessageHandler.processMessage : create , {
[junit4] 2>   "name":"control_collection",
[junit4] 2>   "fromApi":"true",
[junit4] 2>   "collection.configName":"conf1",
[junit4] 2>   "numShards":"1",
[junit4] 2>   "createNodeSet":"127.0.0.1:46466_",
[junit4] 2>   "nrtReplicas":"1",
[junit4] 2>   "stateFormat":"2",
[junit4] 2>   "replicationFactor":"1",
[junit4] 2>   "operation":"create"}
[junit4] 2> 781329 INFO (OverseerThreadFactory-2506-thread-1-processing-n:127.0.0.1:46466_) [n:127.0.0.1:46466_ ] o.a.s.c.a.c.CreateCollectionCmd Create collection control_collection
[junit4] 2> 781330 DEBUG (OverseerThreadFactory-2506-thread-1-processing-n:127.0.0.1:46466_) [n:127.0.0.1:46466_ ] o.a.s.c.a.c.OverseerCollectionMessageHandler creating collections conf node /collections/control_collection
[junit4] 2> 781331 DEBUG (OverseerThreadFactory-2506-thread-1-processing-n:127.0.0.1:46466_) [n:127.0.0.1:46466_ ] o.a.s.c.a.c.CreateCollectionCmd Check for collection zkNode:control_collection
[junit4] 2> 781333 DEBUG (OverseerThreadFactory-2506-thread-1-processing-n:127.0.0.1:46466_) [n:127.0.0.1:46466_ ] o.a.s.c.a.c.CreateCollectionCmd Collection zkNode exists
[junit4] 2> 781335 DEBUG (OverseerStateUpdate-73221569865187332-127.0.0.1:46466_-n_0000000000) [n:127.0.0.1:46466_ ] o.a.s.c.Overseer processMessage: queueSize: 1, message = {
[junit4] 2>   "name":"control_collection",
[junit4] 2>   "fromApi":"true",
[junit4] 2>   "collection.configName":"conf1",
[junit4] 2>   "numShards":"1",
[junit4] 2>   "createNodeSet":"127.0.0.1:46466_",
[junit4] 2>   "nrtReplicas":"1",
[junit4] 2>   "stateFormat":"2",
[junit4] 2>   "replicationFactor":"1",
[junit4] 2>   "operation":"create"} current state version: 0
[junit4] 2> 781335 DEBUG (OverseerStateUpdate-73221569865187332-127.0.0.1:46466_-n_0000000000) [n:127.0.0.1:46466_ ] o.a.s.c.o.ClusterStateMutator building a new cName: control_collection
[junit4] 2> 781337 DEBUG (OverseerStateUpdate-73221569865187332-127.0.0.1:46466_-n_0000000000) [n:127.0.0.1:46466_ ] o.a.s.c.o.ZkStateWriter going to create_collection /collections/control_collection/state.json
[junit4] 2> 781441 DEBUG (OverseerThreadFactory-2506-thread-1-processing-n:127.0.0.1:46466_) [n:127.0.0.1:46466_ ] o.a.s.c.a.c.CreateCollectionCmd Creating SolrCores for new collection control_collection, shardNames [shard1] , message : {
[junit4] 2>   "name":"control_collection",
[junit4] 2>   "fromApi":"true",
[junit4] 2>   "collection.configName":"conf1",
[junit4] 2>   "numShards":"1",
[junit4] 2>   "createNodeSet":"127.0.0.1:46466_",
[junit4] 2>   "nrtReplicas":"1",
[junit4] 2>   "stateFormat":"2",
[junit4] 2>   "replicationFactor":"1",
[junit4] 2>   "operation":"create"}
[junit4] 2> 781443 DEBUG (OverseerThreadFactory-2506-thread-1-processing-n:127.0.0.1:46466_) [n:127.0.0.1:46466_ ] o.a.s.c.a.c.CreateCollectionCmd Creating core control_collection_shard1_replica_n1 as part of shard shard1 of collection control_collection on 127.0.0.1:46466_
[junit4] 2> 781445 INFO (qtp66998495-7802) [n:127.0.0.1:46466_ x:control_collection_shard1_replica_n1] o.a.s.h.a.CoreAdminOperation core create command qt=/admin/cores&collection.configName=conf1&newCollection=true&name=control_collection_shard1_replica_n1&action=CREATE&numShards=1&collection=control_collection&shard=shard1&wt=javabin&version=2&replicaType=NRT
[junit4] 2> 781445 INFO (qtp66998495-7802) [n:127.0.0.1:46466_ x:control_collection_shard1_replica_n1] o.a.s.c.TransientSolrCoreCacheDefault Allocating transient cache for 4 transient cores
[junit4] 2> 781449 DEBUG (OverseerStateUpdate-73221569865187332-127.0.0.1:46466_-n_0000000000) [n:127.0.0.1:46466_ ] o.a.s.c.Overseer processMessage: queueSize: 1, message = {
[junit4] 2>   "core":"control_collection_shard1_replica_n1",
[junit4] 2>   "roles":null,
[junit4] 2>   "base_url":"http://127.0.0.1:46466",
[junit4] 2>   "node_name":"127.0.0.1:46466_",
[junit4] 2>   "numShards":"1",
[junit4] 2>   "state":"down",
[junit4] 2>   "shard":"shard1",
[junit4] 2>   "collection":"control_collection",
[junit4] 2>   "type":"NRT",
[junit4] 2>   "operation":"state"} current state version: 0
[junit4] 2> 781449 DEBUG (OverseerStateUpdate-73221569865187332-127.0.0.1:46466_-n_0000000000) [n:127.0.0.1:46466_ ] o.a.s.c.o.ReplicaMutator Update state numShards=1 message={
[junit4] 2>   "core":"control_collection_shard1_replica_n1",
[junit4] 2>   "roles":null,
[junit4] 2>   "base_url":"http://127.0.0.1:46466",
[junit4] 2>   "node_name":"127.0.0.1:46466_",
[junit4] 2>   "numShards":"1",
[junit4] 2>   "state":"down",
[junit4] 2>   "shard":"shard1",
[junit4] 2>   "collection":"control_collection",
[junit4] 2>   "type":"NRT",
[junit4] 2>   "operation":"state"}
[junit4] 2> 781451 DEBUG (OverseerStateUpdate-73221569865187332-127.0.0.1:46466_-n_0000000000) [n:127.0.0.1:46466_ ] o.a.s.c.o.ReplicaMutator Will update state for replica: core_node2:{"core":"control_collection_shard1_replica_n1","base_url":"http://127.0.0.1:46466","node_name":"127.0.0.1:46466_","state":"down","type":"NRT"}
[junit4] 2> 781451 DEBUG (OverseerStateUpdate-73221569865187332-127.0.0.1:46466_-n_0000000000) [n:127.0.0.1:46466_ ] o.a.s.c.o.ReplicaMutator Collection is now: DocCollection(control_collection//collections/control_collection/state.json/0)={
[junit4] 2>   "pullReplicas":"0",
[junit4] 2>   "replicationFactor":"1",
[junit4] 2>   "router":{"name":"compositeId"},
[junit4] 2>   "maxShardsPerNode":"1",
[junit4] 2>   "autoAddReplicas":"false",
[junit4] 2>   "nrtReplicas":"1",
[junit4] 2>   "tlogReplicas":"0",
[junit4] 2>   "shards":{"shard1":{
[junit4] 2>       "range":"80000000-7fffffff",
[junit4] 2>       "state":"active",
[junit4] 2>       "replicas":{"core_node2":{
[junit4] 2>           "core":"control_collection_shard1_replica_n1",
[junit4] 2>           "base_url":"http://127.0.0.1:46466",
[junit4] 2>           "node_name":"127.0.0.1:46466_",
[junit4] 2>           "state":"down",
[junit4] 2>           "type":"NRT"}}}}}
[junit4] 2> 781552 DEBUG (OverseerStateUpdate-73221569865187332-127.0.0.1:46466_-n_0000000000) [n:127.0.0.1:46466_ ] o.a.s.c.o.ZkStateWriter going to update_collection /collections/control_collection/state.json version: 0
[junit4] 2> 782462 INFO (qtp66998495-7802) [n:127.0.0.1:46466_ c:control_collection s:shard1 x:control_collection_shard1_replica_n1] o.a.s.c.SolrConfig Using Lucene MatchVersion: 9.0.0
[junit4] 2> 782483 INFO (qtp66998495-7802) [n:127.0.0.1:46466_ c:control_collection s:shard1 x:control_collection_shard1_replica_n1] o.a.s.s.IndexSchema [control_collection_shard1_replica_n1] Schema name=test
[junit4] 2> 782576 INFO (qtp66998495-7802) [n:127.0.0.1:46466_ c:control_collection s:shard1 x:control_collection_shard1_replica_n1] o.a.s.s.IndexSchema Loaded schema test/1.6 with uniqueid field id
[junit4] 2> 782603 INFO (qtp66998495-7802) [n:127.0.0.1:46466_
c:control_collection s:shard1 x:control_collection_shard1_replica_n1]
o.a.s.c.CoreContainer Creating SolrCore 'control_collection_shard1_replica_n1'
using configuration from collection control_collection, trusted=true
[junit4] 2> 782603 INFO (qtp66998495-7802) [n:127.0.0.1:46466_
c:control_collection s:shard1 x:control_collection_shard1_replica_n1]
o.a.s.m.r.SolrJmxReporter JMX monitoring for
'solr.core.control_collection.shard1.replica_n1' (registry
'solr.core.control_collection.shard1.replica_n1') enabled at server:
com.sun.jmx.mbeanserver.JmxMBeanServer@7eef652
[junit4] 2> 782604 INFO (qtp66998495-7802) [n:127.0.0.1:46466_
c:control_collection s:shard1 x:control_collection_shard1_replica_n1]
o.a.s.c.SolrCore [[control_collection_shard1_replica_n1] ] Opening new SolrCore
at
[/x1/jenkins/jenkins-slave/workspace/Lucene-Solr-NightlyTests-master/checkout/solr/build/solr-core/test/J1/temp/solr.cloud.api.collections.ShardSplitTest_EECF44E6B8D77083-001/control-001/cores/control_collection_shard1_replica_n1],
dataDir=[/x1/jenkins/jenkins-slave/workspace/Lucene-Solr-NightlyTests-master/checkout/solr/build/solr-core/test/J1/../../../../../../../../../../../x1/jenkins/jenkins-slave/workspace/Lucene-Solr-NightlyTests-master/checkout/solr/build/solr-core/test/J1/temp/solr.cloud.api.collections.ShardSplitTest_EECF44E6B8D77083-001/control-001/cores/control_collection_shard1_replica_n1/data/]
[junit4] 2> 782608 INFO (qtp66998495-7802) [n:127.0.0.1:46466_
c:control_collection s:shard1 x:control_collection_shard1_replica_n1]
o.a.s.u.RandomMergePolicy RandomMergePolicy wrapping class
org.apache.lucene.index.TieredMergePolicy: [TieredMergePolicy:
maxMergeAtOnce=43, maxMergeAtOnceExplicit=16, maxMergedSegmentMB=3.951171875,
floorSegmentMB=1.4609375, forceMergeDeletesPctAllowed=26.619540138359685,
segmentsPerTier=20.0, maxCFSSegmentSizeMB=1.8681640625, noCFSRatio=1.0,
deletesPctAllowed=49.95797676489698
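For readers reconstructing the randomized index configuration above: the logged values map onto Lucene's TieredMergePolicy setters roughly as in the sketch below. This is an illustrative, hand-written snippet (the class name MergePolicyFromLog is invented here); the test actually wraps the policy in Solr's RandomMergePolicy, and maxMergeAtOnceExplicit is omitted from the sketch.

    import org.apache.lucene.index.IndexWriterConfig;
    import org.apache.lucene.index.TieredMergePolicy;

    public class MergePolicyFromLog {
      public static void main(String[] args) {
        // Values copied from the RandomMergePolicy log line above.
        TieredMergePolicy tmp = new TieredMergePolicy();
        tmp.setMaxMergeAtOnce(43);
        tmp.setMaxMergedSegmentMB(3.951171875);
        tmp.setFloorSegmentMB(1.4609375);
        tmp.setForceMergeDeletesPctAllowed(26.619540138359685);
        tmp.setSegmentsPerTier(20.0);
        tmp.setMaxCFSSegmentSizeMB(1.8681640625);
        tmp.setNoCFSRatio(1.0);
        tmp.setDeletesPctAllowed(49.95797676489698);
        // Attach the policy to an IndexWriterConfig, as the core would.
        IndexWriterConfig iwc = new IndexWriterConfig().setMergePolicy(tmp);
        System.out.println(iwc.getMergePolicy());
      }
    }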
[junit4] 2> 782619 WARN (qtp66998495-7802) [n:127.0.0.1:46466_
c:control_collection s:shard1 x:control_collection_shard1_replica_n1]
o.a.s.c.RequestHandlers INVALID paramSet a in requestHandler {type =
requestHandler,name = /dump,class = DumpRequestHandler,attributes =
{initParams=a, name=/dump, class=DumpRequestHandler},args =
{defaults={a=A,b=B}}}
[junit4] 2> 782688 INFO (qtp66998495-7802) [n:127.0.0.1:46466_
c:control_collection s:shard1 x:control_collection_shard1_replica_n1]
o.a.s.u.UpdateHandler Using UpdateLog implementation:
org.apache.solr.update.UpdateLog
[junit4] 2> 782688 INFO (qtp66998495-7802) [n:127.0.0.1:46466_
c:control_collection s:shard1 x:control_collection_shard1_replica_n1]
o.a.s.u.UpdateLog Initializing UpdateLog: dataDir= defaultSyncLevel=FLUSH
numRecordsToKeep=100 maxNumLogsToKeep=10 numVersionBuckets=65536
[junit4] 2> 782691 INFO (qtp66998495-7802) [n:127.0.0.1:46466_
c:control_collection s:shard1 x:control_collection_shard1_replica_n1]
o.a.s.u.CommitTracker Hard AutoCommit: disabled
[junit4] 2> 782691 INFO (qtp66998495-7802) [n:127.0.0.1:46466_
c:control_collection s:shard1 x:control_collection_shard1_replica_n1]
o.a.s.u.CommitTracker Soft AutoCommit: disabled
[junit4] 2> 782694 INFO (qtp66998495-7802) [n:127.0.0.1:46466_
c:control_collection s:shard1 x:control_collection_shard1_replica_n1]
o.a.s.u.RandomMergePolicy RandomMergePolicy wrapping class
org.apache.lucene.index.TieredMergePolicy: [TieredMergePolicy:
maxMergeAtOnce=40, maxMergeAtOnceExplicit=15, maxMergedSegmentMB=70.234375,
floorSegmentMB=2.1064453125, forceMergeDeletesPctAllowed=19.50549141649115,
segmentsPerTier=34.0, maxCFSSegmentSizeMB=8.796093022207999E12, noCFSRatio=0.0,
deletesPctAllowed=46.84648941943949
[junit4] 2> 782695 INFO (qtp66998495-7802) [n:127.0.0.1:46466_
c:control_collection s:shard1 x:control_collection_shard1_replica_n1]
o.a.s.s.SolrIndexSearcher Opening
[Searcher@19d35147[control_collection_shard1_replica_n1] main]
[junit4] 2> 782697 INFO (qtp66998495-7802) [n:127.0.0.1:46466_
c:control_collection s:shard1 x:control_collection_shard1_replica_n1]
o.a.s.r.ManagedResourceStorage Configured ZooKeeperStorageIO with znodeBase:
/configs/conf1
[junit4] 2> 782697 INFO (qtp66998495-7802) [n:127.0.0.1:46466_
c:control_collection s:shard1 x:control_collection_shard1_replica_n1]
o.a.s.r.ManagedResourceStorage Loaded null at path _rest_managed.json using
ZooKeeperStorageIO:path=/configs/conf1
[junit4] 2> 782698 INFO (qtp66998495-7802) [n:127.0.0.1:46466_
c:control_collection s:shard1 x:control_collection_shard1_replica_n1]
o.a.s.h.ReplicationHandler Commits will be reserved for 10000ms.
[junit4] 2> 782698 INFO (qtp66998495-7802) [n:127.0.0.1:46466_
c:control_collection s:shard1 x:control_collection_shard1_replica_n1]
o.a.s.u.UpdateLog Could not find max version in index or recent updates, using
new clock 1632240782023852032
[junit4] 2> 782703 INFO
(searcherExecutor-2511-thread-1-processing-n:127.0.0.1:46466_
x:control_collection_shard1_replica_n1 c:control_collection s:shard1)
[n:127.0.0.1:46466_ c:control_collection s:shard1
x:control_collection_shard1_replica_n1] o.a.s.c.SolrCore
[control_collection_shard1_replica_n1] Registered new searcher
Searcher@19d35147[control_collection_shard1_replica_n1]
main{ExitableDirectoryReader(UninvertingDirectoryReader())}
[junit4] 2> 782706 INFO (qtp66998495-7802) [n:127.0.0.1:46466_
c:control_collection s:shard1 x:control_collection_shard1_replica_n1]
o.a.s.c.ZkShardTerms Successful update of terms at
/collections/control_collection/terms/shard1 to Terms{values={core_node2=0},
version=0}
[junit4] 2> 782707 INFO (qtp66998495-7802) [n:127.0.0.1:46466_
c:control_collection s:shard1 x:control_collection_shard1_replica_n1]
o.a.s.c.ShardLeaderElectionContextBase make sure parent is created
/collections/control_collection/leaders/shard1
[junit4] 2> 782711 INFO (qtp66998495-7802) [n:127.0.0.1:46466_
c:control_collection s:shard1 x:control_collection_shard1_replica_n1]
o.a.s.c.ShardLeaderElectionContext Enough replicas found to continue.
[junit4] 2> 782711 INFO (qtp66998495-7802) [n:127.0.0.1:46466_
c:control_collection s:shard1 x:control_collection_shard1_replica_n1]
o.a.s.c.ShardLeaderElectionContext I may be the new leader - try and sync
[junit4] 2> 782711 INFO (qtp66998495-7802) [n:127.0.0.1:46466_
c:control_collection s:shard1 x:control_collection_shard1_replica_n1]
o.a.s.c.SyncStrategy Sync replicas to
http://127.0.0.1:46466/control_collection_shard1_replica_n1/
[junit4] 2> 782711 INFO (qtp66998495-7802) [n:127.0.0.1:46466_
c:control_collection s:shard1 x:control_collection_shard1_replica_n1]
o.a.s.c.SyncStrategy Sync Success - now sync replicas to me
[junit4] 2> 782712 INFO (qtp66998495-7802) [n:127.0.0.1:46466_
c:control_collection s:shard1 x:control_collection_shard1_replica_n1]
o.a.s.c.SyncStrategy
http://127.0.0.1:46466/control_collection_shard1_replica_n1/ has no replicas
[junit4] 2> 782712 INFO (qtp66998495-7802) [n:127.0.0.1:46466_
c:control_collection s:shard1 x:control_collection_shard1_replica_n1]
o.a.s.c.ShardLeaderElectionContextBase Creating leader registration node
/collections/control_collection/leaders/shard1/leader after winning as
/collections/control_collection/leader_elect/shard1/election/73221569865187332-core_node2-n_0000000000
[junit4] 2> 782714 INFO (qtp66998495-7802) [n:127.0.0.1:46466_
c:control_collection s:shard1 x:control_collection_shard1_replica_n1]
o.a.s.c.ShardLeaderElectionContext I am the new leader:
http://127.0.0.1:46466/control_collection_shard1_replica_n1/ shard1
[junit4] 2> 782715 DEBUG
(OverseerStateUpdate-73221569865187332-127.0.0.1:46466_-n_0000000000)
[n:127.0.0.1:46466_ ] o.a.s.c.Overseer processMessage: queueSize: 1, message
= {
[junit4] 2> "operation":"leader",
[junit4] 2> "shard":"shard1",
[junit4] 2> "collection":"control_collection",
[junit4] 2> "base_url":"http://127.0.0.1:46466",
[junit4] 2> "core":"control_collection_shard1_replica_n1",
[junit4] 2> "state":"active"} current state version: 0
[junit4] 2> 782816 DEBUG
(OverseerStateUpdate-73221569865187332-127.0.0.1:46466_-n_0000000000)
[n:127.0.0.1:46466_ ] o.a.s.c.o.ZkStateWriter going to update_collection
/collections/control_collection/state.json version: 1
[junit4] 2> 782816 INFO (zkCallback-2850-thread-1) [ ]
o.a.s.c.c.ZkStateReader A cluster state change: [WatchedEvent
state:SyncConnected type:NodeDataChanged
path:/collections/control_collection/state.json] for collection
[control_collection] has occurred - updating... (live nodes size: [1])
[junit4] 2> 782816 INFO (zkCallback-2850-thread-2) [ ]
o.a.s.c.c.ZkStateReader A cluster state change: [WatchedEvent
state:SyncConnected type:NodeDataChanged
path:/collections/control_collection/state.json] for collection
[control_collection] has occurred - updating... (live nodes size: [1])
[junit4] 2> 782819 INFO (qtp66998495-7802) [n:127.0.0.1:46466_
c:control_collection s:shard1 x:control_collection_shard1_replica_n1]
o.a.s.c.ZkController I am the leader, no recovery necessary
[junit4] 2> 782820 DEBUG
(OverseerStateUpdate-73221569865187332-127.0.0.1:46466_-n_0000000000)
[n:127.0.0.1:46466_ ] o.a.s.c.Overseer processMessage: queueSize: 1, message
= {
[junit4] 2> "core":"control_collection_shard1_replica_n1",
[junit4] 2> "core_node_name":"core_node2",
[junit4] 2> "roles":null,
[junit4] 2> "base_url":"http://127.0.0.1:46466",
[junit4] 2> "node_name":"127.0.0.1:46466_",
[junit4] 2> "numShards":"1",
[junit4] 2> "state":"active",
[junit4] 2> "shard":"shard1",
[junit4] 2> "collection":"control_collection",
[junit4] 2> "type":"NRT",
[junit4] 2> "operation":"state"} current state version: 0
[junit4] 2> 782820 DEBUG
(OverseerStateUpdate-73221569865187332-127.0.0.1:46466_-n_0000000000)
[n:127.0.0.1:46466_ ] o.a.s.c.o.ReplicaMutator Update state numShards=1
message={
[junit4] 2> "core":"control_collection_shard1_replica_n1",
[junit4] 2> "core_node_name":"core_node2",
[junit4] 2> "roles":null,
[junit4] 2> "base_url":"http://127.0.0.1:46466",
[junit4] 2> "node_name":"127.0.0.1:46466_",
[junit4] 2> "numShards":"1",
[junit4] 2> "state":"active",
[junit4] 2> "shard":"shard1",
[junit4] 2> "collection":"control_collection",
[junit4] 2> "type":"NRT",
[junit4] 2> "operation":"state"}
[junit4] 2> 782820 DEBUG
(OverseerStateUpdate-73221569865187332-127.0.0.1:46466_-n_0000000000)
[n:127.0.0.1:46466_ ] o.a.s.c.o.ReplicaMutator Will update state for
replica:
core_node2:{"core":"control_collection_shard1_replica_n1","base_url":"http://127.0.0.1:46466","node_name":"127.0.0.1:46466_","state":"active","type":"NRT","leader":"true"}
[junit4] 2> 782821 DEBUG
(OverseerStateUpdate-73221569865187332-127.0.0.1:46466_-n_0000000000)
[n:127.0.0.1:46466_ ] o.a.s.c.o.ReplicaMutator Collection is now:
DocCollection(control_collection//collections/control_collection/state.json/2)={
[junit4] 2> "pullReplicas":"0",
[junit4] 2> "replicationFactor":"1",
[junit4] 2> "router":{"name":"compositeId"},
[junit4] 2> "maxShardsPerNode":"1",
[junit4] 2> "autoAddReplicas":"false",
[junit4] 2> "nrtReplicas":"1",
[junit4] 2> "tlogReplicas":"0",
[junit4] 2> "shards":{"shard1":{
[junit4] 2> "range":"80000000-7fffffff",
[junit4] 2> "state":"active",
[junit4] 2> "replicas":{"core_node2":{
[junit4] 2> "core":"control_collection_shard1_replica_n1",
[junit4] 2> "base_url":"http://127.0.0.1:46466",
[junit4] 2> "node_name":"127.0.0.1:46466_",
[junit4] 2> "state":"active",
[junit4] 2> "type":"NRT",
[junit4] 2> "leader":"true"}}}}}
[junit4] 2> 782822 INFO (qtp66998495-7802) [n:127.0.0.1:46466_
c:control_collection s:shard1 x:control_collection_shard1_replica_n1]
o.a.s.s.HttpSolrCall [admin] webapp=null path=/admin/cores
params={qt=/admin/cores&collection.configName=conf1&newCollection=true&name=control_collection_shard1_replica_n1&action=CREATE&numShards=1&collection=control_collection&shard=shard1&wt=javabin&version=2&replicaType=NRT}
status=0 QTime=1377
[junit4] 2> 782822 DEBUG
(OverseerThreadFactory-2506-thread-1-processing-n:127.0.0.1:46466_)
[n:127.0.0.1:46466_ ] o.a.s.c.a.c.CreateCollectionCmd Finished create
command on all shards for collection: control_collection
[junit4] 2> 782824 DEBUG
(OverseerThreadFactory-2506-thread-1-processing-n:127.0.0.1:46466_)
[n:127.0.0.1:46466_ ] o.a.s.c.OverseerTaskProcessor Completed
task:[/overseer/collection-queue-work/qn-0000000000]
[junit4] 2> 782826 DEBUG
(OverseerThreadFactory-2506-thread-1-processing-n:127.0.0.1:46466_)
[n:127.0.0.1:46466_ ] o.a.s.c.OverseerTaskProcessor Marked task
[/overseer/collection-queue-work/qn-0000000000] as completed.
[junit4] 2> 782826 DEBUG
(OverseerThreadFactory-2506-thread-1-processing-n:127.0.0.1:46466_)
[n:127.0.0.1:46466_ ] o.a.s.c.OverseerTaskProcessor RunningTasks: []
[junit4] 2> 782826 DEBUG
(OverseerThreadFactory-2506-thread-1-processing-n:127.0.0.1:46466_)
[n:127.0.0.1:46466_ ] o.a.s.c.OverseerTaskProcessor BlockedTasks: []
[junit4] 2> 782826 DEBUG
(OverseerThreadFactory-2506-thread-1-processing-n:127.0.0.1:46466_)
[n:127.0.0.1:46466_ ] o.a.s.c.OverseerTaskProcessor CompletedTasks:
[/overseer/collection-queue-work/qn-0000000000]
[junit4] 2> 782826 INFO
(OverseerThreadFactory-2506-thread-1-processing-n:127.0.0.1:46466_)
[n:127.0.0.1:46466_ ] o.a.s.c.OverseerTaskProcessor RunningZKTasks:
[/overseer/collection-queue-work/qn-0000000000]
[junit4] 2> 782826 DEBUG
(OverseerThreadFactory-2506-thread-1-processing-n:127.0.0.1:46466_)
[n:127.0.0.1:46466_ ] o.a.s.c.OverseerTaskProcessor Overseer Collection
Message Handler: Message id:/overseer/collection-queue-work/qn-0000000000
complete,
response:{success={127.0.0.1:46466_={responseHeader={status=0,QTime=1377},core=control_collection_shard1_replica_n1}}}
[junit4] 2> 782831 INFO (qtp66998495-7800) [n:127.0.0.1:46466_ ]
o.a.s.h.a.CollectionsHandler Wait for new collection to be active for at most
45 seconds. Check all shard replicas
[junit4] 2> 782921 DEBUG
(OverseerStateUpdate-73221569865187332-127.0.0.1:46466_-n_0000000000)
[n:127.0.0.1:46466_ ] o.a.s.c.o.ZkStateWriter going to update_collection
/collections/control_collection/state.json version: 2
[junit4] 2> 782922 INFO (zkCallback-2850-thread-1) [ ]
o.a.s.c.c.ZkStateReader A cluster state change: [WatchedEvent
state:SyncConnected type:NodeDataChanged
path:/collections/control_collection/state.json] for collection
[control_collection] has occurred - updating... (live nodes size: [1])
[junit4] 2> 782922 INFO (zkCallback-2850-thread-2) [ ]
o.a.s.c.c.ZkStateReader A cluster state change: [WatchedEvent
state:SyncConnected type:NodeDataChanged
path:/collections/control_collection/state.json] for collection
[control_collection] has occurred - updating... (live nodes size: [1])
[junit4] 2> 782922 INFO (zkCallback-2850-thread-3) [ ]
o.a.s.c.c.ZkStateReader A cluster state change: [WatchedEvent
state:SyncConnected type:NodeDataChanged
path:/collections/control_collection/state.json] for collection
[control_collection] has occurred - updating... (live nodes size: [1])
[junit4] 2> 782923 INFO (qtp66998495-7800) [n:127.0.0.1:46466_ ]
o.a.s.s.HttpSolrCall [admin] webapp=null path=/admin/collections
params={collection.configName=conf1&name=control_collection&nrtReplicas=1&action=CREATE&numShards=1&createNodeSet=127.0.0.1:46466_&wt=javabin&version=2}
status=0 QTime=1607
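For reference, the /admin/collections CREATE call logged just above corresponds to roughly the following SolrJ request. This is only an illustrative sketch, not the test's actual code; the collection name, config name, shard/replica counts, createNodeSet, and the ZooKeeper address 127.0.0.1:33255/solr are taken from the log, and the class name CreateControlCollection is invented here.

    import java.util.Collections;
    import java.util.Optional;
    import org.apache.solr.client.solrj.impl.CloudSolrClient;
    import org.apache.solr.client.solrj.request.CollectionAdminRequest;

    public class CreateControlCollection {
      public static void main(String[] args) throws Exception {
        // Connect to the test cluster's ZooKeeper (address taken from the log).
        try (CloudSolrClient client = new CloudSolrClient.Builder(
            Collections.singletonList("127.0.0.1:33255"), Optional.of("/solr")).build()) {
          // Same parameters as the logged request: conf1, numShards=1, nrtReplicas=1.
          CollectionAdminRequest.createCollection("control_collection", "conf1", 1, 1)
              .setCreateNodeSet("127.0.0.1:46466_") // pin the replica to the control node
              .process(client);
        }
      }
    }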
[junit4] 2> 782926 WARN (NIOServerCxn.Factory:0.0.0.0/0.0.0.0:0) [ ]
o.a.z.s.NIOServerCnxn Unable to read additional data from client sessionid
0x10422a13db60006, likely client has closed socket
[junit4] 2> 782929 INFO (zkConnectionManagerCallback-2870-thread-1) [
] o.a.s.c.c.ConnectionManager zkClient has connected
[junit4] 2> 782935 INFO
(TEST-ShardSplitTest.testSplitShardWithRule-seed#[EECF44E6B8D77083]) [ ]
o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (0) -> (1)
[junit4] 2> 782937 INFO
(TEST-ShardSplitTest.testSplitShardWithRule-seed#[EECF44E6B8D77083]) [ ]
o.a.s.c.s.i.ZkClientClusterStateProvider Cluster at 127.0.0.1:33255/solr ready
[junit4] 2> 782937 INFO
(TEST-ShardSplitTest.testSplitShardWithRule-seed#[EECF44E6B8D77083]) [ ]
o.a.s.c.ChaosMonkey monkey: init - expire sessions:false cause connection
loss:false
[junit4] 2> 782938 INFO (qtp66998495-7802) [n:127.0.0.1:46466_ ]
o.a.s.h.a.CollectionsHandler Invoked Collection Action :create with params
collection.configName=conf1&name=collection1&nrtReplicas=1&action=CREATE&numShards=2&createNodeSet=&stateFormat=2&wt=javabin&version=2
and sendToOCPQueue=true
[junit4] 2> 782941 DEBUG
(OverseerCollectionConfigSetProcessor-73221569865187332-127.0.0.1:46466_-n_0000000000)
[n:127.0.0.1:46466_ ] o.a.s.c.OverseerTaskProcessor Got 1 tasks from
work-queue : [[org.apache.solr.cloud.OverseerTaskQueue$QueueEvent@2e684ed1]]
[junit4] 2> 782942 DEBUG
(OverseerCollectionConfigSetProcessor-73221569865187332-127.0.0.1:46466_-n_0000000000)
[n:127.0.0.1:46466_ ] o.a.s.c.OverseerTaskProcessor Marked task
[/overseer/collection-queue-work/qn-0000000002] as running
[junit4] 2> 782942 DEBUG
(OverseerCollectionConfigSetProcessor-73221569865187332-127.0.0.1:46466_-n_0000000000)
[n:127.0.0.1:46466_ ] o.a.s.c.OverseerTaskProcessor Overseer Collection
Message Handler: Get the message
id:/overseer/collection-queue-work/qn-0000000002 message:{
[junit4] 2> "name":"collection1",
[junit4] 2> "fromApi":"true",
[junit4] 2> "collection.configName":"conf1",
[junit4] 2> "numShards":"2",
[junit4] 2> "createNodeSet":"",
[junit4] 2> "stateFormat":"2",
[junit4] 2> "nrtReplicas":"1",
[junit4] 2> "replicationFactor":"1",
[junit4] 2> "operation":"create"}
[junit4] 2> 782942 DEBUG
(OverseerCollectionConfigSetProcessor-73221569865187332-127.0.0.1:46466_-n_0000000000)
[n:127.0.0.1:46466_ ] o.a.s.c.OverseerTaskProcessor Cleaning up work-queue.
#Running tasks: 1 #Completed tasks: 1
[junit4] 2> 782943 INFO
(OverseerCollectionConfigSetProcessor-73221569865187332-127.0.0.1:46466_-n_0000000000)
[n:127.0.0.1:46466_ ] o.a.s.c.OverseerTaskQueue Response ZK path:
/overseer/collection-queue-work/qnr-0000000000 doesn't exist. Requestor may
have disconnected from ZooKeeper
[junit4] 2> 782943 DEBUG
(OverseerThreadFactory-2506-thread-2-processing-n:127.0.0.1:46466_)
[n:127.0.0.1:46466_ ] o.a.s.c.OverseerTaskProcessor Runner processing
/overseer/collection-queue-work/qn-0000000002
[junit4] 2> 782943 DEBUG
(OverseerCollectionConfigSetProcessor-73221569865187332-127.0.0.1:46466_-n_0000000000)
[n:127.0.0.1:46466_ ] o.a.s.c.OverseerTaskProcessor RunningTasks:
[/overseer/collection-queue-work/qn-0000000002]
[junit4] 2> 782943 DEBUG
(OverseerCollectionConfigSetProcessor-73221569865187332-127.0.0.1:46466_-n_0000000000)
[n:127.0.0.1:46466_ ] o.a.s.c.OverseerTaskProcessor BlockedTasks: []
[junit4] 2> 782943 DEBUG
(OverseerCollectionConfigSetProcessor-73221569865187332-127.0.0.1:46466_-n_0000000000)
[n:127.0.0.1:46466_ ] o.a.s.c.OverseerTaskProcessor CompletedTasks: []
[junit4] 2> 782943 INFO
(OverseerCollectionConfigSetProcessor-73221569865187332-127.0.0.1:46466_-n_0000000000)
[n:127.0.0.1:46466_ ] o.a.s.c.OverseerTaskProcessor RunningZKTasks:
[/overseer/collection-queue-work/qn-0000000002]
[junit4] 2> 782943 DEBUG
(OverseerThreadFactory-2506-thread-2-processing-n:127.0.0.1:46466_)
[n:127.0.0.1:46466_ ] o.a.s.c.a.c.OverseerCollectionMessageHandler
OverseerCollectionMessageHandler.processMessage : create , {
[junit4] 2> "name":"collection1",
[junit4] 2> "fromApi":"true",
[junit4] 2> "collection.configName":"conf1",
[junit4] 2> "numShards":"2",
[junit4] 2> "createNodeSet":"",
[junit4] 2> "stateFormat":"2",
[junit4] 2> "nrtReplicas":"1",
[junit4] 2> "replicationFactor":"1",
[junit4] 2> "operation":"create"}
[junit4] 2> 782944 INFO
(OverseerThreadFactory-2506-thread-2-processing-n:127.0.0.1:46466_)
[n:127.0.0.1:46466_ ] o.a.s.c.a.c.CreateCollectionCmd Create collection
collection1
[junit4] 2> 782945 DEBUG
(OverseerThreadFactory-2506-thread-2-processing-n:127.0.0.1:46466_)
[n:127.0.0.1:46466_ ] o.a.s.c.a.c.OverseerCollectionMessageHandler creating
collections conf node /collections/collection1
[junit4] 2> 782946 DEBUG
(OverseerThreadFactory-2506-thread-2-processing-n:127.0.0.1:46466_)
[n:127.0.0.1:46466_ ] o.a.s.c.a.c.CreateCollectionCmd Check for collection
zkNode:collection1
[junit4] 2> 782947 DEBUG
(OverseerThreadFactory-2506-thread-2-processing-n:127.0.0.1:46466_)
[n:127.0.0.1:46466_ ] o.a.s.c.a.c.CreateCollectionCmd Collection zkNode
exists
[junit4] 2> 782949 DEBUG
(OverseerStateUpdate-73221569865187332-127.0.0.1:46466_-n_0000000000)
[n:127.0.0.1:46466_ ] o.a.s.c.Overseer processMessage: queueSize: 1, message
= {
[junit4] 2> "name":"collection1",
[junit4] 2> "fromApi":"true",
[junit4] 2> "collection.configName":"conf1",
[junit4] 2> "numShards":"2",
[junit4] 2> "createNodeSet":"",
[junit4] 2> "stateFormat":"2",
[junit4] 2> "nrtReplicas":"1",
[junit4] 2> "replicationFactor":"1",
[junit4] 2> "operation":"create"} current state version: 0
[junit4] 2> 782949 DEBUG
(OverseerStateUpdate-73221569865187332-127.0.0.1:46466_-n_0000000000)
[n:127.0.0.1:46466_ ] o.a.s.c.o.ClusterStateMutator building a new cName:
collection1
[junit4] 2> 783050 DEBUG
(OverseerStateUpdate-73221569865187332-127.0.0.1:46466_-n_0000000000)
[n:127.0.0.1:46466_ ] o.a.s.c.o.ZkStateWriter going to create_collection
/collections/collection1/state.json
[junit4] 2> 783150 WARN
(OverseerThreadFactory-2506-thread-2-processing-n:127.0.0.1:46466_)
[n:127.0.0.1:46466_ ] o.a.s.c.a.c.CreateCollectionCmd It is unusual to
create a collection (collection1) without cores.
[junit4] 2> 783150 DEBUG
(OverseerThreadFactory-2506-thread-2-processing-n:127.0.0.1:46466_)
[n:127.0.0.1:46466_ ] o.a.s.c.a.c.CreateCollectionCmd Finished create
command for collection: collection1
[junit4] 2> 783150 DEBUG
(OverseerThreadFactory-2506-thread-2-processing-n:127.0.0.1:46466_)
[n:127.0.0.1:46466_ ] o.a.s.c.OverseerTaskProcessor Completed
task:[/overseer/collection-queue-work/qn-0000000002]
[junit4] 2> 783152 DEBUG
(OverseerThreadFactory-2506-thread-2-processing-n:127.0.0.1:46466_)
[n:127.0.0.1:46466_ ] o.a.s.c.OverseerTaskProcessor Marked task
[/overseer/collection-queue-work/qn-0000000002] as completed.
[junit4] 2> 783152 DEBUG
(OverseerThreadFactory-2506-thread-2-processing-n:127.0.0.1:46466_)
[n:127.0.0.1:46466_ ] o.a.s.c.OverseerTaskProcessor RunningTasks: []
[junit4] 2> 783152 DEBUG
(OverseerThreadFactory-2506-thread-2-processing-n:127.0.0.1:46466_)
[n:127.0.0.1:46466_ ] o.a.s.c.OverseerTaskProcessor BlockedTasks: []
[junit4] 2> 783152 DEBUG
(OverseerThreadFactory-2506-thread-2-processing-n:127.0.0.1:46466_)
[n:127.0.0.1:46466_ ] o.a.s.c.OverseerTaskProcessor CompletedTasks:
[/overseer/collection-queue-work/qn-0000000002]
[junit4] 2> 783152 INFO
(OverseerThreadFactory-2506-thread-2-processing-n:127.0.0.1:46466_)
[n:127.0.0.1:46466_ ] o.a.s.c.OverseerTaskProcessor RunningZKTasks:
[/overseer/collection-queue-work/qn-0000000002]
[junit4] 2> 783152 DEBUG
(OverseerThreadFactory-2506-thread-2-processing-n:127.0.0.1:46466_)
[n:127.0.0.1:46466_ ] o.a.s.c.OverseerTaskProcessor Overseer Collection
Message Handler: Message id:/overseer/collection-queue-work/qn-0000000002
complete, response:{}
[junit4] 2> 783153 INFO (qtp66998495-7802) [n:127.0.0.1:46466_ ]
o.a.s.h.a.CollectionsHandler Wait for new collection to be active for at most
45 seconds. Check all shard replicas
[junit4] 2> 783153 INFO (qtp66998495-7802) [n:127.0.0.1:46466_ ]
o.a.s.s.HttpSolrCall [admin] webapp=null path=/admin/collections
params={collection.configName=conf1&name=collection1&nrtReplicas=1&action=CREATE&numShards=2&createNodeSet=&stateFormat=2&wt=javabin&version=2}
status=0 QTime=215
[junit4] 2> 783154 INFO
(TEST-ShardSplitTest.testSplitShardWithRule-seed#[EECF44E6B8D77083]) [ ]
o.a.s.c.AbstractFullDistribZkTestBase Creating jetty instances
pullReplicaCount=0 numOtherReplicas=4
[junit4] 2> 783317 INFO
(TEST-ShardSplitTest.testSplitShardWithRule-seed#[EECF44E6B8D77083]) [ ]
o.a.s.c.AbstractFullDistribZkTestBase create jetty 1 in directory
/x1/jenkins/jenkins-slave/workspace/Lucene-Solr-NightlyTests-master/checkout/solr/build/solr-core/test/J1/temp/solr.cloud.api.collections.ShardSplitTest_EECF44E6B8D77083-001/shard-1-001
of type NRT
[junit4] 2> 783320 WARN (closeThreadPool-2871-thread-1) [ ]
o.e.j.s.AbstractConnector Ignoring deprecated socket close linger time
[junit4] 2> 783321 INFO (closeThreadPool-2871-thread-1) [ ]
o.a.s.c.s.e.JettySolrRunner Start Jetty (original configured port=0)
[junit4] 2> 783321 INFO (closeThreadPool-2871-thread-1) [ ]
o.a.s.c.s.e.JettySolrRunner Trying to start Jetty on port 0 try number 1 ...
[junit4] 2> 783321 INFO (closeThreadPool-2871-thread-1) [ ]
o.e.j.s.Server jetty-9.4.14.v20181114; built: 2018-11-14T21:20:31.478Z; git:
c4550056e785fb5665914545889f21dc136ad9e6; jvm 11.0.1+13-LTS
[junit4] 2
[...truncated too long message...]
il:
ivy-configure:
[ivy:configure] :: loading settings :: file = /x1/jenkins/jenkins-slave/workspace/Lucene-Solr-NightlyTests-master/checkout/lucene/top-level-ivy-settings.xml
resolve:
ivy-availability-check:
[loadresource] Do not set property disallowed.ivy.jars.list as its length is 0.
-ivy-fail-disallowed-ivy-version:
ivy-fail:
[...repeated identical ivy-configure/resolve blocks truncated...]
resolve:
jar-checksums:
[mkdir] Created dir: /x1/jenkins/jenkins-slave/workspace/Lucene-Solr-NightlyTests-master/checkout/solr/null569849341
[copy] Copying 239 files to /x1/jenkins/jenkins-slave/workspace/Lucene-Solr-NightlyTests-master/checkout/solr/null569849341
[delete] Deleting directory /x1/jenkins/jenkins-slave/workspace/Lucene-Solr-NightlyTests-master/checkout/solr/null569849341
check-working-copy:
[ivy:cachepath] :: resolving dependencies :: #;working@lucene1-us-west
[ivy:cachepath] confs: [default]
[ivy:cachepath] found org.eclipse.jgit#org.eclipse.jgit;5.3.0.201903130848-r in public
[ivy:cachepath] found com.jcraft#jsch;0.1.54 in public
[ivy:cachepath] found com.jcraft#jzlib;1.1.1 in public
[ivy:cachepath] found com.googlecode.javaewah#JavaEWAH;1.1.6 in public
[ivy:cachepath] found org.slf4j#slf4j-api;1.7.2 in public
[ivy:cachepath] found org.bouncycastle#bcpg-jdk15on;1.60 in public
[ivy:cachepath] found org.bouncycastle#bcprov-jdk15on;1.60 in public
[ivy:cachepath] found org.bouncycastle#bcpkix-jdk15on;1.60 in public
[ivy:cachepath] found org.slf4j#slf4j-nop;1.7.2 in public
[ivy:cachepath] :: resolution report :: resolve 64ms :: artifacts dl 10ms
---------------------------------------------------------------------
|                  |            modules            ||   artifacts   |
|       conf       | number| search|dwnlded|evicted|| number|dwnlded|
---------------------------------------------------------------------
|      default     |   9   |   0   |   0   |   0   ||   9   |   0   |
---------------------------------------------------------------------
[wc-checker] Initializing working copy...
[wc-checker] Checking working copy status...
-jenkins-base:
BUILD SUCCESSFUL
Total time: 324 minutes 23 seconds
Archiving artifacts
java.lang.InterruptedException: no matches found within 10000
        at hudson.FilePath$ValidateAntFileMask.hasMatch(FilePath.java:2847)
        at hudson.FilePath$ValidateAntFileMask.invoke(FilePath.java:2726)
        at hudson.FilePath$ValidateAntFileMask.invoke(FilePath.java:2707)
        at hudson.FilePath$FileCallableWrapper.call(FilePath.java:3086)
Also: hudson.remoting.Channel$CallSiteStackTrace: Remote call to lucene
        at hudson.remoting.Channel.attachCallSiteStackTrace(Channel.java:1741)
        at hudson.remoting.UserRequest$ExceptionResponse.retrieve(UserRequest.java:357)
        at hudson.remoting.Channel.call(Channel.java:955)
        at hudson.FilePath.act(FilePath.java:1072)
        at hudson.FilePath.act(FilePath.java:1061)
        at hudson.FilePath.validateAntFileMask(FilePath.java:2705)
        at hudson.tasks.ArtifactArchiver.perform(ArtifactArchiver.java:243)
        at hudson.tasks.BuildStepCompatibilityLayer.perform(BuildStepCompatibilityLayer.java:81)
        at hudson.tasks.BuildStepMonitor$1.perform(BuildStepMonitor.java:20)
        at hudson.model.AbstractBuild$AbstractBuildExecution.perform(AbstractBuild.java:744)
        at hudson.model.AbstractBuild$AbstractBuildExecution.performAllBuildSteps(AbstractBuild.java:690)
        at hudson.model.Build$BuildExecution.post2(Build.java:186)
        at hudson.model.AbstractBuild$AbstractBuildExecution.post(AbstractBuild.java:635)
        at hudson.model.Run.execute(Run.java:1835)
        at hudson.model.FreeStyleBuild.run(FreeStyleBuild.java:43)
        at hudson.model.ResourceController.execute(ResourceController.java:97)
        at hudson.model.Executor.run(Executor.java:429)
Caused: hudson.FilePath$TunneledInterruptedException
        at hudson.FilePath$FileCallableWrapper.call(FilePath.java:3088)
        at hudson.remoting.UserRequest.perform(UserRequest.java:212)
        at hudson.remoting.UserRequest.perform(UserRequest.java:54)
        at hudson.remoting.Request$2.run(Request.java:369)
        at hudson.remoting.InterceptingExecutorService$1.call(InterceptingExecutorService.java:72)
        at java.util.concurrent.FutureTask.run(FutureTask.java:266)
        at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142)
        at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617)
        at java.lang.Thread.run(Thread.java:748)
Caused: java.lang.InterruptedException: java.lang.InterruptedException: no matches found within 10000
        at hudson.FilePath.act(FilePath.java:1074)
        at hudson.FilePath.act(FilePath.java:1061)
        at hudson.FilePath.validateAntFileMask(FilePath.java:2705)
        at hudson.tasks.ArtifactArchiver.perform(ArtifactArchiver.java:243)
        at hudson.tasks.BuildStepCompatibilityLayer.perform(BuildStepCompatibilityLayer.java:81)
        at hudson.tasks.BuildStepMonitor$1.perform(BuildStepMonitor.java:20)
        at hudson.model.AbstractBuild$AbstractBuildExecution.perform(AbstractBuild.java:744)
        at hudson.model.AbstractBuild$AbstractBuildExecution.performAllBuildSteps(AbstractBuild.java:690)
        at hudson.model.Build$BuildExecution.post2(Build.java:186)
        at hudson.model.AbstractBuild$AbstractBuildExecution.post(AbstractBuild.java:635)
        at hudson.model.Run.execute(Run.java:1835)
        at hudson.model.FreeStyleBuild.run(FreeStyleBuild.java:43)
        at hudson.model.ResourceController.execute(ResourceController.java:97)
        at hudson.model.Executor.run(Executor.java:429)
No artifacts found that match the file pattern "**/*.events,heapdumps/**,**/hs_err_pid*". Configuration error?
Recording test results
Build step 'Publish JUnit test result report' changed build result to UNSTABLE
Email was triggered for: Unstable (Test Failures)
Sending email for trigger: Unstable (Test Failures)