Build: https://jenkins.thetaphi.de/job/Lucene-Solr-7.x-Linux/350/
Java: 64bit/jdk1.8.0_144 -XX:-UseCompressedOops -XX:+UseParallelGC
1 test failed.
FAILED: org.apache.solr.cloud.LeaderFailoverAfterPartitionTest.test
Error Message:
Expected 2 of 3 replicas to be active but only found 1;
[core_node5:{"core":"c8n_1x3_lf_shard1_replica_n3","base_url":"http://127.0.0.1:45099/krk","node_name":"127.0.0.1:45099_krk","state":"active","type":"NRT","leader":"true"}];
clusterState: DocCollection(c8n_1x3_lf//clusterstate.json/29)={
  "pullReplicas":"0",
  "replicationFactor":"1",
  "shards":{"shard1":{
      "range":"80000000-7fffffff",
      "state":"active",
      "replicas":{
        "core_node4":{
          "state":"down",
          "base_url":"http://127.0.0.1:45679/krk",
          "core":"c8n_1x3_lf_shard1_replica_n1",
          "node_name":"127.0.0.1:45679_krk",
          "type":"NRT"},
        "core_node5":{
          "core":"c8n_1x3_lf_shard1_replica_n3",
          "base_url":"http://127.0.0.1:45099/krk",
          "node_name":"127.0.0.1:45099_krk",
          "state":"active",
          "type":"NRT",
          "leader":"true"},
        "core_node6":{
          "core":"c8n_1x3_lf_shard1_replica_n2",
          "base_url":"http://127.0.0.1:33885/krk",
          "node_name":"127.0.0.1:33885_krk",
          "state":"down",
          "type":"NRT"}}}},
  "router":{"name":"compositeId"},
  "maxShardsPerNode":"1",
  "autoAddReplicas":"false",
  "nrtReplicas":"3",
  "tlogReplicas":"0"}
Stack Trace:
java.lang.AssertionError: Expected 2 of 3 replicas to be active but only found 1;
[core_node5:{"core":"c8n_1x3_lf_shard1_replica_n3","base_url":"http://127.0.0.1:45099/krk","node_name":"127.0.0.1:45099_krk","state":"active","type":"NRT","leader":"true"}];
clusterState: DocCollection(c8n_1x3_lf//clusterstate.json/29)={
"pullReplicas":"0",
"replicationFactor":"1",
"shards":{"shard1":{
"range":"80000000-7fffffff",
"state":"active",
"replicas":{
"core_node4":{
"state":"down",
"base_url":"http://127.0.0.1:45679/krk",
"core":"c8n_1x3_lf_shard1_replica_n1",
"node_name":"127.0.0.1:45679_krk",
"type":"NRT"},
"core_node5":{
"core":"c8n_1x3_lf_shard1_replica_n3",
"base_url":"http://127.0.0.1:45099/krk",
"node_name":"127.0.0.1:45099_krk",
"state":"active",
"type":"NRT",
"leader":"true"},
"core_node6":{
"core":"c8n_1x3_lf_shard1_replica_n2",
"base_url":"http://127.0.0.1:33885/krk",
"node_name":"127.0.0.1:33885_krk",
"state":"down",
"type":"NRT"}}}},
"router":{"name":"compositeId"},
"maxShardsPerNode":"1",
"autoAddReplicas":"false",
"nrtReplicas":"3",
"tlogReplicas":"0"}
    at __randomizedtesting.SeedInfo.seed([B6F7C669EDE65067:3EA3F9B3431A3D9F]:0)
    at org.junit.Assert.fail(Assert.java:93)
    at org.junit.Assert.assertTrue(Assert.java:43)
    at org.apache.solr.cloud.LeaderFailoverAfterPartitionTest.testRf3WithLeaderFailover(LeaderFailoverAfterPartitionTest.java:169)
    at org.apache.solr.cloud.LeaderFailoverAfterPartitionTest.test(LeaderFailoverAfterPartitionTest.java:56)
    at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
    at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)
    at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
    at java.lang.reflect.Method.invoke(Method.java:498)
    at com.carrotsearch.randomizedtesting.RandomizedRunner.invoke(RandomizedRunner.java:1737)
    at com.carrotsearch.randomizedtesting.RandomizedRunner$8.evaluate(RandomizedRunner.java:934)
    at com.carrotsearch.randomizedtesting.RandomizedRunner$9.evaluate(RandomizedRunner.java:970)
    at com.carrotsearch.randomizedtesting.RandomizedRunner$10.evaluate(RandomizedRunner.java:984)
    at org.apache.solr.BaseDistributedSearchTestCase$ShardsRepeatRule$ShardsFixedStatement.callStatement(BaseDistributedSearchTestCase.java:993)
    at org.apache.solr.BaseDistributedSearchTestCase$ShardsRepeatRule$ShardsStatement.evaluate(BaseDistributedSearchTestCase.java:968)
    at com.carrotsearch.randomizedtesting.rules.SystemPropertiesRestoreRule$1.evaluate(SystemPropertiesRestoreRule.java:57)
    at org.apache.lucene.util.TestRuleSetupTeardownChained$1.evaluate(TestRuleSetupTeardownChained.java:49)
    at org.apache.lucene.util.AbstractBeforeAfterRule$1.evaluate(AbstractBeforeAfterRule.java:45)
    at org.apache.lucene.util.TestRuleThreadAndTestName$1.evaluate(TestRuleThreadAndTestName.java:48)
    at org.apache.lucene.util.TestRuleIgnoreAfterMaxFailures$1.evaluate(TestRuleIgnoreAfterMaxFailures.java:64)
    at org.apache.lucene.util.TestRuleMarkFailure$1.evaluate(TestRuleMarkFailure.java:47)
    at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
    at com.carrotsearch.randomizedtesting.ThreadLeakControl$StatementRunner.run(ThreadLeakControl.java:368)
    at com.carrotsearch.randomizedtesting.ThreadLeakControl.forkTimeoutingTask(ThreadLeakControl.java:817)
    at com.carrotsearch.randomizedtesting.ThreadLeakControl$3.evaluate(ThreadLeakControl.java:468)
    at com.carrotsearch.randomizedtesting.RandomizedRunner.runSingleTest(RandomizedRunner.java:943)
    at com.carrotsearch.randomizedtesting.RandomizedRunner$5.evaluate(RandomizedRunner.java:829)
    at com.carrotsearch.randomizedtesting.RandomizedRunner$6.evaluate(RandomizedRunner.java:879)
    at com.carrotsearch.randomizedtesting.RandomizedRunner$7.evaluate(RandomizedRunner.java:890)
    at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
    at com.carrotsearch.randomizedtesting.rules.SystemPropertiesRestoreRule$1.evaluate(SystemPropertiesRestoreRule.java:57)
    at org.apache.lucene.util.AbstractBeforeAfterRule$1.evaluate(AbstractBeforeAfterRule.java:45)
    at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
    at org.apache.lucene.util.TestRuleStoreClassName$1.evaluate(TestRuleStoreClassName.java:41)
    at com.carrotsearch.randomizedtesting.rules.NoShadowingOrOverridesOnMethodsRule$1.evaluate(NoShadowingOrOverridesOnMethodsRule.java:40)
    at com.carrotsearch.randomizedtesting.rules.NoShadowingOrOverridesOnMethodsRule$1.evaluate(NoShadowingOrOverridesOnMethodsRule.java:40)
    at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
    at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
    at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
    at org.apache.lucene.util.TestRuleAssertionsRequired$1.evaluate(TestRuleAssertionsRequired.java:53)
    at org.apache.lucene.util.TestRuleMarkFailure$1.evaluate(TestRuleMarkFailure.java:47)
    at org.apache.lucene.util.TestRuleIgnoreAfterMaxFailures$1.evaluate(TestRuleIgnoreAfterMaxFailures.java:64)
    at org.apache.lucene.util.TestRuleIgnoreTestSuites$1.evaluate(TestRuleIgnoreTestSuites.java:54)
    at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
    at com.carrotsearch.randomizedtesting.ThreadLeakControl$StatementRunner.run(ThreadLeakControl.java:368)
    at java.lang.Thread.run(Thread.java:748)
Build Log:
[...truncated 11501 lines...]
[junit4] Suite: org.apache.solr.cloud.LeaderFailoverAfterPartitionTest
[junit4] 2> 237684 INFO
(SUITE-LeaderFailoverAfterPartitionTest-seed#[B6F7C669EDE65067]-worker) [ ]
o.a.s.SolrTestCaseJ4 SecureRandom sanity checks:
test.solr.allowed.securerandom=null & java.security.egd=file:/dev/./urandom
[junit4] 2> Creating dataDir:
/home/jenkins/workspace/Lucene-Solr-7.x-Linux/solr/build/solr-core/test/J1/temp/solr.cloud.LeaderFailoverAfterPartitionTest_B6F7C669EDE65067-001/init-core-data-001
[junit4] 2> 237684 INFO
(SUITE-LeaderFailoverAfterPartitionTest-seed#[B6F7C669EDE65067]-worker) [ ]
o.a.s.SolrTestCaseJ4 Using PointFields (NUMERIC_POINTS_SYSPROP=true)
w/NUMERIC_DOCVALUES_SYSPROP=true
[junit4] 2> 237685 INFO
(SUITE-LeaderFailoverAfterPartitionTest-seed#[B6F7C669EDE65067]-worker) [ ]
o.a.s.SolrTestCaseJ4 Randomized ssl (false) and clientAuth (false) via:
@org.apache.solr.SolrTestCaseJ4$SuppressSSL(bugUrl=https://issues.apache.org/jira/browse/SOLR-5776)
[junit4] 2> 237685 INFO
(SUITE-LeaderFailoverAfterPartitionTest-seed#[B6F7C669EDE65067]-worker) [ ]
o.a.s.BaseDistributedSearchTestCase Setting hostContext system property: /krk/
[junit4] 2> 237686 INFO
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[B6F7C669EDE65067]) [ ]
o.a.s.c.ZkTestServer STARTING ZK TEST SERVER
[junit4] 2> 237686 INFO (Thread-381) [ ] o.a.s.c.ZkTestServer client
port:0.0.0.0/0.0.0.0:0
[junit4] 2> 237686 INFO (Thread-381) [ ] o.a.s.c.ZkTestServer Starting
server
[junit4] 2> 237687 ERROR (Thread-381) [ ] o.a.z.s.ZooKeeperServer
ZKShutdownHandler is not registered, so ZooKeeper server won't take any action
on ERROR or SHUTDOWN server state changes
[junit4] 2> 237786 INFO
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[B6F7C669EDE65067]) [ ]
o.a.s.c.ZkTestServer start zk server on port:37837
[junit4] 2> 237795 INFO
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[B6F7C669EDE65067]) [ ]
o.a.s.c.AbstractZkTestCase put
/home/jenkins/workspace/Lucene-Solr-7.x-Linux/solr/core/src/test-files/solr/collection1/conf/solrconfig-tlog.xml
to /configs/conf1/solrconfig.xml
[junit4] 2> 237796 INFO
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[B6F7C669EDE65067]) [ ]
o.a.s.c.AbstractZkTestCase put
/home/jenkins/workspace/Lucene-Solr-7.x-Linux/solr/core/src/test-files/solr/collection1/conf/schema.xml
to /configs/conf1/schema.xml
[junit4] 2> 237796 INFO
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[B6F7C669EDE65067]) [ ]
o.a.s.c.AbstractZkTestCase put
/home/jenkins/workspace/Lucene-Solr-7.x-Linux/solr/core/src/test-files/solr/collection1/conf/solrconfig.snippet.randomindexconfig.xml
to /configs/conf1/solrconfig.snippet.randomindexconfig.xml
[junit4] 2> 237796 INFO
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[B6F7C669EDE65067]) [ ]
o.a.s.c.AbstractZkTestCase put
/home/jenkins/workspace/Lucene-Solr-7.x-Linux/solr/core/src/test-files/solr/collection1/conf/stopwords.txt
to /configs/conf1/stopwords.txt
[junit4] 2> 237797 INFO
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[B6F7C669EDE65067]) [ ]
o.a.s.c.AbstractZkTestCase put
/home/jenkins/workspace/Lucene-Solr-7.x-Linux/solr/core/src/test-files/solr/collection1/conf/protwords.txt
to /configs/conf1/protwords.txt
[junit4] 2> 237797 INFO
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[B6F7C669EDE65067]) [ ]
o.a.s.c.AbstractZkTestCase put
/home/jenkins/workspace/Lucene-Solr-7.x-Linux/solr/core/src/test-files/solr/collection1/conf/currency.xml
to /configs/conf1/currency.xml
[junit4] 2> 237797 INFO
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[B6F7C669EDE65067]) [ ]
o.a.s.c.AbstractZkTestCase put
/home/jenkins/workspace/Lucene-Solr-7.x-Linux/solr/core/src/test-files/solr/collection1/conf/enumsConfig.xml
to /configs/conf1/enumsConfig.xml
[junit4] 2> 237798 INFO
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[B6F7C669EDE65067]) [ ]
o.a.s.c.AbstractZkTestCase put
/home/jenkins/workspace/Lucene-Solr-7.x-Linux/solr/core/src/test-files/solr/collection1/conf/open-exchange-rates.json
to /configs/conf1/open-exchange-rates.json
[junit4] 2> 237798 INFO
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[B6F7C669EDE65067]) [ ]
o.a.s.c.AbstractZkTestCase put
/home/jenkins/workspace/Lucene-Solr-7.x-Linux/solr/core/src/test-files/solr/collection1/conf/mapping-ISOLatin1Accent.txt
to /configs/conf1/mapping-ISOLatin1Accent.txt
[junit4] 2> 237798 INFO
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[B6F7C669EDE65067]) [ ]
o.a.s.c.AbstractZkTestCase put
/home/jenkins/workspace/Lucene-Solr-7.x-Linux/solr/core/src/test-files/solr/collection1/conf/old_synonyms.txt
to /configs/conf1/old_synonyms.txt
[junit4] 2> 237799 INFO
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[B6F7C669EDE65067]) [ ]
o.a.s.c.AbstractZkTestCase put
/home/jenkins/workspace/Lucene-Solr-7.x-Linux/solr/core/src/test-files/solr/collection1/conf/synonyms.txt
to /configs/conf1/synonyms.txt
[junit4] 2> 237799 INFO
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[B6F7C669EDE65067]) [ ]
o.a.s.c.AbstractFullDistribZkTestBase Will use NRT replicas unless explicitly
asked otherwise
[junit4] 2> 237846 INFO
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[B6F7C669EDE65067]) [ ]
o.e.j.s.Server jetty-9.3.20.v20170531
[junit4] 2> 237847 INFO
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[B6F7C669EDE65067]) [ ]
o.e.j.s.h.ContextHandler Started
o.e.j.s.ServletContextHandler@53161815{/krk,null,AVAILABLE}
[junit4] 2> 237848 INFO
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[B6F7C669EDE65067]) [ ]
o.e.j.s.AbstractConnector Started
ServerConnector@60f73104{HTTP/1.1,[http/1.1]}{127.0.0.1:44303}
[junit4] 2> 237848 INFO
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[B6F7C669EDE65067]) [ ]
o.e.j.s.Server Started @239413ms
[junit4] 2> 237848 INFO
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[B6F7C669EDE65067]) [ ]
o.a.s.c.s.e.JettySolrRunner Jetty properties:
{solr.data.dir=/home/jenkins/workspace/Lucene-Solr-7.x-Linux/solr/build/solr-core/test/J1/temp/solr.cloud.LeaderFailoverAfterPartitionTest_B6F7C669EDE65067-001/tempDir-001/control/data,
hostContext=/krk, hostPort=45679,
coreRootDirectory=/home/jenkins/workspace/Lucene-Solr-7.x-Linux/solr/build/solr-core/test/J1/temp/solr.cloud.LeaderFailoverAfterPartitionTest_B6F7C669EDE65067-001/control-001/cores}
[junit4] 2> 237848 ERROR
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[B6F7C669EDE65067]) [ ]
o.a.s.u.StartupLoggingUtils Missing Java Option solr.log.dir. Logging may be
missing or incomplete.
[junit4] 2> 237848 INFO
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[B6F7C669EDE65067]) [ ]
o.a.s.s.SolrDispatchFilter ___ _ Welcome to Apache Solr™ version
7.1.0
[junit4] 2> 237848 INFO
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[B6F7C669EDE65067]) [ ]
o.a.s.s.SolrDispatchFilter / __| ___| |_ _ Starting in cloud mode on port null
[junit4] 2> 237848 INFO
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[B6F7C669EDE65067]) [ ]
o.a.s.s.SolrDispatchFilter \__ \/ _ \ | '_| Install dir: null, Default config
dir: null
[junit4] 2> 237849 INFO
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[B6F7C669EDE65067]) [ ]
o.a.s.s.SolrDispatchFilter |___/\___/_|_| Start time:
2017-09-02T14:08:59.780Z
[junit4] 2> 237850 INFO
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[B6F7C669EDE65067]) [ ]
o.a.s.s.SolrDispatchFilter Loading solr.xml from SolrHome (not found in
ZooKeeper)
[junit4] 2> 237850 INFO
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[B6F7C669EDE65067]) [ ]
o.a.s.c.SolrXmlConfig Loading container configuration from
/home/jenkins/workspace/Lucene-Solr-7.x-Linux/solr/build/solr-core/test/J1/temp/solr.cloud.LeaderFailoverAfterPartitionTest_B6F7C669EDE65067-001/control-001/solr.xml
[junit4] 2> 237853 INFO
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[B6F7C669EDE65067]) [ ]
o.a.s.c.SolrXmlConfig MBean server found:
com.sun.jmx.mbeanserver.JmxMBeanServer@580cde4c, but no JMX reporters were
configured - adding default JMX reporter.
[junit4] 2> 237855 INFO
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[B6F7C669EDE65067]) [ ]
o.a.s.c.ZkContainer Zookeeper client=127.0.0.1:37837/solr
[junit4] 2> 237886 INFO
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[B6F7C669EDE65067])
[n:127.0.0.1:45679_krk ] o.a.s.c.Overseer Overseer (id=null) closing
[junit4] 2> 237887 INFO
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[B6F7C669EDE65067])
[n:127.0.0.1:45679_krk ] o.a.s.c.OverseerElectionContext I am going to be
the leader 127.0.0.1:45679_krk
[junit4] 2> 237887 INFO
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[B6F7C669EDE65067])
[n:127.0.0.1:45679_krk ] o.a.s.c.Overseer Overseer
(id=98589824753205252-127.0.0.1:45679_krk-n_0000000000) starting
[junit4] 2> 237890 INFO
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[B6F7C669EDE65067])
[n:127.0.0.1:45679_krk ] o.a.s.c.ZkController Register node as live in
ZooKeeper:/live_nodes/127.0.0.1:45679_krk
[junit4] 2> 237890 INFO
(zkCallback-376-thread-1-processing-n:127.0.0.1:45679_krk)
[n:127.0.0.1:45679_krk ] o.a.s.c.c.ZkStateReader Updated live nodes from
ZooKeeper... (0) -> (1)
[junit4] 2> 237930 INFO
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[B6F7C669EDE65067])
[n:127.0.0.1:45679_krk ] o.a.s.m.r.SolrJmxReporter JMX monitoring for
'solr.node' (registry 'solr.node') enabled at server:
com.sun.jmx.mbeanserver.JmxMBeanServer@580cde4c
[junit4] 2> 237935 INFO
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[B6F7C669EDE65067])
[n:127.0.0.1:45679_krk ] o.a.s.m.r.SolrJmxReporter JMX monitoring for
'solr.jvm' (registry 'solr.jvm') enabled at server:
com.sun.jmx.mbeanserver.JmxMBeanServer@580cde4c
[junit4] 2> 237935 INFO
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[B6F7C669EDE65067])
[n:127.0.0.1:45679_krk ] o.a.s.m.r.SolrJmxReporter JMX monitoring for
'solr.jetty' (registry 'solr.jetty') enabled at server:
com.sun.jmx.mbeanserver.JmxMBeanServer@580cde4c
[junit4] 2> 237936 INFO
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[B6F7C669EDE65067])
[n:127.0.0.1:45679_krk ] o.a.s.c.CorePropertiesLocator Found 0 core
definitions underneath
/home/jenkins/workspace/Lucene-Solr-7.x-Linux/solr/build/solr-core/test/J1/temp/solr.cloud.LeaderFailoverAfterPartitionTest_B6F7C669EDE65067-001/control-001/cores
[junit4] 2> 237947 INFO
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[B6F7C669EDE65067]) [ ]
o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (0) -> (1)
[junit4] 2> 237947 INFO
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[B6F7C669EDE65067]) [ ]
o.a.s.c.s.i.ZkClientClusterStateProvider Cluster at 127.0.0.1:37837/solr ready
[junit4] 2> 237947 INFO (SocketProxy-Acceptor-45679) [ ]
o.a.s.c.SocketProxy accepted
Socket[addr=/127.0.0.1,port=37540,localport=45679], receiveBufferSize:531000
[junit4] 2> 237950 INFO (SocketProxy-Acceptor-45679) [ ]
o.a.s.c.SocketProxy proxy connection
Socket[addr=/127.0.0.1,port=44303,localport=47800], receiveBufferSize=530904
[junit4] 2> 237950 INFO (qtp167089157-1921) [n:127.0.0.1:45679_krk ]
o.a.s.h.a.CollectionsHandler Invoked Collection Action :create with params
replicationFactor=1&collection.configName=conf1&name=control_collection&nrtReplicas=1&action=CREATE&numShards=1&createNodeSet=127.0.0.1:45679_krk&wt=javabin&version=2
and sendToOCPQueue=true
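(For reference, the CREATE invocation logged just above could be issued from SolrJ roughly as in the sketch below; this assumes the SolrJ 7.x CollectionAdminRequest helper and reuses a CloudSolrClient like the one in the earlier sketch, it is not code taken from the test.)

    // Sketch: roughly the control_collection CREATE call recorded above.
    // import org.apache.solr.client.solrj.request.CollectionAdminRequest;
    CollectionAdminRequest.Create create =
            CollectionAdminRequest.createCollection("control_collection", "conf1",
                    /* numShards */ 1, /* nrtReplicas */ 1);
    create.setCreateNodeSet("127.0.0.1:45679_krk");
    create.process(client); // 'client' is a CloudSolrClient as in the earlier sketch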
[junit4] 2> 237951 INFO
(OverseerThreadFactory-893-thread-1-processing-n:127.0.0.1:45679_krk)
[n:127.0.0.1:45679_krk ] o.a.s.c.CreateCollectionCmd Create collection
control_collection
[junit4] 2> 238054 INFO (SocketProxy-Acceptor-45679) [ ]
o.a.s.c.SocketProxy accepted
Socket[addr=/127.0.0.1,port=37544,localport=45679], receiveBufferSize:531000
[junit4] 2> 238054 INFO (SocketProxy-Acceptor-45679) [ ]
o.a.s.c.SocketProxy proxy connection
Socket[addr=/127.0.0.1,port=44303,localport=47804], receiveBufferSize=530904
[junit4] 2> 238056 INFO (qtp167089157-1923) [n:127.0.0.1:45679_krk ]
o.a.s.h.a.CoreAdminOperation core create command
qt=/admin/cores&collection.configName=conf1&newCollection=true&name=control_collection_shard1_replica_n1&action=CREATE&numShards=1&collection=control_collection&shard=shard1&wt=javabin&version=2&replicaType=NRT
[junit4] 2> 238056 INFO (qtp167089157-1923) [n:127.0.0.1:45679_krk ]
o.a.s.c.TransientSolrCoreCacheDefault Allocating transient cache for 4
transient cores
[junit4] 2> 238158 INFO
(zkCallback-376-thread-1-processing-n:127.0.0.1:45679_krk)
[n:127.0.0.1:45679_krk ] o.a.s.c.c.ZkStateReader A cluster state change:
[WatchedEvent state:SyncConnected type:NodeDataChanged
path:/collections/control_collection/state.json] for collection
[control_collection] has occurred - updating... (live nodes size: [1])
[junit4] 2> 239079 INFO (qtp167089157-1923) [n:127.0.0.1:45679_krk
c:control_collection s:shard1 x:control_collection_shard1_replica_n1]
o.a.s.c.SolrConfig Using Lucene MatchVersion: 7.1.0
[junit4] 2> 239092 INFO (qtp167089157-1923) [n:127.0.0.1:45679_krk
c:control_collection s:shard1 x:control_collection_shard1_replica_n1]
o.a.s.s.IndexSchema [control_collection_shard1_replica_n1] Schema name=test
[junit4] 2> 239220 INFO (qtp167089157-1923) [n:127.0.0.1:45679_krk
c:control_collection s:shard1 x:control_collection_shard1_replica_n1]
o.a.s.s.IndexSchema Loaded schema test/1.0 with uniqueid field id
[junit4] 2> 239238 INFO (qtp167089157-1923) [n:127.0.0.1:45679_krk
c:control_collection s:shard1 x:control_collection_shard1_replica_n1]
o.a.s.c.CoreContainer Creating SolrCore 'control_collection_shard1_replica_n1'
using configuration from collection control_collection, trusted=true
[junit4] 2> 239239 INFO (qtp167089157-1923) [n:127.0.0.1:45679_krk
c:control_collection s:shard1 x:control_collection_shard1_replica_n1]
o.a.s.m.r.SolrJmxReporter JMX monitoring for
'solr.core.control_collection.shard1.replica_n1' (registry
'solr.core.control_collection.shard1.replica_n1') enabled at server:
com.sun.jmx.mbeanserver.JmxMBeanServer@580cde4c
[junit4] 2> 239239 INFO (qtp167089157-1923) [n:127.0.0.1:45679_krk
c:control_collection s:shard1 x:control_collection_shard1_replica_n1]
o.a.s.c.SolrCore solr.RecoveryStrategy.Builder
[junit4] 2> 239239 INFO (qtp167089157-1923) [n:127.0.0.1:45679_krk
c:control_collection s:shard1 x:control_collection_shard1_replica_n1]
o.a.s.c.SolrCore [[control_collection_shard1_replica_n1] ] Opening new SolrCore
at
[/home/jenkins/workspace/Lucene-Solr-7.x-Linux/solr/build/solr-core/test/J1/temp/solr.cloud.LeaderFailoverAfterPartitionTest_B6F7C669EDE65067-001/control-001/cores/control_collection_shard1_replica_n1],
dataDir=[/home/jenkins/workspace/Lucene-Solr-7.x-Linux/solr/build/solr-core/test/J1/temp/solr.cloud.LeaderFailoverAfterPartitionTest_B6F7C669EDE65067-001/control-001/cores/control_collection_shard1_replica_n1/data/]
[junit4] 2> 239241 INFO (qtp167089157-1923) [n:127.0.0.1:45679_krk
c:control_collection s:shard1 x:control_collection_shard1_replica_n1]
o.a.s.u.RandomMergePolicy RandomMergePolicy wrapping class
org.apache.lucene.index.TieredMergePolicy: [TieredMergePolicy:
maxMergeAtOnce=15, maxMergeAtOnceExplicit=46, maxMergedSegmentMB=10.3349609375,
floorSegmentMB=1.7568359375, forceMergeDeletesPctAllowed=16.555397847684123,
segmentsPerTier=36.0, maxCFSSegmentSizeMB=8.796093022207999E12,
noCFSRatio=0.3694449328188243
[junit4] 2> 239244 WARN (qtp167089157-1923) [n:127.0.0.1:45679_krk
c:control_collection s:shard1 x:control_collection_shard1_replica_n1]
o.a.s.c.RequestHandlers INVALID paramSet a in requestHandler {type =
requestHandler,name = /dump,class = DumpRequestHandler,attributes =
{initParams=a, name=/dump, class=DumpRequestHandler},args =
{defaults={a=A,b=B}}}
[junit4] 2> 239284 INFO (qtp167089157-1923) [n:127.0.0.1:45679_krk
c:control_collection s:shard1 x:control_collection_shard1_replica_n1]
o.a.s.u.UpdateHandler Using UpdateLog implementation:
org.apache.solr.update.UpdateLog
[junit4] 2> 239284 INFO (qtp167089157-1923) [n:127.0.0.1:45679_krk
c:control_collection s:shard1 x:control_collection_shard1_replica_n1]
o.a.s.u.UpdateLog Initializing UpdateLog: dataDir= defaultSyncLevel=FLUSH
numRecordsToKeep=100 maxNumLogsToKeep=10 numVersionBuckets=65536
[junit4] 2> 239285 INFO (qtp167089157-1923) [n:127.0.0.1:45679_krk
c:control_collection s:shard1 x:control_collection_shard1_replica_n1]
o.a.s.u.CommitTracker Hard AutoCommit: disabled
[junit4] 2> 239286 INFO (qtp167089157-1923) [n:127.0.0.1:45679_krk
c:control_collection s:shard1 x:control_collection_shard1_replica_n1]
o.a.s.u.CommitTracker Soft AutoCommit: disabled
[junit4] 2> 239287 INFO (qtp167089157-1923) [n:127.0.0.1:45679_krk
c:control_collection s:shard1 x:control_collection_shard1_replica_n1]
o.a.s.u.RandomMergePolicy RandomMergePolicy wrapping class
org.apache.lucene.index.TieredMergePolicy: [TieredMergePolicy:
maxMergeAtOnce=42, maxMergeAtOnceExplicit=37, maxMergedSegmentMB=76.7197265625,
floorSegmentMB=0.66015625, forceMergeDeletesPctAllowed=2.9590274146266795,
segmentsPerTier=43.0, maxCFSSegmentSizeMB=8.796093022207999E12, noCFSRatio=0.0
[junit4] 2> 239288 INFO (qtp167089157-1923) [n:127.0.0.1:45679_krk
c:control_collection s:shard1 x:control_collection_shard1_replica_n1]
o.a.s.s.SolrIndexSearcher Opening
[Searcher@381dc2a7[control_collection_shard1_replica_n1] main]
[junit4] 2> 239288 INFO (qtp167089157-1923) [n:127.0.0.1:45679_krk
c:control_collection s:shard1 x:control_collection_shard1_replica_n1]
o.a.s.r.ManagedResourceStorage Configured ZooKeeperStorageIO with znodeBase:
/configs/conf1
[junit4] 2> 239289 INFO (qtp167089157-1923) [n:127.0.0.1:45679_krk
c:control_collection s:shard1 x:control_collection_shard1_replica_n1]
o.a.s.r.ManagedResourceStorage Loaded null at path _rest_managed.json using
ZooKeeperStorageIO:path=/configs/conf1
[junit4] 2> 239289 INFO (qtp167089157-1923) [n:127.0.0.1:45679_krk
c:control_collection s:shard1 x:control_collection_shard1_replica_n1]
o.a.s.h.ReplicationHandler Commits will be reserved for 10000
[junit4] 2> 239290 INFO
(searcherExecutor-896-thread-1-processing-n:127.0.0.1:45679_krk
x:control_collection_shard1_replica_n1 s:shard1 c:control_collection)
[n:127.0.0.1:45679_krk c:control_collection s:shard1
x:control_collection_shard1_replica_n1] o.a.s.c.SolrCore
[control_collection_shard1_replica_n1] Registered new searcher
Searcher@381dc2a7[control_collection_shard1_replica_n1]
main{ExitableDirectoryReader(UninvertingDirectoryReader())}
[junit4] 2> 239296 INFO (qtp167089157-1923) [n:127.0.0.1:45679_krk
c:control_collection s:shard1 x:control_collection_shard1_replica_n1]
o.a.s.u.UpdateLog Could not find max version in index or recent updates, using
new clock 1577437197739491328
[junit4] 2> 239308 INFO (qtp167089157-1923) [n:127.0.0.1:45679_krk
c:control_collection s:shard1 x:control_collection_shard1_replica_n1]
o.a.s.c.ShardLeaderElectionContext Enough replicas found to continue.
[junit4] 2> 239308 INFO (qtp167089157-1923) [n:127.0.0.1:45679_krk
c:control_collection s:shard1 x:control_collection_shard1_replica_n1]
o.a.s.c.ShardLeaderElectionContext I may be the new leader - try and sync
[junit4] 2> 239308 INFO (qtp167089157-1923) [n:127.0.0.1:45679_krk
c:control_collection s:shard1 x:control_collection_shard1_replica_n1]
o.a.s.c.SyncStrategy Sync replicas to
http://127.0.0.1:45679/krk/control_collection_shard1_replica_n1/
[junit4] 2> 239308 INFO (qtp167089157-1923) [n:127.0.0.1:45679_krk
c:control_collection s:shard1 x:control_collection_shard1_replica_n1]
o.a.s.c.SyncStrategy Sync Success - now sync replicas to me
[junit4] 2> 239308 INFO (qtp167089157-1923) [n:127.0.0.1:45679_krk
c:control_collection s:shard1 x:control_collection_shard1_replica_n1]
o.a.s.c.SyncStrategy
http://127.0.0.1:45679/krk/control_collection_shard1_replica_n1/ has no replicas
[junit4] 2> 239308 INFO (qtp167089157-1923) [n:127.0.0.1:45679_krk
c:control_collection s:shard1 x:control_collection_shard1_replica_n1]
o.a.s.c.ShardLeaderElectionContext Found all replicas participating in
election, clear LIR
[junit4] 2> 239317 INFO (qtp167089157-1923) [n:127.0.0.1:45679_krk
c:control_collection s:shard1 x:control_collection_shard1_replica_n1]
o.a.s.c.ShardLeaderElectionContext I am the new leader:
http://127.0.0.1:45679/krk/control_collection_shard1_replica_n1/ shard1
[junit4] 2> 239423 INFO
(zkCallback-376-thread-1-processing-n:127.0.0.1:45679_krk)
[n:127.0.0.1:45679_krk ] o.a.s.c.c.ZkStateReader A cluster state change:
[WatchedEvent state:SyncConnected type:NodeDataChanged
path:/collections/control_collection/state.json] for collection
[control_collection] has occurred - updating... (live nodes size: [1])
[junit4] 2> 239472 INFO (qtp167089157-1923) [n:127.0.0.1:45679_krk
c:control_collection s:shard1 x:control_collection_shard1_replica_n1]
o.a.s.c.ZkController I am the leader, no recovery necessary
[junit4] 2> 239472 INFO (qtp167089157-1923) [n:127.0.0.1:45679_krk
c:control_collection s:shard1 x:control_collection_shard1_replica_n1]
o.a.s.s.HttpSolrCall [admin] webapp=null path=/admin/cores
params={qt=/admin/cores&collection.configName=conf1&newCollection=true&name=control_collection_shard1_replica_n1&action=CREATE&numShards=1&collection=control_collection&shard=shard1&wt=javabin&version=2&replicaType=NRT}
status=0 QTime=1417
[junit4] 2> 239475 INFO (qtp167089157-1921) [n:127.0.0.1:45679_krk ]
o.a.s.h.a.CollectionsHandler Wait for new collection to be active for at most
30 seconds. Check all shard replicas
[junit4] 2> 239575 INFO
(zkCallback-376-thread-2-processing-n:127.0.0.1:45679_krk)
[n:127.0.0.1:45679_krk ] o.a.s.c.c.ZkStateReader A cluster state change:
[WatchedEvent state:SyncConnected type:NodeDataChanged
path:/collections/control_collection/state.json] for collection
[control_collection] has occurred - updating... (live nodes size: [1])
[junit4] 2> 239959 INFO
(OverseerCollectionConfigSetProcessor-98589824753205252-127.0.0.1:45679_krk-n_0000000000)
[n:127.0.0.1:45679_krk ] o.a.s.c.OverseerTaskQueue Response ZK path:
/overseer/collection-queue-work/qnr-0000000000 doesn't exist. Requestor may
have disconnected from ZooKeeper
[junit4] 2> 240475 INFO (qtp167089157-1921) [n:127.0.0.1:45679_krk ]
o.a.s.s.HttpSolrCall [admin] webapp=null path=/admin/collections
params={replicationFactor=1&collection.configName=conf1&name=control_collection&nrtReplicas=1&action=CREATE&numShards=1&createNodeSet=127.0.0.1:45679_krk&wt=javabin&version=2}
status=0 QTime=2524
[junit4] 2> 240484 INFO
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[B6F7C669EDE65067]) [ ]
o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (0) -> (1)
[junit4] 2> 240485 INFO
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[B6F7C669EDE65067]) [ ]
o.a.s.c.s.i.ZkClientClusterStateProvider Cluster at 127.0.0.1:37837/solr ready
[junit4] 2> 240485 INFO
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[B6F7C669EDE65067]) [ ]
o.a.s.c.ChaosMonkey monkey: init - expire sessions:false cause connection
loss:false
[junit4] 2> 240485 INFO (SocketProxy-Acceptor-45679) [ ]
o.a.s.c.SocketProxy accepted
Socket[addr=/127.0.0.1,port=37600,localport=45679], receiveBufferSize:531000
[junit4] 2> 240488 INFO (SocketProxy-Acceptor-45679) [ ]
o.a.s.c.SocketProxy proxy connection
Socket[addr=/127.0.0.1,port=44303,localport=47860], receiveBufferSize=530904
[junit4] 2> 240489 INFO (qtp167089157-1918) [n:127.0.0.1:45679_krk ]
o.a.s.h.a.CollectionsHandler Invoked Collection Action :create with params
replicationFactor=1&collection.configName=conf1&name=collection1&nrtReplicas=1&action=CREATE&numShards=2&createNodeSet=&stateFormat=1&wt=javabin&version=2
and sendToOCPQueue=true
[junit4] 2> 240491 INFO
(OverseerThreadFactory-893-thread-2-processing-n:127.0.0.1:45679_krk)
[n:127.0.0.1:45679_krk ] o.a.s.c.CreateCollectionCmd Create collection
collection1
[junit4] 2> 240492 WARN
(OverseerThreadFactory-893-thread-2-processing-n:127.0.0.1:45679_krk)
[n:127.0.0.1:45679_krk ] o.a.s.c.CreateCollectionCmd It is unusual to create
a collection (collection1) without cores.
[junit4] 2> 240697 INFO (qtp167089157-1918) [n:127.0.0.1:45679_krk ]
o.a.s.h.a.CollectionsHandler Wait for new collection to be active for at most
30 seconds. Check all shard replicas
[junit4] 2> 240697 INFO (qtp167089157-1918) [n:127.0.0.1:45679_krk ]
o.a.s.s.HttpSolrCall [admin] webapp=null path=/admin/collections
params={replicationFactor=1&collection.configName=conf1&name=collection1&nrtReplicas=1&action=CREATE&numShards=2&createNodeSet=&stateFormat=1&wt=javabin&version=2}
status=0 QTime=208
[junit4] 2> 240808 INFO
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[B6F7C669EDE65067]) [ ]
o.a.s.c.AbstractFullDistribZkTestBase create jetty 1 in directory
/home/jenkins/workspace/Lucene-Solr-7.x-Linux/solr/build/solr-core/test/J1/temp/solr.cloud.LeaderFailoverAfterPartitionTest_B6F7C669EDE65067-001/shard-1-001
of type NRT
[junit4] 2> 240810 INFO
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[B6F7C669EDE65067]) [ ]
o.e.j.s.Server jetty-9.3.20.v20170531
[junit4] 2> 240811 INFO
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[B6F7C669EDE65067]) [ ]
o.e.j.s.h.ContextHandler Started
o.e.j.s.ServletContextHandler@6f15fd64{/krk,null,AVAILABLE}
[junit4] 2> 240811 INFO
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[B6F7C669EDE65067]) [ ]
o.e.j.s.AbstractConnector Started
ServerConnector@2b845956{HTTP/1.1,[http/1.1]}{127.0.0.1:36669}
[junit4] 2> 240811 INFO
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[B6F7C669EDE65067]) [ ]
o.e.j.s.Server Started @242377ms
[junit4] 2> 240811 INFO
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[B6F7C669EDE65067]) [ ]
o.a.s.c.s.e.JettySolrRunner Jetty properties:
{solr.data.dir=/home/jenkins/workspace/Lucene-Solr-7.x-Linux/solr/build/solr-core/test/J1/temp/solr.cloud.LeaderFailoverAfterPartitionTest_B6F7C669EDE65067-001/tempDir-001/jetty1,
solrconfig=solrconfig.xml, hostContext=/krk, hostPort=33885,
coreRootDirectory=/home/jenkins/workspace/Lucene-Solr-7.x-Linux/solr/build/solr-core/test/J1/temp/solr.cloud.LeaderFailoverAfterPartitionTest_B6F7C669EDE65067-001/shard-1-001/cores}
[junit4] 2> 240812 ERROR
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[B6F7C669EDE65067]) [ ]
o.a.s.u.StartupLoggingUtils Missing Java Option solr.log.dir. Logging may be
missing or incomplete.
[junit4] 2> 240812 INFO
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[B6F7C669EDE65067]) [ ]
o.a.s.s.SolrDispatchFilter ___ _ Welcome to Apache Solr™ version
7.1.0
[junit4] 2> 240812 INFO
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[B6F7C669EDE65067]) [ ]
o.a.s.s.SolrDispatchFilter / __| ___| |_ _ Starting in cloud mode on port null
[junit4] 2> 240812 INFO
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[B6F7C669EDE65067]) [ ]
o.a.s.s.SolrDispatchFilter \__ \/ _ \ | '_| Install dir: null, Default config
dir: null
[junit4] 2> 240812 INFO
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[B6F7C669EDE65067]) [ ]
o.a.s.s.SolrDispatchFilter |___/\___/_|_| Start time:
2017-09-02T14:09:02.744Z
[junit4] 2> 240819 INFO
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[B6F7C669EDE65067]) [ ]
o.a.s.s.SolrDispatchFilter Loading solr.xml from SolrHome (not found in
ZooKeeper)
[junit4] 2> 240819 INFO
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[B6F7C669EDE65067]) [ ]
o.a.s.c.SolrXmlConfig Loading container configuration from
/home/jenkins/workspace/Lucene-Solr-7.x-Linux/solr/build/solr-core/test/J1/temp/solr.cloud.LeaderFailoverAfterPartitionTest_B6F7C669EDE65067-001/shard-1-001/solr.xml
[junit4] 2> 240825 INFO
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[B6F7C669EDE65067]) [ ]
o.a.s.c.SolrXmlConfig MBean server found:
com.sun.jmx.mbeanserver.JmxMBeanServer@580cde4c, but no JMX reporters were
configured - adding default JMX reporter.
[junit4] 2> 240829 INFO
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[B6F7C669EDE65067]) [ ]
o.a.s.c.ZkContainer Zookeeper client=127.0.0.1:37837/solr
[junit4] 2> 240850 INFO
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[B6F7C669EDE65067])
[n:127.0.0.1:33885_krk ] o.a.s.c.c.ZkStateReader Updated live nodes from
ZooKeeper... (0) -> (1)
[junit4] 2> 240851 INFO
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[B6F7C669EDE65067])
[n:127.0.0.1:33885_krk ] o.a.s.c.Overseer Overseer (id=null) closing
[junit4] 2> 240852 INFO
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[B6F7C669EDE65067])
[n:127.0.0.1:33885_krk ] o.a.s.c.ZkController Register node as live in
ZooKeeper:/live_nodes/127.0.0.1:33885_krk
[junit4] 2> 240852 INFO (zkCallback-383-thread-1) [ ]
o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (1) -> (2)
[junit4] 2> 240853 INFO
(zkCallback-376-thread-2-processing-n:127.0.0.1:45679_krk)
[n:127.0.0.1:45679_krk ] o.a.s.c.c.ZkStateReader Updated live nodes from
ZooKeeper... (1) -> (2)
[junit4] 2> 240860 INFO
(zkCallback-388-thread-1-processing-n:127.0.0.1:33885_krk)
[n:127.0.0.1:33885_krk ] o.a.s.c.c.ZkStateReader Updated live nodes from
ZooKeeper... (1) -> (2)
[junit4] 2> 240987 INFO
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[B6F7C669EDE65067])
[n:127.0.0.1:33885_krk ] o.a.s.m.r.SolrJmxReporter JMX monitoring for
'solr.node' (registry 'solr.node') enabled at server:
com.sun.jmx.mbeanserver.JmxMBeanServer@580cde4c
[junit4] 2> 240993 INFO
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[B6F7C669EDE65067])
[n:127.0.0.1:33885_krk ] o.a.s.m.r.SolrJmxReporter JMX monitoring for
'solr.jvm' (registry 'solr.jvm') enabled at server:
com.sun.jmx.mbeanserver.JmxMBeanServer@580cde4c
[junit4] 2> 240997 INFO
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[B6F7C669EDE65067])
[n:127.0.0.1:33885_krk ] o.a.s.m.r.SolrJmxReporter JMX monitoring for
'solr.jetty' (registry 'solr.jetty') enabled at server:
com.sun.jmx.mbeanserver.JmxMBeanServer@580cde4c
[junit4] 2> 240999 INFO
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[B6F7C669EDE65067])
[n:127.0.0.1:33885_krk ] o.a.s.c.CorePropertiesLocator Found 0 core
definitions underneath
/home/jenkins/workspace/Lucene-Solr-7.x-Linux/solr/build/solr-core/test/J1/temp/solr.cloud.LeaderFailoverAfterPartitionTest_B6F7C669EDE65067-001/shard-1-001/cores
[junit4] 2> 241037 INFO (qtp167089157-1921) [n:127.0.0.1:45679_krk ]
o.a.s.h.a.CollectionsHandler Invoked Collection Action :addreplica with params
node=127.0.0.1:33885_krk&action=ADDREPLICA&collection=collection1&shard=shard2&type=NRT&wt=javabin&version=2
and sendToOCPQueue=true
[junit4] 2> 241039 INFO
(OverseerCollectionConfigSetProcessor-98589824753205252-127.0.0.1:45679_krk-n_0000000000)
[n:127.0.0.1:45679_krk ] o.a.s.c.OverseerTaskQueue Response ZK path:
/overseer/collection-queue-work/qnr-0000000002 doesn't exist. Requestor may
have disconnected from ZooKeeper
[junit4] 2> 241039 INFO
(OverseerThreadFactory-893-thread-3-processing-n:127.0.0.1:45679_krk)
[n:127.0.0.1:45679_krk ] o.a.s.c.AddReplicaCmd Node Identified
127.0.0.1:33885_krk for creating new replica
[junit4] 2> 241040 INFO (SocketProxy-Acceptor-33885) [ ]
o.a.s.c.SocketProxy accepted
Socket[addr=/127.0.0.1,port=45398,localport=33885], receiveBufferSize:531000
[junit4] 2> 241041 INFO (SocketProxy-Acceptor-33885) [ ]
o.a.s.c.SocketProxy proxy connection
Socket[addr=/127.0.0.1,port=36669,localport=43374], receiveBufferSize=530904
[junit4] 2> 241042 INFO (qtp917512293-1971) [n:127.0.0.1:33885_krk ]
o.a.s.h.a.CoreAdminOperation core create command
qt=/admin/cores&collection.configName=conf1&name=collection1_shard2_replica_n41&action=CREATE&collection=collection1&shard=shard2&wt=javabin&version=2&replicaType=NRT
[junit4] 2> 241043 INFO (qtp917512293-1971) [n:127.0.0.1:33885_krk ]
o.a.s.c.TransientSolrCoreCacheDefault Allocating transient cache for 4
transient cores
[junit4] 2> 242065 INFO (qtp917512293-1971) [n:127.0.0.1:33885_krk
c:collection1 s:shard2 x:collection1_shard2_replica_n41] o.a.s.c.SolrConfig
Using Lucene MatchVersion: 7.1.0
[junit4] 2> 242080 INFO (qtp917512293-1971) [n:127.0.0.1:33885_krk
c:collection1 s:shard2 x:collection1_shard2_replica_n41] o.a.s.s.IndexSchema
[collection1_shard2_replica_n41] Schema name=test
[junit4] 2> 242197 INFO (qtp917512293-1971) [n:127.0.0.1:33885_krk
c:collection1 s:shard2 x:collection1_shard2_replica_n41] o.a.s.s.IndexSchema
Loaded schema test/1.0 with uniqueid field id
[junit4] 2> 242210 INFO (qtp917512293-1971) [n:127.0.0.1:33885_krk
c:collection1 s:shard2 x:collection1_shard2_replica_n41] o.a.s.c.CoreContainer
Creating SolrCore 'collection1_shard2_replica_n41' using configuration from
collection collection1, trusted=true
[junit4] 2> 242211 INFO (qtp917512293-1971) [n:127.0.0.1:33885_krk
c:collection1 s:shard2 x:collection1_shard2_replica_n41]
o.a.s.m.r.SolrJmxReporter JMX monitoring for
'solr.core.collection1.shard2.replica_n41' (registry
'solr.core.collection1.shard2.replica_n41') enabled at server:
com.sun.jmx.mbeanserver.JmxMBeanServer@580cde4c
[junit4] 2> 242211 INFO (qtp917512293-1971) [n:127.0.0.1:33885_krk
c:collection1 s:shard2 x:collection1_shard2_replica_n41] o.a.s.c.SolrCore
solr.RecoveryStrategy.Builder
[junit4] 2> 242211 INFO (qtp917512293-1971) [n:127.0.0.1:33885_krk
c:collection1 s:shard2 x:collection1_shard2_replica_n41] o.a.s.c.SolrCore
[[collection1_shard2_replica_n41] ] Opening new SolrCore at
[/home/jenkins/workspace/Lucene-Solr-7.x-Linux/solr/build/solr-core/test/J1/temp/solr.cloud.LeaderFailoverAfterPartitionTest_B6F7C669EDE65067-001/shard-1-001/cores/collection1_shard2_replica_n41],
dataDir=[/home/jenkins/workspace/Lucene-Solr-7.x-Linux/solr/build/solr-core/test/J1/temp/solr.cloud.LeaderFailoverAfterPartitionTest_B6F7C669EDE65067-001/shard-1-001/cores/collection1_shard2_replica_n41/data/]
[junit4] 2> 242213 INFO (qtp917512293-1971) [n:127.0.0.1:33885_krk
c:collection1 s:shard2 x:collection1_shard2_replica_n41]
o.a.s.u.RandomMergePolicy RandomMergePolicy wrapping class
org.apache.lucene.index.TieredMergePolicy: [TieredMergePolicy:
maxMergeAtOnce=15, maxMergeAtOnceExplicit=46, maxMergedSegmentMB=10.3349609375,
floorSegmentMB=1.7568359375, forceMergeDeletesPctAllowed=16.555397847684123,
segmentsPerTier=36.0, maxCFSSegmentSizeMB=8.796093022207999E12,
noCFSRatio=0.3694449328188243
[junit4] 2> 242218 WARN (qtp917512293-1971) [n:127.0.0.1:33885_krk
c:collection1 s:shard2 x:collection1_shard2_replica_n41]
o.a.s.c.RequestHandlers INVALID paramSet a in requestHandler {type =
requestHandler,name = /dump,class = DumpRequestHandler,attributes =
{initParams=a, name=/dump, class=DumpRequestHandler},args =
{defaults={a=A,b=B}}}
[junit4] 2> 242259 INFO (qtp917512293-1971) [n:127.0.0.1:33885_krk
c:collection1 s:shard2 x:collection1_shard2_replica_n41] o.a.s.u.UpdateHandler
Using UpdateLog implementation: org.apache.solr.update.UpdateLog
[junit4] 2> 242260 INFO (qtp917512293-1971) [n:127.0.0.1:33885_krk
c:collection1 s:shard2 x:collection1_shard2_replica_n41] o.a.s.u.UpdateLog
Initializing UpdateLog: dataDir= defaultSyncLevel=FLUSH numRecordsToKeep=100
maxNumLogsToKeep=10 numVersionBuckets=65536
[junit4] 2> 242260 INFO (qtp917512293-1971) [n:127.0.0.1:33885_krk
c:collection1 s:shard2 x:collection1_shard2_replica_n41] o.a.s.u.CommitTracker
Hard AutoCommit: disabled
[junit4] 2> 242260 INFO (qtp917512293-1971) [n:127.0.0.1:33885_krk
c:collection1 s:shard2 x:collection1_shard2_replica_n41] o.a.s.u.CommitTracker
Soft AutoCommit: disabled
[junit4] 2> 242261 INFO (qtp917512293-1971) [n:127.0.0.1:33885_krk
c:collection1 s:shard2 x:collection1_shard2_replica_n41]
o.a.s.u.RandomMergePolicy RandomMergePolicy wrapping class
org.apache.lucene.index.TieredMergePolicy: [TieredMergePolicy:
maxMergeAtOnce=42, maxMergeAtOnceExplicit=37, maxMergedSegmentMB=76.7197265625,
floorSegmentMB=0.66015625, forceMergeDeletesPctAllowed=2.9590274146266795,
segmentsPerTier=43.0, maxCFSSegmentSizeMB=8.796093022207999E12, noCFSRatio=0.0
[junit4] 2> 242261 INFO (qtp917512293-1971) [n:127.0.0.1:33885_krk
c:collection1 s:shard2 x:collection1_shard2_replica_n41]
o.a.s.s.SolrIndexSearcher Opening
[Searcher@4e4797e6[collection1_shard2_replica_n41] main]
[junit4] 2> 242262 INFO (qtp917512293-1971) [n:127.0.0.1:33885_krk
c:collection1 s:shard2 x:collection1_shard2_replica_n41]
o.a.s.r.ManagedResourceStorage Configured ZooKeeperStorageIO with znodeBase:
/configs/conf1
[junit4] 2> 242262 INFO (qtp917512293-1971) [n:127.0.0.1:33885_krk
c:collection1 s:shard2 x:collection1_shard2_replica_n41]
o.a.s.r.ManagedResourceStorage Loaded null at path _rest_managed.json using
ZooKeeperStorageIO:path=/configs/conf1
[junit4] 2> 242263 INFO (qtp917512293-1971) [n:127.0.0.1:33885_krk
c:collection1 s:shard2 x:collection1_shard2_replica_n41]
o.a.s.h.ReplicationHandler Commits will be reserved for 10000
[junit4] 2> 242264 INFO
(searcherExecutor-907-thread-1-processing-n:127.0.0.1:33885_krk
x:collection1_shard2_replica_n41 s:shard2 c:collection1) [n:127.0.0.1:33885_krk
c:collection1 s:shard2 x:collection1_shard2_replica_n41] o.a.s.c.SolrCore
[collection1_shard2_replica_n41] Registered new searcher
Searcher@4e4797e6[collection1_shard2_replica_n41]
main{ExitableDirectoryReader(UninvertingDirectoryReader())}
[junit4] 2> 242264 INFO (qtp917512293-1971) [n:127.0.0.1:33885_krk
c:collection1 s:shard2 x:collection1_shard2_replica_n41] o.a.s.u.UpdateLog
Could not find max version in index or recent updates, using new clock
1577437200851664896
[junit4] 2> 242267 INFO (qtp917512293-1971) [n:127.0.0.1:33885_krk
c:collection1 s:shard2 x:collection1_shard2_replica_n41]
o.a.s.c.ShardLeaderElectionContext Enough replicas found to continue.
[junit4] 2> 242267 INFO (qtp917512293-1971) [n:127.0.0.1:33885_krk
c:collection1 s:shard2 x:collection1_shard2_replica_n41]
o.a.s.c.ShardLeaderElectionContext I may be the new leader - try and sync
[junit4] 2> 242267 INFO (qtp917512293-1971) [n:127.0.0.1:33885_krk
c:collection1 s:shard2 x:collection1_shard2_replica_n41] o.a.s.c.SyncStrategy
Sync replicas to http://127.0.0.1:33885/krk/collection1_shard2_replica_n41/
[junit4] 2> 242267 INFO (qtp917512293-1971) [n:127.0.0.1:33885_krk
c:collection1 s:shard2 x:collection1_shard2_replica_n41] o.a.s.c.SyncStrategy
Sync Success - now sync replicas to me
[junit4] 2> 242267 INFO (qtp917512293-1971) [n:127.0.0.1:33885_krk
c:collection1 s:shard2 x:collection1_shard2_replica_n41] o.a.s.c.SyncStrategy
http://127.0.0.1:33885/krk/collection1_shard2_replica_n41/ has no replicas
[junit4] 2> 242267 INFO (qtp917512293-1971) [n:127.0.0.1:33885_krk
c:collection1 s:shard2 x:collection1_shard2_replica_n41]
o.a.s.c.ShardLeaderElectionContext Found all replicas participating in
election, clear LIR
[junit4] 2> 242268 INFO (qtp917512293-1971) [n:127.0.0.1:33885_krk
c:collection1 s:shard2 x:collection1_shard2_replica_n41]
o.a.s.c.ShardLeaderElectionContext I am the new leader:
http://127.0.0.1:33885/krk/collection1_shard2_replica_n41/ shard2
[junit4] 2> 242419 INFO (qtp917512293-1971) [n:127.0.0.1:33885_krk
c:collection1 s:shard2 x:collection1_shard2_replica_n41] o.a.s.c.ZkController
I am the leader, no recovery necessary
[junit4] 2> 242420 INFO (qtp917512293-1971) [n:127.0.0.1:33885_krk
c:collection1 s:shard2 x:collection1_shard2_replica_n41] o.a.s.s.HttpSolrCall
[admin] webapp=null path=/admin/cores
params={qt=/admin/cores&collection.configName=conf1&name=collection1_shard2_replica_n41&action=CREATE&collection=collection1&shard=shard2&wt=javabin&version=2&replicaType=NRT}
status=0 QTime=1377
[junit4] 2> 242421 INFO (qtp167089157-1921) [n:127.0.0.1:45679_krk ]
o.a.s.s.HttpSolrCall [admin] webapp=null path=/admin/collections
params={node=127.0.0.1:33885_krk&action=ADDREPLICA&collection=collection1&shard=shard2&type=NRT&wt=javabin&version=2}
status=0 QTime=1383
[junit4] 2> 242484 INFO
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[B6F7C669EDE65067]) [ ]
o.a.s.c.AbstractFullDistribZkTestBase create jetty 2 in directory
/home/jenkins/workspace/Lucene-Solr-7.x-Linux/solr/build/solr-core/test/J1/temp/solr.cloud.LeaderFailoverAfterPartitionTest_B6F7C669EDE65067-001/shard-2-001
of type NRT
[junit4] 2> 242484 INFO
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[B6F7C669EDE65067]) [ ]
o.e.j.s.Server jetty-9.3.20.v20170531
[junit4] 2> 242510 INFO
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[B6F7C669EDE65067]) [ ]
o.e.j.s.h.ContextHandler Started
o.e.j.s.ServletContextHandler@82c4d1{/krk,null,AVAILABLE}
[junit4] 2> 242511 INFO
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[B6F7C669EDE65067]) [ ]
o.e.j.s.AbstractConnector Started
ServerConnector@6ddbf6f6{HTTP/1.1,[http/1.1]}{127.0.0.1:45365}
[junit4] 2> 242511 INFO
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[B6F7C669EDE65067]) [ ]
o.e.j.s.Server Started @244076ms
[junit4] 2> 242511 INFO
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[B6F7C669EDE65067]) [ ]
o.a.s.c.s.e.JettySolrRunner Jetty properties:
{solr.data.dir=/home/jenkins/workspace/Lucene-Solr-7.x-Linux/solr/build/solr-core/test/J1/temp/solr.cloud.LeaderFailoverAfterPartitionTest_B6F7C669EDE65067-001/tempDir-001/jetty2,
replicaType=NRT, solrconfig=solrconfig.xml, hostContext=/krk, hostPort=45099,
coreRootDirectory=/home/jenkins/workspace/Lucene-Solr-7.x-Linux/solr/build/solr-core/test/J1/temp/solr.cloud.LeaderFailoverAfterPartitionTest_B6F7C669EDE65067-001/shard-2-001/cores}
[junit4] 2> 242511 ERROR
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[B6F7C669EDE65067]) [ ]
o.a.s.u.StartupLoggingUtils Missing Java Option solr.log.dir. Logging may be
missing or incomplete.
[junit4] 2> 242511 INFO
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[B6F7C669EDE65067]) [ ]
o.a.s.s.SolrDispatchFilter ___ _ Welcome to Apache Solr™ version
7.1.0
[junit4] 2> 242511 INFO
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[B6F7C669EDE65067]) [ ]
o.a.s.s.SolrDispatchFilter / __| ___| |_ _ Starting in cloud mode on port null
[junit4] 2> 242511 INFO
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[B6F7C669EDE65067]) [ ]
o.a.s.s.SolrDispatchFilter \__ \/ _ \ | '_| Install dir: null, Default config
dir: null
[junit4] 2> 242511 INFO
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[B6F7C669EDE65067]) [ ]
o.a.s.s.SolrDispatchFilter |___/\___/_|_| Start time:
2017-09-02T14:09:04.443Z
[junit4] 2> 242513 INFO
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[B6F7C669EDE65067]) [ ]
o.a.s.s.SolrDispatchFilter Loading solr.xml from SolrHome (not found in
ZooKeeper)
[junit4] 2> 242513 INFO
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[B6F7C669EDE65067]) [ ]
o.a.s.c.SolrXmlConfig Loading container configuration from
/home/jenkins/workspace/Lucene-Solr-7.x-Linux/solr/build/solr-core/test/J1/temp/solr.cloud.LeaderFailoverAfterPartitionTest_B6F7C669EDE65067-001/shard-2-001/solr.xml
[junit4] 2> 242515 INFO
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[B6F7C669EDE65067]) [ ]
o.a.s.c.SolrXmlConfig MBean server found:
com.sun.jmx.mbeanserver.JmxMBeanServer@580cde4c, but no JMX reporters were
configured - adding default JMX reporter.
[junit4] 2> 242517 INFO
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[B6F7C669EDE65067]) [ ]
o.a.s.c.ZkContainer Zookeeper client=127.0.0.1:37837/solr
[junit4] 2> 242522 INFO
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[B6F7C669EDE65067])
[n:127.0.0.1:45099_krk ] o.a.s.c.c.ZkStateReader Updated live nodes from
ZooKeeper... (0) -> (2)
[junit4] 2> 242523 INFO
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[B6F7C669EDE65067])
[n:127.0.0.1:45099_krk ] o.a.s.c.Overseer Overseer (id=null) closing
[junit4] 2> 242524 INFO
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[B6F7C669EDE65067])
[n:127.0.0.1:45099_krk ] o.a.s.c.ZkController Register node as live in
ZooKeeper:/live_nodes/127.0.0.1:45099_krk
[junit4] 2> 242524 INFO
(zkCallback-376-thread-2-processing-n:127.0.0.1:45679_krk)
[n:127.0.0.1:45679_krk ] o.a.s.c.c.ZkStateReader Updated live nodes from
ZooKeeper... (2) -> (3)
[junit4] 2> 242524 INFO
(zkCallback-388-thread-1-processing-n:127.0.0.1:33885_krk)
[n:127.0.0.1:33885_krk ] o.a.s.c.c.ZkStateReader Updated live nodes from
ZooKeeper... (2) -> (3)
[junit4] 2> 242524 INFO (zkCallback-383-thread-1) [ ]
o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (2) -> (3)
[junit4] 2> 242524 INFO
(zkCallback-394-thread-1-processing-n:127.0.0.1:45099_krk)
[n:127.0.0.1:45099_krk ] o.a.s.c.c.ZkStateReader Updated live nodes from
ZooKeeper... (2) -> (3)
[junit4] 2> 242552 INFO
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[B6F7C669EDE65067])
[n:127.0.0.1:45099_krk ] o.a.s.m.r.SolrJmxReporter JMX monitoring for
'solr.node' (registry 'solr.node') enabled at server:
com.sun.jmx.mbeanserver.JmxMBeanServer@580cde4c
[junit4] 2> 242557 INFO
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[B6F7C669EDE65067])
[n:127.0.0.1:45099_krk ] o.a.s.m.r.SolrJmxReporter JMX monitoring for
'solr.jvm' (registry 'solr.jvm') enabled at server:
com.sun.jmx.mbeanserver.JmxMBeanServer@580cde4c
[junit4] 2> 242558 INFO
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[B6F7C669EDE65067])
[n:127.0.0.1:45099_krk ] o.a.s.m.r.SolrJmxReporter JMX monitoring for
'solr.jetty' (registry 'solr.jetty') enabled at server:
com.sun.jmx.mbeanserver.JmxMBeanServer@580cde4c
[junit4] 2> 242559 INFO
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[B6F7C669EDE65067])
[n:127.0.0.1:45099_krk ] o.a.s.c.CorePropertiesLocator Found 0 core
definitions underneath
/home/jenkins/workspace/Lucene-Solr-7.x-Linux/solr/build/solr-core/test/J1/temp/solr.cloud.LeaderFailoverAfterPartitionTest_B6F7C669EDE65067-001/shard-2-001/cores
[junit4] 2> 242580 INFO (qtp167089157-1917) [n:127.0.0.1:45679_krk ]
o.a.s.h.a.CollectionsHandler Invoked Collection Action :addreplica with params
node=127.0.0.1:45099_krk&action=ADDREPLICA&collection=collection1&shard=shard1&type=NRT&wt=javabin&version=2
and sendToOCPQueue=true
[junit4] 2> 242581 INFO
(OverseerCollectionConfigSetProcessor-98589824753205252-127.0.0.1:45679_krk-n_0000000000)
[n:127.0.0.1:45679_krk ] o.a.s.c.OverseerTaskQueue Response ZK path:
/overseer/collection-queue-work/qnr-0000000004 doesn't exist. Requestor may
have disconnected from ZooKeeper
[junit4] 2> 242581 INFO
(OverseerThreadFactory-893-thread-4-processing-n:127.0.0.1:45679_krk)
[n:127.0.0.1:45679_krk ] o.a.s.c.AddReplicaCmd Node Identified
127.0.0.1:45099_krk for creating new replica
[junit4] 2> 242585 INFO (SocketProxy-Acceptor-45099) [ ]
o.a.s.c.SocketProxy accepted
Socket[addr=/127.0.0.1,port=34992,localport=45099], receiveBufferSize:531000
[junit4] 2> 242592 INFO (qtp656465733-2003) [n:127.0.0.1:45099_krk ]
o.a.s.h.a.CoreAdminOperation core create command
qt=/admin/cores&collection.configName=conf1&name=collection1_shard1_replica_n43&action=CREATE&collection=collection1&shard=shard1&wt=javabin&version=2&replicaType=NRT
[junit4] 2> 242592 INFO (qtp656465733-2003) [n:127.0.0.1:45099_krk ]
o.a.s.c.TransientSolrCoreCacheDefault Allocating transient cache for 4
transient cores
[junit4] 2> 242597 INFO (SocketProxy-Acceptor-45099) [ ]
o.a.s.c.SocketProxy proxy connection
Socket[addr=/127.0.0.1,port=45365,localport=47654], receiveBufferSize=530904
[junit4] 2> 243607 INFO (qtp656465733-2003) [n:127.0.0.1:45099_krk
c:collection1 s:shard1 x:collection1_shard1_replica_n43] o.a.s.c.SolrConfig
Using Lucene MatchVersion: 7.1.0
[junit4] 2> 243615 INFO (qtp656465733-2003) [n:127.0.0.1:45099_krk
c:collection1 s:shard1 x:collection1_shard1_replica_n43] o.a.s.s.IndexSchema
[collection1_shard1_replica_n43] Schema name=test
[junit4] 2> 243676 INFO (qtp656465733-2003) [n:127.0.0.1:45099_krk
c:collection1 s:shard1 x:collection1_shard1_replica_n43] o.a.s.s.IndexSchema
Loaded schema test/1.0 with uniqueid field id
[junit4] 2> 243682 INFO (qtp656465733-2003) [n:127.0.0.1:45099_krk
c:collection1 s:shard1 x:collection1_shard1_replica_n43] o.a.s.c.CoreContainer
Creating SolrCore 'collection1_shard1_replica_n43' using configuration from
collection collection1, trusted=true
[junit4] 2> 243683 INFO (qtp656465733-2003) [n:127.0.0.1:45099_krk
c:collection1 s:shard1 x:collection1_shard1_replica_n43]
o.a.s.m.r.SolrJmxReporter JMX monitoring for
'solr.core.collection1.shard1.replica_n43' (registry
'solr.core.collection1.shard1.replica_n43') enabled at server:
com.sun.jmx.mbeanserver.JmxMBeanServer@580cde4c
[junit4] 2> 243683 INFO (qtp656465733-2003) [n:127.0.0.1:45099_krk
c:collection1 s:shard1 x:collection1_shard1_replica_n43] o.a.s.c.SolrCore
solr.RecoveryStrategy.Builder
[junit4] 2> 243683 INFO (qtp656465733-2003) [n:127.0.0.1:45099_krk
c:collection1 s:shard1 x:collection1_shard1_replica_n43] o.a.s.c.SolrCore
[[collection1_shard1_replica_n43] ] Opening new SolrCore at
[/home/jenkins/workspace/Lucene-Solr-7.x-Linux/solr/build/solr-core/test/J1/temp/solr.cloud.LeaderFailoverAfterPartitionTest_B6F7C669EDE65067-001/shard-2-001/cores/collection1_shard1_replica_n43],
dataDir=[/home/jenkins/workspace/Lucene-Solr-7.x-Linux/solr/build/solr-core/test/J1/temp/solr.cloud.LeaderFailoverAfterPartitionTest_B6F7C669EDE65067-001/shard-2-001/cores/collection1_shard1_replica_n43/data/]
[junit4] 2> 243685 INFO (qtp656465733-2003) [n:127.0.0.1:45099_krk
c:collection1 s:shard1 x:collection1_shard1_replica_n43]
o.a.s.u.RandomMergePolicy RandomMergePolicy wrapping class
org.apache.lucene.index.TieredMergePolicy: [TieredMergePolicy:
maxMergeAtOnce=15, maxMergeAtOnceExplicit=46, maxMergedSegmentMB=10.3349609375,
floorSegmentMB=1.7568359375, forceMergeDeletesPctAllowed=16.555397847684123,
segmentsPerTier=36.0, maxCFSSegmentSizeMB=8.796093022207999E12,
noCFSRatio=0.3694449328188243
[junit4] 2> 243687 WARN (qtp656465733-2003) [n:127.0.0.1:45099_krk
c:collection1 s:shard1 x:collection1_shard1_replica_n43]
o.a.s.c.RequestHandlers INVALID paramSet a in requestHandler {type =
requestHandler,name = /dump,class = DumpRequestHandler,attributes =
{initParams=a, name=/dump, class=DumpRequestHandler},args =
{defaults={a=A,b=B}}}
[junit4] 2> 243716 INFO (qtp656465733-2003) [n:127.0.0.1:45099_krk
c:collection1 s:shard1 x:collection1_shard1_replica_n43] o.a.s.u.UpdateHandler
Using UpdateLog implementation: org.apache.solr.update.UpdateLog
[junit4] 2> 243716 INFO (qtp656465733-2003) [n:127.0.0.1:45099_krk
c:collection1 s:shard1 x:collection1_shard1_replica_n43] o.a.s.u.UpdateLog
Initializing UpdateLog: dataDir= defaultSyncLevel=FLUSH numRecordsToKeep=100
maxNumLogsToKeep=10 numVersionBuckets=65536
[junit4] 2> 243717 INFO (qtp656465733-2003) [n:127.0.0.1:45099_krk
c:collection1 s:shard1 x:collection1_shard1_replica_n43] o.a.s.u.CommitTracker
Hard AutoCommit: disabled
[junit4] 2> 243717 INFO (qtp656465733-2003) [n:127.0.0.1:45099_krk
c:collection1 s:shard1 x:collection1_shard1_replica_n43] o.a.s.u.CommitTracker
Soft AutoCommit: disabled
[junit4] 2> 243718 INFO (qtp656465733-2003) [n:127.0.0.1:45099_krk
c:collection1 s:shard1 x:collection1_shard1_replica_n43]
o.a.s.u.RandomMergePolicy RandomMergePolicy wrapping class
org.apache.lucene.index.TieredMergePolicy: [TieredMergePolicy:
maxMergeAtOnce=42, maxMergeAtOnceExplicit=37, maxMergedSegmentMB=76.7197265625,
floorSegmentMB=0.66015625, forceMergeDeletesPctAllowed=2.9590274146266795,
segmentsPerTier=43.0, maxCFSSegmentSizeMB=8.796093022207999E12, noCFSRatio=0.0
[junit4] 2> 243718 INFO (qtp656465733-2003) [n:127.0.0.1:45099_krk
c:collection1 s:shard1 x:collection1_shard1_replica_n43]
o.a.s.s.SolrIndexSearcher Opening
[Searcher@14502bd8[collection1_shard1_replica_n43] main]
[junit4] 2> 243720 INFO (qtp656465733-2003) [n:127.0.0.1:45099_krk
c:collection1 s:shard1 x:collection1_shard1_replica_n43]
o.a.s.r.ManagedResourceStorage Configured ZooKeeperStorageIO with znodeBase:
/configs/conf1
[junit4] 2> 243720 INFO (qtp656465733-2003) [n:127.0.0.1:45099_krk
c:collection1 s:shard1 x:collection1_shard1_replica_n43]
o.a.s.r.ManagedResourceStorage Loaded null at path _rest_managed.json using
ZooKeeperStorageIO:path=/configs/conf1
[junit4] 2> 243721 INFO (qtp656465733-2003) [n:127.0.0.1:45099_krk
c:collection1 s:shard1 x:collection1_shard1_replica_n43]
o.a.s.h.ReplicationHandler Commits will be reserved for 10000
[junit4] 2> 243721 INFO (qtp656465733-2003) [n:127.0.0.1:45099_krk
c:collection1 s:shard1 x:collection1_shard1_replica_n43] o.a.s.u.UpdateLog
Could not find max version in index or recent updates, using new clock
1577437202379440128
[junit4] 2> 243724 INFO (qtp656465733-2003) [n:127.0.0.1:45099_krk
c:collection1 s:shard1 x:collection1_shard1_replica_n43]
o.a.s.c.ShardLeaderElectionContext Enough replicas found to continue.
[junit4] 2> 243724 INFO (qtp656465733-2003) [n:127.0.0.1:45099_krk
c:collection1 s:shard1 x:collection1_shard1_replica_n43]
o.a.s.c.ShardLeaderElectionContext I may be the new leader - try and sync
[junit4] 2> 243724 INFO (qtp656465733-2003) [n:127.0.0.1:45099_krk
c:collection1 s:shard1 x:collection1_shard1_replica_n43] o.a.s.c.SyncStrategy
Sync replicas to http://127.0.0.1:45099/krk/collection1_shard1_replica_n43/
[junit4] 2> 243724 INFO (qtp656465733-2003) [n:127.0.0.1:45099_krk
c:collection1 s:shard1 x:collection1_shard1_replica_n43] o.a.s.c.SyncStrategy
Sync Success - now sync replicas to me
[junit4] 2> 243724 INFO (qtp656465733-2003) [n:127.0.0.1:45099_krk
c:collection1 s:shard1 x:collection1_shard1_replica_n43] o.a.s.c.SyncStrategy
http://127.0.0.1:45099/krk/collection1_shard1_replica_n43/ has no replicas
[junit4] 2> 243724 INFO (qtp656465733-2003) [n:127.0.0.1:45099_krk
c:collection1 s:shard1 x:collection1_shard1_replica_n43]
o.a.s.c.ShardLeaderElectionContext Found all replicas participating in
election, clear LIR
[junit4] 2> 243726 INFO (qtp656465733-2003) [n:127.0.0.1:45099_krk
c:collection1 s:shard1 x:collection1_shard1_replica_n43]
o.a.s.c.ShardLeaderElectionContext I am the new leader:
http://127.0.0.1:45099/krk/collection1_shard1_replica_n43/ shard1
[junit4] 2> 243733 INFO
(searcherExecutor-918-thread-1-processing-n:127.0.0.1:45099_krk
x:collection1_shard1_replica_n43 s:shard1 c:collection1) [n:127.0.0.1:45099_krk
c:collection1 s:shard1 x:collection1_shard1_replica_n43] o.a.s.c.SolrCore
[collection1_shard1_replica_n43] Registered new searcher
Searcher@14502bd8[collection1_shard1_replica_n43]
main{ExitableDirectoryReader(UninvertingDirectoryReader())}
[junit4] 2> 243877 INFO (qtp656465733-2003) [n:127.0.0.1:45099_krk
c:collection1 s:shard1 x:collection1_shard1_replica_n43] o.a.s.c.ZkController
I am the leader, no recovery necessary
[junit4] 2> 243877 INFO (qtp656465733-2003) [n:127.0.0.1:45099_krk
c:collection1 s:shard1 x:collection1_shard1_replica_n43] o.a.s.s.HttpSolrCall
[admin] webapp=null path=/admin/cores
params={qt=/admin/cores&collection.configName=conf1&name=collection1_shard1_replica_n43&action=CREATE&collection=collection1&shard=shard1&wt=javabin&version=2&replicaType=NRT}
status=0 QTime=1285
[junit4] 2> 243879 INFO (qtp167089157-1917) [n:127.0.0.1:45679_krk ]
o.a.s.s.HttpSolrCall [admin] webapp=null path=/admin/collections
params={node=127.0.0.1:45099_krk&action=ADDREPLICA&collection=collection1&shard=shard1&type=NRT&wt=javabin&version=2}
status=0 QTime=1299
[junit4] 2> 243982 INFO
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[B6F7C669EDE65067]) [ ]
o.a.s.c.AbstractFullDistribZkTestBase create jetty 3 in directory
/home/jenkins/workspace/Lucene-Solr-7.x-Linux/solr/build/solr-core/test/J1/temp/solr.cloud.LeaderFailoverAfterPartitionTest_B6F7C669EDE65067-001/shard-3-001
of type NRT
[junit4] 2> 243983 INFO
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[B6F7C669EDE65067]) [ ]
o.e.j.s.Server jetty-9.3.20.v20170531
[junit4] 2> 244023 INFO
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[B6F7C669EDE65067]) [ ]
o.e.j.s.h.ContextHandler Started
o.e.j.s.ServletContextHandler@604a7ee{/krk,null,AVAILABLE}
[junit4] 2> 244025 INFO
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[B6F7C669EDE65067]) [ ]
o.e.j.s.AbstractConnector Started
ServerConnector@16a49819{HTTP/1.1,[http/1.1]}{127.0.0.1:44077}
[junit4] 2> 244025 INFO
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[B6F7C669EDE65067]) [ ]
o.e.j.s.Server Started @245590ms
[junit4] 2> 244025 INFO
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[B6F7C669EDE65067]) [ ]
o.a.s.c.s.e.JettySolrRunner Jetty properties:
{solr.data.dir=/home/jenkins/workspace/Lucene-Solr-7.x-Linux/solr/build/solr-core/test/J1/temp/solr.cloud.LeaderFailoverAfterPartitionTest_B6F7C669EDE65067-001/tempDir-001/jetty3,
replicaType=NRT, solrconfig=solrconfig.xml, hostContext=/krk, hostPort=42485,
coreRootDirectory=/home/jenkins/workspace/Lucene-Solr-7.x-Linux/solr/build/solr-core/test/J1/temp/solr.cloud.LeaderFailoverAfterPartitionTest_B6F7C669EDE65067-001/shard-3-001/cores}
[junit4] 2> 244025 ERROR
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[B6F7C669EDE65067]) [ ]
o.a.s.u.StartupLoggingUtils Missing Java Option solr.log.dir. Logging may be
missing or incomplete.
[junit4] 2> 244032 INFO
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[B6F7C669EDE65067]) [ ]
o.a.s.s.SolrDispatchFilter ___ _ Welcome to Apache Solr™ version
7.1.0
[junit4] 2> 244032 INFO
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[B6F7C669EDE65067]) [ ]
o.a.s.s.SolrDispatchFilter / __| ___| |_ _ Starting in cloud mode on port null
[junit4] 2> 244032 INFO
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[B6F7C669EDE65067]) [ ]
o.a.s.s.SolrDispatchFilter \__ \/ _ \ | '_| Install dir: null, Default config
dir: null
[junit4] 2> 244032 INFO
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[B6F7C669EDE65067]) [ ]
o.a.s.s.SolrDispatchFilter |___/\___/_|_| Start time:
2017-09-02T14:09:05.964Z
[junit4] 2> 244034 INFO
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[B6F7C669EDE65067]) [ ]
o.a.s.s.SolrDispatchFilter Loading solr.xml from SolrHome (not found in
ZooKeeper)
[junit4] 2> 244034 INFO
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[B6F7C669EDE65067]) [ ]
o.a.s.c.SolrXmlConfig Loading container configuration from
/home/jenkins/workspace/Lucene-Solr-7.x-Linux/solr/build/solr-core/test/J1/temp/solr.cloud.LeaderFailoverAfterPartitionTest_B6F7C669EDE65067-001/shard-3-001/solr.xml
[junit4] 2> 244037 INFO
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[B6F7C669EDE65067]) [ ]
o.a.s.c.SolrXmlConfig MBean server found:
com.sun.jmx.mbeanserver.JmxMBeanServer@580cde4c, but no JMX reporters were
configured - adding default JMX reporter.
[junit4] 2> 244039 INFO
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[B6F7C669EDE65067]) [ ]
o.a.s.c.ZkContainer Zookeeper client=127.0.0.1:37837/solr
[junit4] 2> 244043 INFO
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[B6F7C669EDE65067])
[n:127.0.0.1:42485_krk ] o.a.s.c.c.ZkStateReader Updated live nodes from
ZooKeeper... (0) -> (3)
[junit4] 2> 244044 INFO
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[B6F7C669EDE65067])
[n:127.0.0.1:42485_krk ] o.a.s.c.Overseer Overseer (id=null) closing
[junit4] 2> 244051 INFO
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[B6F7C669EDE65067])
[n:127.0.0.1:42485_krk ] o.a.s.c.ZkController Register node as live in
ZooKeeper:/live_nodes/127.0.0.1:42485_krk
[junit4] 2> 244053 INFO
(zkCallback-394-thread-1-processing-n:127.0.0.1:45099_krk)
[n:127.0.0.1:45099_krk ] o.a.s.c.c.ZkStateReader Updated live nodes from
ZooKeeper... (3) -> (4)
[junit4] 2> 244053 INFO
(zkCallback-388-thread-1-processing-n:127.0.0.1:33885_krk)
[n:127.0.0.1:33885_krk ] o.a.s.c.c.ZkStateReader Updated live nodes from
ZooKeeper... (3) -> (4)
[junit4] 2> 244053 INFO
(zkCallback-376-thread-2-processing-n:127.0.0.1:45679_krk)
[n:127.0.0.1:45679_krk ] o.a.s.c.c.ZkStateReader Updated live nodes from
ZooKeeper... (3) -> (4)
[junit4] 2> 244054 INFO
(zkCallback-400-thread-1-processing-n:127.0.0.1:42485_krk)
[n:127.0.0.1:42485_krk ] o.a.s.c.c.ZkStateReader Updated live nodes from
ZooKeeper... (3) -> (4)
[junit4] 2> 244056 INFO (zkCallback-383-thread-1) [ ]
o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (3) -> (4)
[junit4] 2> 244131 INFO
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[B6F7C669EDE65067])
[n:127.0.0.1:42485_krk ] o.a.s.m.r.SolrJmxReporter JMX monitoring for
'solr.node' (registry 'solr.node') enabled at server:
com.sun.jmx.mbeanserver.JmxMBeanServer@580cde4c
[junit4] 2> 244140 INFO
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[B6F7C669EDE65067])
[n:127.0.0.1:42485_krk ] o.a.s.m.r.SolrJmxReporter JMX monitoring for
'solr.jvm' (registry 'solr.jvm') enabled at server:
com.sun.jmx.mbeanserver.JmxMBeanServer@580cde4c
[junit4] 2> 244140 INFO
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[B6F7C669EDE65067])
[n:127.0.0.1:42485_krk ] o.a.s.m.r.SolrJmxReporter JMX monitoring for
'solr.jetty' (registry 'solr.jetty') enabled at server:
com.sun.jmx.mbeanserver.JmxMBeanServer@580cde4c
[junit4] 2> 244142 INFO
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[B6F7C669EDE65067])
[n:127.0.0.1:42485_krk ] o.a.s.c.CorePropertiesLocator Found 0 core
definitions underneath
/home/jenkins/workspace/Lucene-Solr-7.x-Linux/solr/build/solr-core/test/J1/temp/solr.cloud.LeaderFailoverAfterPartitionTest_B6F7C669EDE65067-001/shard-3-001/cores
[junit4] 2> 244185 INFO (qtp167089157-1923) [n:127.0.0.1:45679_krk ]
o.a.s.h.a.CollectionsHandler Invoked Collection Action :addreplica with params
node=127.0.0.1:42485_krk&action=ADDREPLICA&collection=collection1&shard=shard2&type=NRT&wt=javabin&version=2
and sendToOCPQueue=true
[junit4] 2> 244187 INFO
(OverseerCollectionConfigSetProcessor-98589824753205252-127.0.0.1:45679_krk-n_0000000000)
[n:127.0.0.1:45679_krk ] o.a.s.c.OverseerTaskQueue Response ZK path:
/overseer/collection-queue-work/qnr-0000000006 doesn't exist. Requestor may
have disconnected from ZooKeeper
[junit4] 2> 244187 INFO
(OverseerThreadFactory-893-thread-5-processing-n:127.0.0.1:45679_krk)
[n:127.0.0.1:45679_krk ] o.a.s.c.AddReplicaCmd Node Identified
127.0.0.1:42485_krk for creating new replica
[junit4] 2> 244188 INFO (SocketProxy-Acceptor-42485) [ ]
o.a.s.c.SocketProxy accepted
Socket[addr=/127.0.0.1,port=33984,localport=42485], receiveBufferSize:531000
[junit4] 2> 244189 INFO (SocketProxy-Acceptor-42485) [ ]
o.a.s.c.SocketProxy proxy connection
Socket[addr=/127.0.0.1,port=44077,localport=44896], receiveBufferSize=530904
[junit4] 2> 244193 INFO (qtp2065699276-2033) [n:127.0.0.1:42485_krk ]
o.a.s.h.a.CoreAdminOperation core create command
qt=/admin/cores&collection.configName=conf1&name=collection1_shard2_replica_n45&action=CREATE&collection=collection1&shard=shard2&wt=javabin&version=2&replicaType=NRT
[junit4] 2> 244193 INFO (qtp2065699276-2033) [n:127.0.0.1:42485_krk ]
o.a.s.c.TransientSolrCoreCacheDefault Allocating transient cache for 4
transient cores
[junit4] 2> 245214 INFO (qtp2065699276-2033) [n:127.0.0.1:42485_krk
c:collection1 s:shard2 x:collection1_shard2_replica_n45] o.a.s.c.SolrConfig
Using Lucene MatchVersion: 7.1.0
[junit4] 2> 245227 INFO (qtp2065699276-2033) [n:127.0.0.1:42485_krk
c:collection1 s:shard2 x:collection1_shard2_replica_n45] o.a.s.s.IndexSchema
[collection1_shard2_replica_n45] Schema name=test
[junit4] 2> 245446 INFO (qtp2065699276-2033) [n:127.0.0.1:42485_krk
c:collection1 s:shard2 x:collection1_shard2_replica_n45] o.a.s.s.IndexSchema
Loaded schema test/1.0 with uniqueid field id
[junit4] 2> 245484 INFO (qtp2065699276-2033) [n:127.0.0.1:42485_krk
c:collection1 s:shard2 x:collection1_shard2_replica_n45] o.a.s.c.CoreContainer
Creating SolrCore 'collection1_shard2_replica_n45' using configuration from
collection collection1, trusted=true
[junit4] 2> 245485 INFO (qtp2065699276-2033) [n:127.0.0.1:42485_krk
c:collection1 s:shard2 x:collection1_shard2_replica_n45]
o.a.s.m.r.SolrJmxReporter JMX monitoring for
'solr.core.collection1.shard2.replica_n45' (registry
'solr.core.collection1.shard2.replica_n45') enabled at server:
com.sun.jmx.mbeanserver.JmxMBeanServer@580cde4c
[junit4] 2> 245485 INFO (qtp2065699276-2033) [n:127.0.0.1:42485_krk
c:collection1 s:shard2 x:collection1_shard2_replica_n45] o.a.s.c.SolrCore
solr.RecoveryStrategy.Builder
[junit4] 2> 245485 INFO (qtp2065699276-2033) [n:127.0.0.1:42485_krk
c:collection1 s:shard2 x:collection1_shard2_replica_n45] o.a.s.c.SolrCore
[[collection1_shard2_replica_n45] ] Opening new SolrCore at
[/home/jenkins/workspace/Lucene-Solr-7.x-Linux/solr/build/solr-core/test/J1/temp/solr.cloud.LeaderFailoverAfterPartitionTest_B6F7C669EDE65067-001/shard-3-001/cores/collection1_shard2_replica_n45],
dataDir=[/home/jenkins/workspace/Lucene-Solr-7.x-Linux/solr/build/solr-core/test/J1/temp/solr.cloud.LeaderFailoverAfterPartitionTest_B6F7C669EDE65067-001/shard-3-001/cores/collection1_shard2_replica_n45/data/]
[junit4] 2> 245488 INFO (qtp2065699276-2033) [n:127.0.0.1:42485_krk
c:collection1 s:shard2 x:collection1_shard2_replica_n45]
o.a.s.u.RandomMergePolicy RandomMergePolicy wrapping class
org.apache.lucene.index.TieredMergePolicy: [TieredMergePolicy:
maxMergeAtOnce=15, maxMergeAtOnceExplicit=46, maxMergedSegmentMB=10.3349609375,
floorSegmentMB=1.7568359375, forceMergeDeletesPctAllowed=16.555397847684123,
segmentsPerTier=36.0, maxCFSSegmentSizeMB=8.796093022207999E12,
noCFSRatio=0.3694449328188243
[junit4] 2> 245492 WARN (qtp2065699276-2033) [n:127.0.0.1:42485_krk
c:collection1 s:shard2 x:collection1_shard2_replica_n45]
o.a.s.c.RequestHandlers INVALID paramSet a in requestHandler {type =
requestHandler,name = /dump,class = DumpRequestHandler,attributes =
{initParams=a, name=/dump, class=DumpRequestHandler},args =
{defaults={a=A,b=B}}}
[junit4] 2> 245540 INFO (qtp2065699276-2033) [n:127.0.0.1:42485_krk
c:collection1 s:shard2 x:collection1_shard2_replica_n45] o.a.s.u.UpdateHandler
Using UpdateLog implementation: org.apache.solr.update.UpdateLog
[junit4] 2> 245540 INFO (qtp2065699276-2033) [n:127.0.0.1:42485_krk
c:collection1 s:shard2 x:collection1_shard2_replica_n45] o.a.s.u.UpdateLog
Initializing UpdateLog: dataDir= defaultSyncLevel=FLUSH numRecordsToKeep=100
maxNumLogsToKeep=10 numVersionBuckets=65536
[junit4] 2> 245541 INFO (qtp2065699276-2033) [n:127.0.0.1:42485_krk
c:collection1 s:shard2 x:collection1_shard2_replica_n45] o.a.s.u.CommitTracker
Hard AutoCommit: disabled
[junit4] 2> 245541 INFO (qtp2065699276-2033) [n:127.0.0.1:42485_krk
c:collection1 s:shard2 x:collection1_shard2_replica_n45] o.a.s.u.CommitTracker
Soft AutoCommit: disabled
[junit4] 2> 245542 INFO (qtp2065699276-2033) [n:127.0.0.1:42485_krk
c:collection1 s:shard2 x:collection1_shard2_replica_n45]
o.a.s.u.RandomMergePolicy RandomMergePolicy wrapping class
org.apache.lucene.index.TieredMergePolicy: [TieredMergePolicy:
maxMergeAtOnce=42, maxMergeAtOnceExplicit=37, maxMergedSegmentMB=76.7197265625,
floorSegmentMB=0.66015625, forceMergeDeletesPctAllowed=2.9590274146266795,
segmentsPerTier=43.0, maxCFSSegmentSizeMB=8.796093022207999E12, noCFSRatio=0.0
[junit4] 2> 245542 INFO (qtp2065699276-2033) [n:127.0.0.1:42485_krk
c:collection1 s:shard2 x:collection1_shard2_replica_n45]
o.a.s.s.SolrIndexSearcher Opening
[Searcher@6c5b5b9f[collection1_shard2_replica_n45] main]
[junit4] 2> 245543 INFO (qtp2065699276-2033) [n:127.0.0.1:42485_krk
c:collection1 s:shard2 x:collection1_shard2_replica_n45]
o.a.s.r.ManagedResourceStorage Configured ZooKeeperStorageIO with znodeBase:
/configs/conf1
[junit4] 2> 245543 INFO (qtp2065699276-2033) [n:127.0.0.1:42485_krk
c:collection1 s:shard2 x:collection1_shard2_replica_n45]
o.a.s.r.ManagedResourceStorage Loaded null at path _rest_managed.json using
ZooKeeperStorageIO:path=/configs/conf1
[junit4] 2> 245543 INFO (qtp2065699276-2033) [n:127.0.0.1:42485_krk
c:collection1 s:shard2 x:collection1_shard2_replica_n45]
o.a.s.h.ReplicationHandler Commits will be reserved for 10000
[junit4] 2> 245545 INFO
(searcherExecutor-929-thread-1-processing-n:127.0.0.1:42485_krk
x:collection1_shard2_replica_n45 s:shard2 c:collection1) [n:127.0.0.1:42485_krk
c:collection1 s:shard2 x:collection1_shard2_replica_n45] o.a.s.c.SolrCore
[collection1_shard2_replica_n45] Registered new searcher
Searcher@6c5b5b9f[collection1_shard2_replica_n45]
main{ExitableDirectoryReader(UninvertingDirectoryReader())}
[junit4] 2> 245545 INFO (qtp2065699276-2033) [n:127.0.0.1:42485_krk
c:collection1 s:shard2 x:collection1_shard2_replica_n45] o.a.s.u.UpdateLog
Could not find max version in index or recent updates, using new clock
1577437204292042752
[junit4] 2> 245547 INFO (qtp2065699276-2033) [n:127.0.0.1:42485_krk
c:collection1 s:shard2 x:collection1_shard2_replica_n45] o.a.s.c.ZkController
Core needs to recover:collection1_shard2_replica_n45
[junit4] 2> 245547 INFO
(updateExecutor-397-thread-1-processing-n:127.0.0.1:42485_krk
x:collection1_shard2_replica_n45 s:shard2 c:collection1) [n:127.0.0.1:42485_krk
c:collection1 s:shard2 r:core_node46 x:collection1_shard2_replica_n45]
o.a.s.u.DefaultSolrCoreState Running recovery
[junit4] 2> 245547 INFO (qtp2065699276-2033) [n:127.0.0.1:42485_krk
c:collection1 s:shard2 x:collection1_shard2_replica_n45] o.a.s.s.HttpSolrCall
[admin] webapp=null path=/admin/cores
params={qt=/admin/cores&collection.configName=conf1&name=collection1_shard2_replica_n45&action=CREATE&collection=collection1&shard=shard2&wt=javabin&version=2&replicaType=NRT}
status=0 QTime=1354
[junit4] 2> 245548 INFO
(recoveryExecutor-398-thread-1-processing-n:127.0.0.1:42485_krk
x:collection1_shard2_replica_n45 s:shard2 c:collection1 r:core_node46)
[n:127.0.0.1:42485_krk c:collection1 s:shard2 r:core_node46
x:collection1_shard2_replica_n45] o.a.s.c.RecoveryStrategy Starting recovery
process. recoveringAfterStartup=true
[junit4] 2> 245548 INFO
(recoveryExecutor-398-thread-1-processing-n:127.0.0.1:42485_krk
x:collection1_shard2_replica_n45 s:shard2 c:collection1 r:core_node46)
[n:127.0.0.1:42485_krk c:collection1 s:shard2 r:core_node46
x:collection1_shard2_replica_n45] o.a.s.c.RecoveryStrategy ######
startupVersions=[[]]
[junit4] 2> 245548 INFO
(recoveryExecutor-398-thread-1-processing-n:127.0.0.1:42485_krk
x:collection1_shard2_replica_n45 s:shard2 c:collection1 r:core_node46)
[n:127.0.0.1:42485_krk c:collection1 s:shard2 r:core_node46
x:collection1_shard2_replica_n45] o.a.s.c.RecoveryStrategy Begin buffering
updates. core=[collection1_shard2_replica_n45]
[junit4] 2> 245548 INFO
(recoveryExecutor-398-thread-1-processing-n:127.0.0.1:42485_krk
x:collection1_shard2_replica_n45 s:shard2 c:collection1 r:core_node46)
[n:127.0.0.1:42485_krk c:collection1 s:shard2 r:core_node46
x:collection1_shard2_replica_n45] o.a.s.u.UpdateLog Starting to buffer updates.
FSUpdateLog{state=ACTIVE, tlog=null}
[junit4] 2> 245548 INFO
(recoveryExecutor-398-thread-1-processing-n:127.0.0.1:42485_krk
x:collection1_shard2_replica_n45 s:shard2 c:collection1 r:core_node46)
[n:127.0.0.1:42485_krk c:collection1 s:shard2 r:core_node46
x:collection1_shard2_replica_n45] o.a.s.c.RecoveryStrategy Publishing state of
core [collection1_shard2_replica_n45] as recovering, leader is
[http://127.0.0.1:33885/krk/collection1_shard2_replica_n41/] and I am
[http://127.0.0.1:42485/krk/collection1_shard2_replica_n45/]
[junit4] 2> 245550 INFO (qtp167089157-1923) [n:127.0.0.1:45679_krk ]
o.a.s.s.HttpSolrCall [admin] webapp=null path=/admin/collections
params={node=127.0.0.1:42485_krk&action=ADDREPLICA&collection=collection1&shard=shard2&type=NRT&wt=javabin&version=2}
status=0 QTime=1364
[junit4] 2> 245552 INFO
(recoveryExecutor-398-thread-1-processing-n:127.0.0.1:42485_krk
x:collection1_shard2_replica_n45 s:shard2 c:collection1 r:core_node46)
[n:127.0.0.1:42485_krk c:collection1 s:shard2 r:core_node46
x:collection1_shard2_replica_n45] o.a.s.c.RecoveryStrategy Sending prep
recovery command to [http://127.0.0.1:33885/krk]; [WaitForState:
action=PREPRECOVERY&core=collection1_shard2_replica_n41&nodeName=127.0.0.1:42485_krk&coreNodeName=core_node46&state=recovering&checkLive=true&onlyIfLeader=true&onlyIfLeaderActive=true]
[junit4] 2> 245553 INFO (SocketProxy-Acceptor-33885) [ ]
o.a.s.c.SocketProxy accepted
Socket[addr=/127.0.0.1,port=45496,localport=33885], receiveBufferSize:531000
[junit4] 2> 245554 INFO (SocketProxy-Acceptor-33885) [ ]
o.a.s.c.SocketProxy proxy connection
Socket[addr=/127.0.0.1,port=36669,localport=43472], receiveBufferSize=530904
[junit4] 2> 245554 INFO
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[B6F7C669EDE65067]) [ ]
o.a.s.SolrTestCaseJ4 ###Starting test
[junit4] 2> 245554 INFO
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[B6F7C669EDE65067]) [ ]
o.a.s.c.AbstractFullDistribZkTestBase Wait for recoveries to finish - wait
30000 for each attempt
[junit4] 2> 245554 INFO (qtp917512293-1973) [n:127.0.0.1:33885_krk ]
o.a.s.h.a.PrepRecoveryOp Going to wait for coreNodeName: core_node46, state:
recovering, checkLive: true, onlyIfLeader: true, onlyIfLeaderActive: true,
maxTime: 183 s
[junit4] 2> 245554 INFO
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[B6F7C669EDE65067]) [ ]
o.a.s.c.AbstractDistribZkTestBase Wait for recoveries to finish - collection:
collection1 failOnTimeout:true timeout (sec):30000
[junit4] 2> 245555 INFO (qtp917512293-1973) [n:127.0.0.1:33885_krk ]
o.a.s.h.a.PrepRecoveryOp In WaitForState(recovering): collection=collection1,
shard=shard2, thisCore=collection1_shard2_replica_n41,
leaderDoesNotNeedRecovery=false, isLeader? true, live=true, checkLive=true,
currentState=down, localState=active, nodeName=127.0.0.1:42485_krk,
coreNodeName=core_node46, onlyIfActiveCheckResult=false, nodeProps:
core_node46:{"core":"collection1_shard2_replica_n45","base_url":"http://127.0.0.1:42485/krk","node_name":"127.0.0.1:42485_krk","state":"down","type":"NRT"}
[junit4] 2> 246188 INFO
(OverseerCollectionConfigSetProcessor-98589824753205252-127.0.0.1:45679_krk-n_0000000000)
[n:127.0.0.1:45679_krk ] o.a.s.c.OverseerTaskQueue Response ZK path:
/overseer/collection-queue-work/qnr-0000000008 doesn't exist. Requestor may
have disconnected from ZooKeeper
[junit4] 2> 246555 INFO (qtp917512293-1973) [n:127.0.0.1:33885_krk ]
o.a.s.h.a.PrepRecoveryOp In WaitForState(recovering): collection=collection1,
shard=shard2, thisCore=collection1_shard2_replica_n41,
leaderDoesNotNeedRecovery=false, isLeader? true, live=true, checkLive=true,
currentState=recovering, localState=active, nodeName=127.0.0.1:42485_krk,
coreNodeName=core_node46, onlyIfActiveCheckResult=false, nodeProps:
core_node46:{"core":"collection1_shard2_replica_n45","base_url":"http://127.0.0.1:42485/krk","node_name":"127.0.0.1:42485_krk","state":"recovering","type":"NRT"}
[junit4] 2> 246555 INFO (qtp917512293-1973) [n:127.0.0.1:33885_krk ]
o.a.s.h.a.PrepRecoveryOp Waited coreNodeName: core_node46, state: recovering,
checkLive: true, onlyIfLeader: true for: 1 seconds.
[junit4] 2> 246555 INFO (qtp917512293-1973) [n:127.0.0.1:33885_krk ]
o.a.s.s.HttpSolrCall [admin] webapp=null path=/admin/cores
params={nodeName=127.0.0.1:42485_krk&onlyIfLeaderActive=true&core=collection1_shard2_replica_n41&coreNodeName=core_node46&action=PREPRECOVERY&checkLive=true&state=recovering&onlyIfLeader=true&wt=javabin&version=2}
status=0 QTime=1000
[junit4] 2> 247055 INFO
(recoveryExecutor-398-thread-1-processing-n:127.0.0.1:42485_krk
x:collection1_shard2_replica_n45 s:shard2 c:collection1 r:core_node46)
[n:127.0.0.1:42485_krk c:collection1 s:shard2 r:core_node46
x:collection1_shard2_replica_n45] o.a.s.c.RecoveryStrategy Attempting to
PeerSync from [http://127.0.0.1:33885/krk/collection1_shard2_replica_n41/] -
recoveringAfterStartup=[true]
[junit4] 2> 247056 INFO
(recoveryExecutor-398-thread-1-processing-n:127.0.0.1:42485_krk
x:collection1_shard2_replica_n45 s:shard2 c:collection1 r:core_node46)
[n:127.0.0.1:42485_krk c:collection1 s:shard2 r:core_node46
x:collection1_shard2_replica_n45] o.a.s.u.PeerSync PeerSync:
core=collection1_shard2_replica_n45 url=http://127.0.0.1:42485/krk START
replicas=[http://127.0.0.1:33885/krk/collection1_shard2_replica_n41/]
nUpdates=100
[junit4] 2> 247056 INFO (SocketProxy-Acceptor-33885) [ ]
o.a.s.c.SocketProxy accepted
Socket[addr=/127.0.0.1,port=45536,localport=33885], receiveBufferSize:531000
[junit4] 2> 247057 INFO (SocketProxy-Acceptor-33885) [ ]
o.a.s.c.SocketProxy proxy connection
Socket[addr=/127.0.0.1,port=36669,localport=43512], receiveBufferSize=530904
[junit4] 2> 247059 INFO (qtp917512293-1973) [n:127.0.0.1:33885_krk
c:collection1 s:shard2 r:core_node42 x:collection1_shard2_replica_n41]
o.a.s.u.IndexFingerprint IndexFingerprint millis:1.0
result:{maxVersionSpecified=9223372036854775807, maxVersionEncountered=0,
maxInHash=0, versionsHash=0, numVersions=0, numDocs=0, maxDoc=0}
[junit4] 2> 247059 INFO (qtp917512293-1973) [n:127.0.0.1:33885_krk
c:collection1 s:shard2 r:core_node42 x:collection1_shard2_replica_n41]
o.a.s.c.S.Request [collection1_shard2_replica_n41] webapp=/krk path=/get
params={distrib=false&qt=/get&getFingerprint=9223372036854775807&wt=javabin&version=2}
status=0 QTime=1
[junit4] 2> 247060 INFO
(recoveryExecutor-398-thread-1-processing-n:127.0.0.1:42485_krk
x:collection1_shard2_replica_n45 s:shard2 c:collection1 r:core_node46)
[n:127.0.0.1:42485_krk c:collection1 s:shard2 r:core_node46
x:collection1_shard2_replica_n45] o.a.s.u.IndexFingerprint IndexFingerprint
millis:0.0 result:{maxVersionSpecified=9223372036854775807,
maxVersionEncountered=0, maxInHash=0, versionsHash=0, numVersions=0, numDocs=0,
maxDoc=0}
[junit4] 2> 247060 INFO
(recoveryExecutor-398-thread-1-processing-n:127.0.0.1:42485_krk
x:collection1_shard2_replica_n45 s:shard2 c:collection1 r:core_node46)
[n:127.0.0.1:42485_krk c:collection1 s:shard2 r:core_node46
x:collection1_shard2_replica_n45] o.a.s.u.PeerSync We are already in sync. No
need to do a PeerSync
[junit4] 2> 247060 INFO
(recoveryExecutor-398-thread-1-processing-n:127.0.0.1:42485_krk
x:collection1_shard2_replica_n45 s:shard2 c:collection1 r:core_node46)
[n:127.0.0.1:42485_krk c:collection1 s:shard2 r:core_node46
x:collection1_shard2_replica_n45] o.a.s.u.DirectUpdateHandler2 start
commit{,optimize=false,openSearcher=true,waitSearcher=true,expungeDeletes=false,softCommit=false,prepareCommit=false}
[junit4] 2> 247060 INFO
(recoveryExecutor-398-thread-1-processing-n:127.0.0.1:42485_krk
x:collection1_shard2_replica_n45 s:shard2 c:collection1 r:core_node46)
[n:127.0.0.1:42485_krk c:collection1 s:shard2 r:core_node46
x:collection1_shard2_replica_n45] o.a.s.u.DirectUpdateHandler2 No uncommitted
changes. Skipping IW.commit.
[junit4] 2> 247060 INFO
(recoveryExecutor-398-thread-1-processing-n:127.0.0.1:42485_krk
x:collection1_shard2_replica_n45 s:shard2 c:collection1 r:core_node46)
[n:127.0.0.1:42485_krk c:collection1 s:shard2 r:core_node46
x:collection1_shard2_replica_n45] o.a.s.u.DirectUpdateHandler2 end_commit_flush
[junit4] 2> 247061 INFO
(recoveryExecutor-398-thread-1-processing-n:127.0.0.1:42485_krk
x:collection1_shard2_replica_n45 s:shard2 c:collection1 r:core_node46)
[n:127.0.0.1:42485_krk c:collection1 s:shard2 r:core_node46
x:collection1_shard2_replica_n45] o.a.s.c.RecoveryStrategy PeerSync stage of
recovery was successful.
[junit4] 2> 247061 INFO
(recoveryExecutor-398-thread-1-processing-n:127.0.0.1:42485_krk
x:collection1_shard2_replica_n45 s:shard2 c:collection1 r:core_node46)
[n:127.0.0.1:42485_krk c:collection1 s:shard2 r:core_node46
x:collection1_shard2_replica_n45] o.a.s.c.RecoveryStrategy Replaying updates
buffered during PeerSync.
[junit4] 2> 247061 INFO
(recoveryExecutor-398-thread-1-processing-n:127.0.0.1:42485_krk
x:collection1_shard2_replica_n45 s:shard2 c:collection1 r:core_node46)
[n:127.0.0.1:42485_krk c:collection1 s:shard2 r:core_node46
x:collection1_shard2_replica_n45] o.a.s.c.RecoveryStrategy No replay needed.
[junit4] 2> 247061 INFO
(recoveryExecutor-398-thread-1-processing-n:127.0.0.1:42485_krk
x:collection1_shard2_replica_n45 s:shard2 c:collection1 r:core_node46)
[n:127.0.0.1:42485_krk c:collection1 s:shard2 r:core_node46
x:collection1_shard2_replica_n45] o.a.s.c.RecoveryStrategy Registering as
Active after recovery.
[junit4] 2> 247555 INFO
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[B6F7C669EDE65067]) [ ]
o.a.s.c.AbstractDistribZkTestBase Recoveries finished - collection: collection1
[junit4] 2> 247557 INFO (qtp167089157-1921) [n:127.0.0.1:45679_krk
c:control_collection s:shard1 r:core_node2
x:control_collection_shard1_replica_n1] o.a.s.u.DirectUpdateHandler2 start
commit{_version_=1577437206401777664,optimize=false,openSearcher=true,waitSearcher=true,expungeDeletes=false,softCommit=false,prepareCommit=false}
[junit4] 2> 247557 INFO (qtp167089157-1921) [n:127.0.0.1:45679_krk
c:control_collection s:shard1 r:core_node2
x:control_collection_shard1_replica_n1] o.a.s.u.DirectUpdateHandler2 No
uncommitted changes. Skipping IW.commit.
[junit4] 2> 247558 INFO (qtp167089157-1921) [n:127.0.0.1:45679_krk
c:control_collection s:shard1 r:core_node2
x:control_collection_shard1_replica_n1] o.a.s.u.DirectUpdateHandler2
end_commit_flush
[junit4] 2> 247558 INFO (qtp167089157-1921) [n:127.0.0.1:45679_krk
c:control_collection s:shard1 r:core_node2
x:control_collection_shard1_replica_n1] o.a.s.u.p.LogUpdateProcessorFactory
[control_collection_shard1_
[...truncated too long message...]
ing reporter [org.apache.solr.metrics.reporters.SolrJmxReporter@6b51a43:
rootName = null, domain = solr.jetty, service url = null, agent id = null] for
registry solr.jetty / com.codahale.metrics.MetricRegistry@1aa880
[junit4] 2> 369520 INFO
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[B6F7C669EDE65067]) [ ]
o.a.s.m.SolrMetricManager Closing metric reporters for registry=solr.cluster,
tag=null
[junit4] 2> 369521 INFO (coreCloseExecutor-961-thread-1)
[n:127.0.0.1:42485_krk c:collection1 s:shard2 r:core_node46
x:collection1_shard2_replica_n45] o.a.s.c.SolrCore
[collection1_shard2_replica_n45] CLOSING SolrCore
org.apache.solr.core.SolrCore@74e87abd
[junit4] 2> 369521 INFO (coreCloseExecutor-961-thread-1)
[n:127.0.0.1:42485_krk c:collection1 s:shard2 r:core_node46
x:collection1_shard2_replica_n45] o.a.s.m.SolrMetricManager Closing metric
reporters for registry=solr.core.collection1.shard2.replica_n45, tag=1961392829
[junit4] 2> 369521 INFO (coreCloseExecutor-961-thread-1)
[n:127.0.0.1:42485_krk c:collection1 s:shard2 r:core_node46
x:collection1_shard2_replica_n45] o.a.s.m.r.SolrJmxReporter Closing reporter
[org.apache.solr.metrics.reporters.SolrJmxReporter@5d2c98b5: rootName = null,
domain = solr.core.collection1.shard2.replica_n45, service url = null, agent id
= null] for registry solr.core.collection1.shard2.replica_n45 /
com.codahale.metrics.MetricRegistry@ebc4b4d
[junit4] 2> 369529 INFO (coreCloseExecutor-961-thread-1)
[n:127.0.0.1:42485_krk c:collection1 s:shard2 r:core_node46
x:collection1_shard2_replica_n45] o.a.s.m.SolrMetricManager Closing metric
reporters for registry=solr.collection.collection1.shard2.leader, tag=1961392829
[junit4] 2> 369533 INFO
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[B6F7C669EDE65067]) [ ]
o.a.s.c.Overseer Overseer
(id=98589824753205263-127.0.0.1:42485_krk-n_0000000003) closing
[junit4] 2> 369533 INFO
(OverseerStateUpdate-98589824753205263-127.0.0.1:42485_krk-n_0000000003)
[n:127.0.0.1:42485_krk ] o.a.s.c.Overseer Overseer Loop exiting :
127.0.0.1:42485_krk
[junit4] 2> 371034 WARN
(zkCallback-400-thread-3-processing-n:127.0.0.1:42485_krk)
[n:127.0.0.1:42485_krk ] o.a.s.c.c.ZkStateReader ZooKeeper watch triggered,
but Solr cannot talk to ZK: [KeeperErrorCode = Session expired for /live_nodes]
[junit4] 2> 371035 INFO
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[B6F7C669EDE65067]) [ ]
o.e.j.s.AbstractConnector Stopped
ServerConnector@16a49819{HTTP/1.1,[http/1.1]}{127.0.0.1:0}
[junit4] 2> 371035 INFO
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[B6F7C669EDE65067]) [ ]
o.e.j.s.h.ContextHandler Stopped
o.e.j.s.ServletContextHandler@604a7ee{/krk,null,UNAVAILABLE}
[junit4] 2> 371035 ERROR
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[B6F7C669EDE65067]) [ ]
o.a.z.s.ZooKeeperServer ZKShutdownHandler is not registered, so ZooKeeper
server won't take any action on ERROR or SHUTDOWN server state changes
[junit4] 2> 371035 INFO
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[B6F7C669EDE65067]) [ ]
o.a.s.c.ZkTestServer connecting to 127.0.0.1:37837 37837
[junit4] 2> 376066 INFO (Thread-381) [ ] o.a.s.c.ZkTestServer
connecting to 127.0.0.1:37837 37837
[junit4] 2> 376067 WARN (Thread-381) [ ] o.a.s.c.ZkTestServer Watch
limit violations:
[junit4] 2> Maximum concurrent create/delete watches above limit:
[junit4] 2>
[junit4] 2> 6 /solr/aliases.json
[junit4] 2> 4 /solr/security.json
[junit4] 2> 4 /solr/configs/conf1
[junit4] 2> 3 /solr/collections/c8n_1x3_lf/state.json
[junit4] 2> 3 /solr/collections/collection1/state.json
[junit4] 2>
[junit4] 2> Maximum concurrent data watches above limit:
[junit4] 2>
[junit4] 2> 6 /solr/clusterstate.json
[junit4] 2> 6 /solr/clusterprops.json
[junit4] 2> 2 /solr/overseer_elect/election/98589824753205252-127.0.0.1:45679_krk-n_0000000000
[junit4] 2>
[junit4] 2> Maximum concurrent children watches above limit:
[junit4] 2>
[junit4] 2> 6 /solr/live_nodes
[junit4] 2> 6 /solr/collections
[junit4] 2> 3 /solr/overseer/queue
[junit4] 2> 3 /solr/overseer/collection-queue-work
[junit4] 2> 2 /solr/overseer/queue-work
[junit4] 2>
[junit4] 2> 376067 WARN
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[B6F7C669EDE65067]) [ ]
o.a.s.c.SocketProxy Closing 1 connections to: http://127.0.0.1:45679/krk,
target: http://127.0.0.1:44303/krk
[junit4] 2> 376067 WARN
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[B6F7C669EDE65067]) [ ]
o.a.s.c.SocketProxy Closing 3 connections to: http://127.0.0.1:42485/krk,
target: http://127.0.0.1:44077/krk
[junit4] 2> 376067 WARN
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[B6F7C669EDE65067]) [ ]
o.a.s.c.SocketProxy Closing 6 connections to: http://127.0.0.1:45099/krk,
target: http://127.0.0.1:45365/krk
[junit4] 2> 376067 WARN
(TEST-LeaderFailoverAfterPartitionTest.test-seed#[B6F7C669EDE65067]) [ ]
o.a.s.c.SocketProxy Closing 0 connections to: http://127.0.0.1:33885/krk,
target: http://127.0.0.1:36669/krk
[junit4] 2> NOTE: reproduce with: ant test
-Dtestcase=LeaderFailoverAfterPartitionTest -Dtests.method=test
-Dtests.seed=B6F7C669EDE65067 -Dtests.multiplier=3 -Dtests.slow=true
-Dtests.locale=en-GB -Dtests.timezone=America/Panama -Dtests.asserts=true
-Dtests.file.encoding=UTF-8
[junit4] FAILURE 138s J1 | LeaderFailoverAfterPartitionTest.test <<<
[junit4] > Throwable #1: java.lang.AssertionError: Expected 2 of 3
replicas to be active but only found 1;
[core_node5:{"core":"c8n_1x3_lf_shard1_replica_n3","base_url":"http://127.0.0.1:45099/krk","node_name":"127.0.0.1:45099_krk","state":"active","type":"NRT","leader":"true"}];
clusterState: DocCollection(c8n_1x3_lf//clusterstate.json/29)={
[junit4] > "pullReplicas":"0",
[junit4] > "replicationFactor":"1",
[junit4] > "shards":{"shard1":{
[junit4] > "range":"80000000-7fffffff",
[junit4] > "state":"active",
[junit4] > "replicas":{
[junit4] > "core_node4":{
[junit4] > "state":"down",
[junit4] > "base_url":"http://127.0.0.1:45679/krk",
[junit4] > "core":"c8n_1x3_lf_shard1_replica_n1",
[junit4] > "node_name":"127.0.0.1:45679_krk",
[junit4] > "type":"NRT"},
[junit4] > "core_node5":{
[junit4] > "core":"c8n_1x3_lf_shard1_replica_n3",
[junit4] > "base_url":"http://127.0.0.1:45099/krk",
[junit4] > "node_name":"127.0.0.1:45099_krk",
[junit4] > "state":"active",
[junit4] > "type":"NRT",
[junit4] > "leader":"true"},
[junit4] > "core_node6":{
[junit4] > "core":"c8n_1x3_lf_shard1_replica_n2",
[junit4] > "base_url":"http://127.0.0.1:33885/krk",
[junit4] > "node_name":"127.0.0.1:33885_krk",
[junit4] > "state":"down",
[junit4] > "type":"NRT"}}}},
[junit4] > "router":{"name":"compositeId"},
[junit4] > "maxShardsPerNode":"1",
[junit4] > "autoAddReplicas":"false",
[junit4] > "nrtReplicas":"3",
[junit4] > "tlogReplicas":"0"}
[junit4] > at
__randomizedtesting.SeedInfo.seed([B6F7C669EDE65067:3EA3F9B3431A3D9F]:0)
[junit4] > at
org.apache.solr.cloud.LeaderFailoverAfterPartitionTest.testRf3WithLeaderFailover(LeaderFailoverAfterPartitionTest.java:169)
[junit4] > at
org.apache.solr.cloud.LeaderFailoverAfterPartitionTest.test(LeaderFailoverAfterPartitionTest.java:56)
[junit4] > at
org.apache.solr.BaseDistributedSearchTestCase$ShardsRepeatRule$ShardsFixedStatement.callStatement(BaseDistributedSearchTestCase.java:993)
[junit4] > at
org.apache.solr.BaseDistributedSearchTestCase$ShardsRepeatRule$ShardsStatement.evaluate(BaseDistributedSearchTestCase.java:968)
[junit4] > at java.lang.Thread.run(Thread.java:748)
[junit4] 2> NOTE: leaving temporary files on disk at:
/home/jenkins/workspace/Lucene-Solr-7.x-Linux/solr/build/solr-core/test/J1/temp/solr.cloud.LeaderFailoverAfterPartitionTest_B6F7C669EDE65067-001
[junit4] 2> NOTE: test params are: codec=Asserting(Lucene70):
{multiDefault=FST50, a_t=PostingsFormat(name=MockRandom),
id=Lucene50(blocksize=128),
text=PostingsFormat(name=LuceneVarGapFixedInterval)},
docValues:{range_facet_l_dv=DocValuesFormat(name=Lucene70),
_version_=DocValuesFormat(name=Memory),
intDefault=DocValuesFormat(name=Memory), id_i1=DocValuesFormat(name=Asserting),
range_facet_i_dv=DocValuesFormat(name=Memory),
intDvoDefault=DocValuesFormat(name=Direct),
range_facet_l=DocValuesFormat(name=Memory),
timestamp=DocValuesFormat(name=Memory)}, maxPointsInLeafNode=1934,
maxMBSortInHeap=6.671089547720577, sim=RandomSimilarity(queryNorm=false): {},
locale=en-GB, timezone=America/Panama
[junit4] 2> NOTE: Linux 4.10.0-33-generic amd64/Oracle Corporation
1.8.0_144 (64-bit)/cpus=8,threads=1,free=59767432,total=503316480
[junit4] 2> NOTE: All tests run in this JVM: [SystemInfoHandlerTest,
TestReqParamsAPI, TestObjectReleaseTracker, SampleTest, TestHashPartitioner,
TestDocTermOrds, IndexSchemaRuntimeFieldTest, TestTolerantUpdateProcessorCloud,
PeerSyncReplicationTest, DistributedFacetPivotSmallAdvancedTest,
TestCoreAdminApis, SpatialRPTFieldTypeTest, TestSearcherReuse,
TestDynamicLoading, TestSQLHandlerNonCloud, TestTestInjection, TestInitQParser,
TestComplexPhraseQParserPlugin, BasicAuthStandaloneTest, ZkControllerTest,
SimpleCollectionCreateDeleteTest, OverriddenZkACLAndCredentialsProvidersTest,
LukeRequestHandlerTest, TestSystemCollAutoCreate, TestNumericTerms64,
SpatialFilterTest, DistanceFunctionTest, RuleEngineTest,
LeaderFailoverAfterPartitionTest]
[junit4] Completed [101/731 (1!)] on J1 in 138.40s, 1 test, 1 failure <<<
FAILURES!
[...truncated 48297 lines...]