Build: https://jenkins.thetaphi.de/job/Lucene-Solr-master-Linux/18851/
Java: 64bit/jdk1.8.0_121 -XX:+UseCompressedOops -XX:+UseSerialGC
1 tests failed.
FAILED: org.apache.solr.cloud.PeerSyncReplicationTest.test
Error Message:
timeout waiting to see all nodes active
Stack Trace:
java.lang.AssertionError: timeout waiting to see all nodes active
at
__randomizedtesting.SeedInfo.seed([52FFFC7F0CF16ACE:DAABC3A5A20D0736]:0)
at org.junit.Assert.fail(Assert.java:93)
at
org.apache.solr.cloud.PeerSyncReplicationTest.waitTillNodesActive(PeerSyncReplicationTest.java:326)
at
org.apache.solr.cloud.PeerSyncReplicationTest.bringUpDeadNodeAndEnsureNoReplication(PeerSyncReplicationTest.java:277)
at
org.apache.solr.cloud.PeerSyncReplicationTest.forceNodeFailureAndDoPeerSync(PeerSyncReplicationTest.java:259)
at
org.apache.solr.cloud.PeerSyncReplicationTest.test(PeerSyncReplicationTest.java:138)
at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
at
sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)
at
sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
at java.lang.reflect.Method.invoke(Method.java:498)
at
com.carrotsearch.randomizedtesting.RandomizedRunner.invoke(RandomizedRunner.java:1713)
at
com.carrotsearch.randomizedtesting.RandomizedRunner$8.evaluate(RandomizedRunner.java:907)
at
com.carrotsearch.randomizedtesting.RandomizedRunner$9.evaluate(RandomizedRunner.java:943)
at
com.carrotsearch.randomizedtesting.RandomizedRunner$10.evaluate(RandomizedRunner.java:957)
at
org.apache.solr.BaseDistributedSearchTestCase$ShardsRepeatRule$ShardsFixedStatement.callStatement(BaseDistributedSearchTestCase.java:985)
at
org.apache.solr.BaseDistributedSearchTestCase$ShardsRepeatRule$ShardsStatement.evaluate(BaseDistributedSearchTestCase.java:960)
at
com.carrotsearch.randomizedtesting.rules.SystemPropertiesRestoreRule$1.evaluate(SystemPropertiesRestoreRule.java:57)
at
org.apache.lucene.util.TestRuleSetupTeardownChained$1.evaluate(TestRuleSetupTeardownChained.java:49)
at
org.apache.lucene.util.AbstractBeforeAfterRule$1.evaluate(AbstractBeforeAfterRule.java:45)
at
org.apache.lucene.util.TestRuleThreadAndTestName$1.evaluate(TestRuleThreadAndTestName.java:48)
at
org.apache.lucene.util.TestRuleIgnoreAfterMaxFailures$1.evaluate(TestRuleIgnoreAfterMaxFailures.java:64)
at
org.apache.lucene.util.TestRuleMarkFailure$1.evaluate(TestRuleMarkFailure.java:47)
at
com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
at
com.carrotsearch.randomizedtesting.ThreadLeakControl$StatementRunner.run(ThreadLeakControl.java:368)
at
com.carrotsearch.randomizedtesting.ThreadLeakControl.forkTimeoutingTask(ThreadLeakControl.java:817)
at
com.carrotsearch.randomizedtesting.ThreadLeakControl$3.evaluate(ThreadLeakControl.java:468)
at
com.carrotsearch.randomizedtesting.RandomizedRunner.runSingleTest(RandomizedRunner.java:916)
at
com.carrotsearch.randomizedtesting.RandomizedRunner$5.evaluate(RandomizedRunner.java:802)
at
com.carrotsearch.randomizedtesting.RandomizedRunner$6.evaluate(RandomizedRunner.java:852)
at
com.carrotsearch.randomizedtesting.RandomizedRunner$7.evaluate(RandomizedRunner.java:863)
at
com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
at
com.carrotsearch.randomizedtesting.rules.SystemPropertiesRestoreRule$1.evaluate(SystemPropertiesRestoreRule.java:57)
at
org.apache.lucene.util.AbstractBeforeAfterRule$1.evaluate(AbstractBeforeAfterRule.java:45)
at
com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
at
org.apache.lucene.util.TestRuleStoreClassName$1.evaluate(TestRuleStoreClassName.java:41)
at
com.carrotsearch.randomizedtesting.rules.NoShadowingOrOverridesOnMethodsRule$1.evaluate(NoShadowingOrOverridesOnMethodsRule.java:40)
at
com.carrotsearch.randomizedtesting.rules.NoShadowingOrOverridesOnMethodsRule$1.evaluate(NoShadowingOrOverridesOnMethodsRule.java:40)
at
com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
at
com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
at
com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
at
org.apache.lucene.util.TestRuleAssertionsRequired$1.evaluate(TestRuleAssertionsRequired.java:53)
at
org.apache.lucene.util.TestRuleMarkFailure$1.evaluate(TestRuleMarkFailure.java:47)
at
org.apache.lucene.util.TestRuleIgnoreAfterMaxFailures$1.evaluate(TestRuleIgnoreAfterMaxFailures.java:64)
at
org.apache.lucene.util.TestRuleIgnoreTestSuites$1.evaluate(TestRuleIgnoreTestSuites.java:54)
at
com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
at
com.carrotsearch.randomizedtesting.ThreadLeakControl$StatementRunner.run(ThreadLeakControl.java:368)
at java.lang.Thread.run(Thread.java:745)
Build Log:
[...truncated 11099 lines...]
[junit4] Suite: org.apache.solr.cloud.PeerSyncReplicationTest
[junit4] 2> Creating dataDir:
/home/jenkins/workspace/Lucene-Solr-master-Linux/solr/build/solr-core/test/J1/temp/solr.cloud.PeerSyncReplicationTest_52FFFC7F0CF16ACE-001/init-core-data-001
[junit4] 2> 145176 INFO
(SUITE-PeerSyncReplicationTest-seed#[52FFFC7F0CF16ACE]-worker) [ ]
o.a.s.SolrTestCaseJ4 Using TrieFields
[junit4] 2> 145178 INFO
(SUITE-PeerSyncReplicationTest-seed#[52FFFC7F0CF16ACE]-worker) [ ]
o.a.s.SolrTestCaseJ4 Randomized ssl (false) and clientAuth (true) via:
@org.apache.solr.util.RandomizeSSL(reason=, value=NaN, ssl=NaN, clientAuth=NaN)
[junit4] 2> 145178 INFO
(SUITE-PeerSyncReplicationTest-seed#[52FFFC7F0CF16ACE]-worker) [ ]
o.a.s.BaseDistributedSearchTestCase Setting hostContext system property:
/vlvxe/ei
[junit4] 2> 145181 INFO
(TEST-PeerSyncReplicationTest.test-seed#[52FFFC7F0CF16ACE]) [ ]
o.a.s.c.ZkTestServer STARTING ZK TEST SERVER
[junit4] 2> 145181 INFO (Thread-346) [ ] o.a.s.c.ZkTestServer client
port:0.0.0.0/0.0.0.0:0
[junit4] 2> 145181 INFO (Thread-346) [ ] o.a.s.c.ZkTestServer Starting
server
[junit4] 2> 145281 INFO
(TEST-PeerSyncReplicationTest.test-seed#[52FFFC7F0CF16ACE]) [ ]
o.a.s.c.ZkTestServer start zk server on port:42121
[junit4] 2> 145306 INFO
(TEST-PeerSyncReplicationTest.test-seed#[52FFFC7F0CF16ACE]) [ ]
o.a.s.c.AbstractZkTestCase put
/home/jenkins/workspace/Lucene-Solr-master-Linux/solr/core/src/test-files/solr/collection1/conf/solrconfig-tlog.xml
to /configs/conf1/solrconfig.xml
[junit4] 2> 145308 INFO
(TEST-PeerSyncReplicationTest.test-seed#[52FFFC7F0CF16ACE]) [ ]
o.a.s.c.AbstractZkTestCase put
/home/jenkins/workspace/Lucene-Solr-master-Linux/solr/core/src/test-files/solr/collection1/conf/schema.xml
to /configs/conf1/schema.xml
[junit4] 2> 145309 INFO
(TEST-PeerSyncReplicationTest.test-seed#[52FFFC7F0CF16ACE]) [ ]
o.a.s.c.AbstractZkTestCase put
/home/jenkins/workspace/Lucene-Solr-master-Linux/solr/core/src/test-files/solr/collection1/conf/solrconfig.snippet.randomindexconfig.xml
to /configs/conf1/solrconfig.snippet.randomindexconfig.xml
[junit4] 2> 145310 INFO
(TEST-PeerSyncReplicationTest.test-seed#[52FFFC7F0CF16ACE]) [ ]
o.a.s.c.AbstractZkTestCase put
/home/jenkins/workspace/Lucene-Solr-master-Linux/solr/core/src/test-files/solr/collection1/conf/stopwords.txt
to /configs/conf1/stopwords.txt
[junit4] 2> 145312 INFO
(TEST-PeerSyncReplicationTest.test-seed#[52FFFC7F0CF16ACE]) [ ]
o.a.s.c.AbstractZkTestCase put
/home/jenkins/workspace/Lucene-Solr-master-Linux/solr/core/src/test-files/solr/collection1/conf/protwords.txt
to /configs/conf1/protwords.txt
[junit4] 2> 145314 INFO
(TEST-PeerSyncReplicationTest.test-seed#[52FFFC7F0CF16ACE]) [ ]
o.a.s.c.AbstractZkTestCase put
/home/jenkins/workspace/Lucene-Solr-master-Linux/solr/core/src/test-files/solr/collection1/conf/currency.xml
to /configs/conf1/currency.xml
[junit4] 2> 145315 INFO
(TEST-PeerSyncReplicationTest.test-seed#[52FFFC7F0CF16ACE]) [ ]
o.a.s.c.AbstractZkTestCase put
/home/jenkins/workspace/Lucene-Solr-master-Linux/solr/core/src/test-files/solr/collection1/conf/enumsConfig.xml
to /configs/conf1/enumsConfig.xml
[junit4] 2> 145316 INFO
(TEST-PeerSyncReplicationTest.test-seed#[52FFFC7F0CF16ACE]) [ ]
o.a.s.c.AbstractZkTestCase put
/home/jenkins/workspace/Lucene-Solr-master-Linux/solr/core/src/test-files/solr/collection1/conf/open-exchange-rates.json
to /configs/conf1/open-exchange-rates.json
[junit4] 2> 145317 INFO
(TEST-PeerSyncReplicationTest.test-seed#[52FFFC7F0CF16ACE]) [ ]
o.a.s.c.AbstractZkTestCase put
/home/jenkins/workspace/Lucene-Solr-master-Linux/solr/core/src/test-files/solr/collection1/conf/mapping-ISOLatin1Accent.txt
to /configs/conf1/mapping-ISOLatin1Accent.txt
[junit4] 2> 145318 INFO
(TEST-PeerSyncReplicationTest.test-seed#[52FFFC7F0CF16ACE]) [ ]
o.a.s.c.AbstractZkTestCase put
/home/jenkins/workspace/Lucene-Solr-master-Linux/solr/core/src/test-files/solr/collection1/conf/old_synonyms.txt
to /configs/conf1/old_synonyms.txt
[junit4] 2> 145319 INFO
(TEST-PeerSyncReplicationTest.test-seed#[52FFFC7F0CF16ACE]) [ ]
o.a.s.c.AbstractZkTestCase put
/home/jenkins/workspace/Lucene-Solr-master-Linux/solr/core/src/test-files/solr/collection1/conf/synonyms.txt
to /configs/conf1/synonyms.txt
[junit4] 2> 145476 INFO
(TEST-PeerSyncReplicationTest.test-seed#[52FFFC7F0CF16ACE]) [ ]
o.a.s.SolrTestCaseJ4 Writing core.properties file to
/home/jenkins/workspace/Lucene-Solr-master-Linux/solr/build/solr-core/test/J1/temp/solr.cloud.PeerSyncReplicationTest_52FFFC7F0CF16ACE-001/control-001/cores/collection1
[junit4] 2> 145481 INFO
(TEST-PeerSyncReplicationTest.test-seed#[52FFFC7F0CF16ACE]) [ ]
o.e.j.s.Server jetty-9.3.14.v20161028
[junit4] 2> 145481 INFO
(TEST-PeerSyncReplicationTest.test-seed#[52FFFC7F0CF16ACE]) [ ]
o.e.j.s.h.ContextHandler Started
o.e.j.s.ServletContextHandler@7694f7cf{/vlvxe/ei,null,AVAILABLE}
[junit4] 2> 145483 INFO
(TEST-PeerSyncReplicationTest.test-seed#[52FFFC7F0CF16ACE]) [ ]
o.e.j.s.AbstractConnector Started
ServerConnector@37ecd2bf{HTTP/1.1,[http/1.1]}{127.0.0.1:42439}
[junit4] 2> 145483 INFO
(TEST-PeerSyncReplicationTest.test-seed#[52FFFC7F0CF16ACE]) [ ]
o.e.j.s.Server Started @148620ms
[junit4] 2> 145483 INFO
(TEST-PeerSyncReplicationTest.test-seed#[52FFFC7F0CF16ACE]) [ ]
o.a.s.c.s.e.JettySolrRunner Jetty properties:
{solr.data.dir=/home/jenkins/workspace/Lucene-Solr-master-Linux/solr/build/solr-core/test/J1/temp/solr.cloud.PeerSyncReplicationTest_52FFFC7F0CF16ACE-001/tempDir-001/control/data,
hostContext=/vlvxe/ei, hostPort=42439,
coreRootDirectory=/home/jenkins/workspace/Lucene-Solr-master-Linux/solr/build/solr-core/test/J1/temp/solr.cloud.PeerSyncReplicationTest_52FFFC7F0CF16ACE-001/control-001/cores}
[junit4] 2> 145483 ERROR
(TEST-PeerSyncReplicationTest.test-seed#[52FFFC7F0CF16ACE]) [ ]
o.a.s.s.StartupLoggingUtils Missing Java Option solr.log.dir. Logging may be
missing or incomplete.
[junit4] 2> 145483 INFO
(TEST-PeerSyncReplicationTest.test-seed#[52FFFC7F0CF16ACE]) [ ]
o.a.s.s.SolrDispatchFilter ___ _ Welcome to Apache Solr™ version
7.0.0
[junit4] 2> 145483 INFO
(TEST-PeerSyncReplicationTest.test-seed#[52FFFC7F0CF16ACE]) [ ]
o.a.s.s.SolrDispatchFilter / __| ___| |_ _ Starting in cloud mode on port null
[junit4] 2> 145483 INFO
(TEST-PeerSyncReplicationTest.test-seed#[52FFFC7F0CF16ACE]) [ ]
o.a.s.s.SolrDispatchFilter \__ \/ _ \ | '_| Install dir: null
[junit4] 2> 145483 INFO
(TEST-PeerSyncReplicationTest.test-seed#[52FFFC7F0CF16ACE]) [ ]
o.a.s.s.SolrDispatchFilter |___/\___/_|_| Start time:
2017-01-26T13:58:27.699Z
[junit4] 2> 145491 INFO
(TEST-PeerSyncReplicationTest.test-seed#[52FFFC7F0CF16ACE]) [ ]
o.a.s.s.SolrDispatchFilter Loading solr.xml from SolrHome (not found in
ZooKeeper)
[junit4] 2> 145491 INFO
(TEST-PeerSyncReplicationTest.test-seed#[52FFFC7F0CF16ACE]) [ ]
o.a.s.c.SolrXmlConfig Loading container configuration from
/home/jenkins/workspace/Lucene-Solr-master-Linux/solr/build/solr-core/test/J1/temp/solr.cloud.PeerSyncReplicationTest_52FFFC7F0CF16ACE-001/control-001/solr.xml
[junit4] 2> 145501 INFO
(TEST-PeerSyncReplicationTest.test-seed#[52FFFC7F0CF16ACE]) [ ]
o.a.s.c.ZkContainer Zookeeper client=127.0.0.1:42121/solr
[junit4] 2> 145546 INFO
(TEST-PeerSyncReplicationTest.test-seed#[52FFFC7F0CF16ACE])
[n:127.0.0.1:42439_vlvxe%2Fei ] o.a.s.c.OverseerElectionContext I am going
to be the leader 127.0.0.1:42439_vlvxe%2Fei
[junit4] 2> 145547 INFO
(TEST-PeerSyncReplicationTest.test-seed#[52FFFC7F0CF16ACE])
[n:127.0.0.1:42439_vlvxe%2Fei ] o.a.s.c.Overseer Overseer
(id=97349737342435332-127.0.0.1:42439_vlvxe%2Fei-n_0000000000) starting
[junit4] 2> 145557 INFO
(TEST-PeerSyncReplicationTest.test-seed#[52FFFC7F0CF16ACE])
[n:127.0.0.1:42439_vlvxe%2Fei ] o.a.s.c.ZkController Register node as live
in ZooKeeper:/live_nodes/127.0.0.1:42439_vlvxe%2Fei
[junit4] 2> 145568 INFO
(zkCallback-151-thread-1-processing-n:127.0.0.1:42439_vlvxe%2Fei)
[n:127.0.0.1:42439_vlvxe%2Fei ] o.a.s.c.c.ZkStateReader Updated live nodes
from ZooKeeper... (0) -> (1)
[junit4] 2> 145590 INFO
(TEST-PeerSyncReplicationTest.test-seed#[52FFFC7F0CF16ACE])
[n:127.0.0.1:42439_vlvxe%2Fei ] o.a.s.c.CorePropertiesLocator Found 1 core
definitions underneath
/home/jenkins/workspace/Lucene-Solr-master-Linux/solr/build/solr-core/test/J1/temp/solr.cloud.PeerSyncReplicationTest_52FFFC7F0CF16ACE-001/control-001/cores
[junit4] 2> 145590 INFO
(TEST-PeerSyncReplicationTest.test-seed#[52FFFC7F0CF16ACE])
[n:127.0.0.1:42439_vlvxe%2Fei ] o.a.s.c.CorePropertiesLocator Cores are:
[collection1]
[junit4] 2> 145596 INFO
(OverseerStateUpdate-97349737342435332-127.0.0.1:42439_vlvxe%2Fei-n_0000000000)
[n:127.0.0.1:42439_vlvxe%2Fei ] o.a.s.c.o.ReplicaMutator Assigning new node
to shard shard=shard1
[junit4] 2> 146618 INFO
(coreLoadExecutor-664-thread-1-processing-n:127.0.0.1:42439_vlvxe%2Fei)
[n:127.0.0.1:42439_vlvxe%2Fei c:control_collection x:collection1]
o.a.s.c.SolrConfig Using Lucene MatchVersion: 7.0.0
[junit4] 2> 146664 INFO
(coreLoadExecutor-664-thread-1-processing-n:127.0.0.1:42439_vlvxe%2Fei)
[n:127.0.0.1:42439_vlvxe%2Fei c:control_collection x:collection1]
o.a.s.s.IndexSchema [collection1] Schema name=test
[junit4] 2> 146886 WARN
(coreLoadExecutor-664-thread-1-processing-n:127.0.0.1:42439_vlvxe%2Fei)
[n:127.0.0.1:42439_vlvxe%2Fei c:control_collection x:collection1]
o.a.s.s.IndexSchema [collection1] default search field in schema is text.
WARNING: Deprecated, please use 'df' on request instead.
[junit4] 2> 146890 INFO
(coreLoadExecutor-664-thread-1-processing-n:127.0.0.1:42439_vlvxe%2Fei)
[n:127.0.0.1:42439_vlvxe%2Fei c:control_collection x:collection1]
o.a.s.s.IndexSchema Loaded schema test/1.0 with uniqueid field id
[junit4] 2> 146905 INFO
(coreLoadExecutor-664-thread-1-processing-n:127.0.0.1:42439_vlvxe%2Fei)
[n:127.0.0.1:42439_vlvxe%2Fei c:control_collection x:collection1]
o.a.s.c.CoreContainer Creating SolrCore 'collection1' using configuration from
collection control_collection
[junit4] 2> 146914 INFO
(coreLoadExecutor-664-thread-1-processing-n:127.0.0.1:42439_vlvxe%2Fei)
[n:127.0.0.1:42439_vlvxe%2Fei c:control_collection x:collection1]
o.a.s.c.SolrCore [[collection1] ] Opening new SolrCore at
[/home/jenkins/workspace/Lucene-Solr-master-Linux/solr/build/solr-core/test/J1/temp/solr.cloud.PeerSyncReplicationTest_52FFFC7F0CF16ACE-001/control-001/cores/collection1],
dataDir=[/home/jenkins/workspace/Lucene-Solr-master-Linux/solr/build/solr-core/test/J1/temp/solr.cloud.PeerSyncReplicationTest_52FFFC7F0CF16ACE-001/control-001/cores/collection1/data/]
[junit4] 2> 146915 INFO
(coreLoadExecutor-664-thread-1-processing-n:127.0.0.1:42439_vlvxe%2Fei)
[n:127.0.0.1:42439_vlvxe%2Fei c:control_collection x:collection1]
o.a.s.c.JmxMonitoredMap JMX monitoring is enabled. Adding Solr mbeans to JMX
Server: com.sun.jmx.mbeanserver.JmxMBeanServer@86d02d1
[junit4] 2> 146920 INFO
(coreLoadExecutor-664-thread-1-processing-n:127.0.0.1:42439_vlvxe%2Fei)
[n:127.0.0.1:42439_vlvxe%2Fei c:control_collection x:collection1]
o.a.s.u.RandomMergePolicy RandomMergePolicy wrapping class
org.apache.lucene.index.TieredMergePolicy: [TieredMergePolicy:
maxMergeAtOnce=50, maxMergeAtOnceExplicit=30, maxMergedSegmentMB=83.86328125,
floorSegmentMB=1.533203125, forceMergeDeletesPctAllowed=11.300284571286749,
segmentsPerTier=43.0, maxCFSSegmentSizeMB=8.796093022207999E12,
noCFSRatio=0.17565648372055015
[junit4] 2> 147015 WARN
(coreLoadExecutor-664-thread-1-processing-n:127.0.0.1:42439_vlvxe%2Fei)
[n:127.0.0.1:42439_vlvxe%2Fei c:control_collection x:collection1]
o.a.s.c.RequestHandlers INVALID paramSet a in requestHandler {type =
requestHandler,name = /dump,class = DumpRequestHandler,attributes =
{initParams=a, name=/dump, class=DumpRequestHandler},args =
{defaults={a=A,b=B}}}
[junit4] 2> 147029 INFO
(coreLoadExecutor-664-thread-1-processing-n:127.0.0.1:42439_vlvxe%2Fei)
[n:127.0.0.1:42439_vlvxe%2Fei c:control_collection x:collection1]
o.a.s.u.UpdateHandler Using UpdateLog implementation:
org.apache.solr.update.UpdateLog
[junit4] 2> 147029 INFO
(coreLoadExecutor-664-thread-1-processing-n:127.0.0.1:42439_vlvxe%2Fei)
[n:127.0.0.1:42439_vlvxe%2Fei c:control_collection x:collection1]
o.a.s.u.UpdateLog Initializing UpdateLog: dataDir= defaultSyncLevel=FLUSH
numRecordsToKeep=1000 maxNumLogsToKeep=10 numVersionBuckets=65536
[junit4] 2> 147030 INFO
(coreLoadExecutor-664-thread-1-processing-n:127.0.0.1:42439_vlvxe%2Fei)
[n:127.0.0.1:42439_vlvxe%2Fei c:control_collection x:collection1]
o.a.s.u.CommitTracker Hard AutoCommit: disabled
[junit4] 2> 147030 INFO
(coreLoadExecutor-664-thread-1-processing-n:127.0.0.1:42439_vlvxe%2Fei)
[n:127.0.0.1:42439_vlvxe%2Fei c:control_collection x:collection1]
o.a.s.u.CommitTracker Soft AutoCommit: disabled
[junit4] 2> 147032 INFO
(coreLoadExecutor-664-thread-1-processing-n:127.0.0.1:42439_vlvxe%2Fei)
[n:127.0.0.1:42439_vlvxe%2Fei c:control_collection x:collection1]
o.a.s.u.RandomMergePolicy RandomMergePolicy wrapping class
org.apache.lucene.index.LogByteSizeMergePolicy: [LogByteSizeMergePolicy:
minMergeSize=1677721, mergeFactor=38, maxMergeSize=2147483648,
maxMergeSizeForForcedMerge=9223372036854775807, calibrateSizeByDeletes=true,
maxMergeDocs=2147483647, maxCFSSegmentSizeMB=8.796093022207999E12,
noCFSRatio=1.0]
[junit4] 2> 147034 INFO
(coreLoadExecutor-664-thread-1-processing-n:127.0.0.1:42439_vlvxe%2Fei)
[n:127.0.0.1:42439_vlvxe%2Fei c:control_collection x:collection1]
o.a.s.s.SolrIndexSearcher Opening [Searcher@24a31179[collection1] main]
[junit4] 2> 147036 INFO
(coreLoadExecutor-664-thread-1-processing-n:127.0.0.1:42439_vlvxe%2Fei)
[n:127.0.0.1:42439_vlvxe%2Fei c:control_collection x:collection1]
o.a.s.r.ManagedResourceStorage Configured ZooKeeperStorageIO with znodeBase:
/configs/conf1
[junit4] 2> 147036 INFO
(coreLoadExecutor-664-thread-1-processing-n:127.0.0.1:42439_vlvxe%2Fei)
[n:127.0.0.1:42439_vlvxe%2Fei c:control_collection x:collection1]
o.a.s.r.ManagedResourceStorage Loaded null at path _rest_managed.json using
ZooKeeperStorageIO:path=/configs/conf1
[junit4] 2> 147036 INFO
(coreLoadExecutor-664-thread-1-processing-n:127.0.0.1:42439_vlvxe%2Fei)
[n:127.0.0.1:42439_vlvxe%2Fei c:control_collection x:collection1]
o.a.s.h.ReplicationHandler Commits will be reserved for 10000
[junit4] 2> 147037 INFO
(searcherExecutor-665-thread-1-processing-n:127.0.0.1:42439_vlvxe%2Fei
x:collection1 c:control_collection) [n:127.0.0.1:42439_vlvxe%2Fei
c:control_collection x:collection1] o.a.s.c.SolrCore [collection1] Registered
new searcher Searcher@24a31179[collection1]
main{ExitableDirectoryReader(UninvertingDirectoryReader())}
[junit4] 2> 147038 INFO
(coreLoadExecutor-664-thread-1-processing-n:127.0.0.1:42439_vlvxe%2Fei)
[n:127.0.0.1:42439_vlvxe%2Fei c:control_collection x:collection1]
o.a.s.u.UpdateLog Could not find max version in index or recent updates, using
new clock 1557595799425122304
[junit4] 2> 147046 INFO
(coreZkRegister-657-thread-1-processing-n:127.0.0.1:42439_vlvxe%2Fei
x:collection1 c:control_collection) [n:127.0.0.1:42439_vlvxe%2Fei
c:control_collection s:shard1 r:core_node1 x:collection1]
o.a.s.c.ShardLeaderElectionContext Enough replicas found to continue.
[junit4] 2> 147046 INFO
(coreZkRegister-657-thread-1-processing-n:127.0.0.1:42439_vlvxe%2Fei
x:collection1 c:control_collection) [n:127.0.0.1:42439_vlvxe%2Fei
c:control_collection s:shard1 r:core_node1 x:collection1]
o.a.s.c.ShardLeaderElectionContext I may be the new leader - try and sync
[junit4] 2> 147046 INFO
(coreZkRegister-657-thread-1-processing-n:127.0.0.1:42439_vlvxe%2Fei
x:collection1 c:control_collection) [n:127.0.0.1:42439_vlvxe%2Fei
c:control_collection s:shard1 r:core_node1 x:collection1] o.a.s.c.SyncStrategy
Sync replicas to http://127.0.0.1:42439/vlvxe/ei/collection1/
[junit4] 2> 147046 INFO
(coreZkRegister-657-thread-1-processing-n:127.0.0.1:42439_vlvxe%2Fei
x:collection1 c:control_collection) [n:127.0.0.1:42439_vlvxe%2Fei
c:control_collection s:shard1 r:core_node1 x:collection1] o.a.s.c.SyncStrategy
Sync Success - now sync replicas to me
[junit4] 2> 147046 INFO
(coreZkRegister-657-thread-1-processing-n:127.0.0.1:42439_vlvxe%2Fei
x:collection1 c:control_collection) [n:127.0.0.1:42439_vlvxe%2Fei
c:control_collection s:shard1 r:core_node1 x:collection1] o.a.s.c.SyncStrategy
http://127.0.0.1:42439/vlvxe/ei/collection1/ has no replicas
[junit4] 2> 147050 INFO
(coreZkRegister-657-thread-1-processing-n:127.0.0.1:42439_vlvxe%2Fei
x:collection1 c:control_collection) [n:127.0.0.1:42439_vlvxe%2Fei
c:control_collection s:shard1 r:core_node1 x:collection1]
o.a.s.c.ShardLeaderElectionContext I am the new leader:
http://127.0.0.1:42439/vlvxe/ei/collection1/ shard1
[junit4] 2> 147153 INFO
(TEST-PeerSyncReplicationTest.test-seed#[52FFFC7F0CF16ACE]) [ ]
o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (0) -> (1)
[junit4] 2> 147153 INFO
(TEST-PeerSyncReplicationTest.test-seed#[52FFFC7F0CF16ACE]) [ ]
o.a.s.c.s.i.ZkClientClusterStateProvider Cluster at 127.0.0.1:42121/solr ready
[junit4] 2> 147154 INFO
(TEST-PeerSyncReplicationTest.test-seed#[52FFFC7F0CF16ACE]) [ ]
o.a.s.c.ChaosMonkey monkey: init - expire sessions:false cause connection
loss:false
[junit4] 2> 147200 INFO
(coreZkRegister-657-thread-1-processing-n:127.0.0.1:42439_vlvxe%2Fei
x:collection1 c:control_collection) [n:127.0.0.1:42439_vlvxe%2Fei
c:control_collection s:shard1 r:core_node1 x:collection1] o.a.s.c.ZkController
I am the leader, no recovery necessary
[junit4] 2> 147234 INFO
(TEST-PeerSyncReplicationTest.test-seed#[52FFFC7F0CF16ACE]) [ ]
o.a.s.SolrTestCaseJ4 Writing core.properties file to
/home/jenkins/workspace/Lucene-Solr-master-Linux/solr/build/solr-core/test/J1/temp/solr.cloud.PeerSyncReplicationTest_52FFFC7F0CF16ACE-001/shard-1-001/cores/collection1
[junit4] 2> 147235 INFO
(TEST-PeerSyncReplicationTest.test-seed#[52FFFC7F0CF16ACE]) [ ]
o.a.s.c.AbstractFullDistribZkTestBase create jetty 1 in directory
/home/jenkins/workspace/Lucene-Solr-master-Linux/solr/build/solr-core/test/J1/temp/solr.cloud.PeerSyncReplicationTest_52FFFC7F0CF16ACE-001/shard-1-001
[junit4] 2> 147236 INFO
(TEST-PeerSyncReplicationTest.test-seed#[52FFFC7F0CF16ACE]) [ ]
o.e.j.s.Server jetty-9.3.14.v20161028
[junit4] 2> 147242 INFO
(TEST-PeerSyncReplicationTest.test-seed#[52FFFC7F0CF16ACE]) [ ]
o.e.j.s.h.ContextHandler Started
o.e.j.s.ServletContextHandler@35168835{/vlvxe/ei,null,AVAILABLE}
[junit4] 2> 147243 INFO
(TEST-PeerSyncReplicationTest.test-seed#[52FFFC7F0CF16ACE]) [ ]
o.e.j.s.AbstractConnector Started
ServerConnector@1b4de1e6{HTTP/1.1,[http/1.1]}{127.0.0.1:34850}
[junit4] 2> 147243 INFO
(TEST-PeerSyncReplicationTest.test-seed#[52FFFC7F0CF16ACE]) [ ]
o.e.j.s.Server Started @150380ms
[junit4] 2> 147243 INFO
(TEST-PeerSyncReplicationTest.test-seed#[52FFFC7F0CF16ACE]) [ ]
o.a.s.c.s.e.JettySolrRunner Jetty properties:
{solr.data.dir=/home/jenkins/workspace/Lucene-Solr-master-Linux/solr/build/solr-core/test/J1/temp/solr.cloud.PeerSyncReplicationTest_52FFFC7F0CF16ACE-001/tempDir-001/jetty1,
solrconfig=solrconfig.xml, hostContext=/vlvxe/ei, hostPort=34850,
coreRootDirectory=/home/jenkins/workspace/Lucene-Solr-master-Linux/solr/build/solr-core/test/J1/../../../../../../../../../home/jenkins/workspace/Lucene-Solr-master-Linux/solr/build/solr-core/test/J1/temp/solr.cloud.PeerSyncReplicationTest_52FFFC7F0CF16ACE-001/shard-1-001/cores}
[junit4] 2> 147243 ERROR
(TEST-PeerSyncReplicationTest.test-seed#[52FFFC7F0CF16ACE]) [ ]
o.a.s.s.StartupLoggingUtils Missing Java Option solr.log.dir. Logging may be
missing or incomplete.
[junit4] 2> 147244 INFO
(TEST-PeerSyncReplicationTest.test-seed#[52FFFC7F0CF16ACE]) [ ]
o.a.s.s.SolrDispatchFilter ___ _ Welcome to Apache Solr™ version
7.0.0
[junit4] 2> 147244 INFO
(TEST-PeerSyncReplicationTest.test-seed#[52FFFC7F0CF16ACE]) [ ]
o.a.s.s.SolrDispatchFilter / __| ___| |_ _ Starting in cloud mode on port null
[junit4] 2> 147244 INFO
(TEST-PeerSyncReplicationTest.test-seed#[52FFFC7F0CF16ACE]) [ ]
o.a.s.s.SolrDispatchFilter \__ \/ _ \ | '_| Install dir: null
[junit4] 2> 147244 INFO
(TEST-PeerSyncReplicationTest.test-seed#[52FFFC7F0CF16ACE]) [ ]
o.a.s.s.SolrDispatchFilter |___/\___/_|_| Start time:
2017-01-26T13:58:29.460Z
[junit4] 2> 147253 INFO
(TEST-PeerSyncReplicationTest.test-seed#[52FFFC7F0CF16ACE]) [ ]
o.a.s.s.SolrDispatchFilter Loading solr.xml from SolrHome (not found in
ZooKeeper)
[junit4] 2> 147254 INFO
(TEST-PeerSyncReplicationTest.test-seed#[52FFFC7F0CF16ACE]) [ ]
o.a.s.c.SolrXmlConfig Loading container configuration from
/home/jenkins/workspace/Lucene-Solr-master-Linux/solr/build/solr-core/test/J1/temp/solr.cloud.PeerSyncReplicationTest_52FFFC7F0CF16ACE-001/shard-1-001/solr.xml
[junit4] 2> 147262 INFO
(TEST-PeerSyncReplicationTest.test-seed#[52FFFC7F0CF16ACE]) [ ]
o.a.s.c.ZkContainer Zookeeper client=127.0.0.1:42121/solr
[junit4] 2> 147270 WARN (NIOServerCxn.Factory:0.0.0.0/0.0.0.0:0) [ ]
o.a.z.s.NIOServerCnxn caught end of stream exception
[junit4] 2> EndOfStreamException: Unable to read additional data from
client sessionid 0x159db1201460007, likely client has closed socket
[junit4] 2> at
org.apache.zookeeper.server.NIOServerCnxn.doIO(NIOServerCnxn.java:228)
[junit4] 2> at
org.apache.zookeeper.server.NIOServerCnxnFactory.run(NIOServerCnxnFactory.java:208)
[junit4] 2> at java.lang.Thread.run(Thread.java:745)
[junit4] 2> 147278 INFO
(TEST-PeerSyncReplicationTest.test-seed#[52FFFC7F0CF16ACE])
[n:127.0.0.1:34850_vlvxe%2Fei ] o.a.s.c.c.ZkStateReader Updated live nodes
from ZooKeeper... (0) -> (1)
[junit4] 2> 147282 INFO
(TEST-PeerSyncReplicationTest.test-seed#[52FFFC7F0CF16ACE])
[n:127.0.0.1:34850_vlvxe%2Fei ] o.a.s.c.ZkController Register node as live
in ZooKeeper:/live_nodes/127.0.0.1:34850_vlvxe%2Fei
[junit4] 2> 147284 INFO (zkCallback-155-thread-1) [ ]
o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (1) -> (2)
[junit4] 2> 147284 INFO
(zkCallback-151-thread-1-processing-n:127.0.0.1:42439_vlvxe%2Fei)
[n:127.0.0.1:42439_vlvxe%2Fei ] o.a.s.c.c.ZkStateReader Updated live nodes
from ZooKeeper... (1) -> (2)
[junit4] 2> 147285 INFO
(zkCallback-160-thread-1-processing-n:127.0.0.1:34850_vlvxe%2Fei)
[n:127.0.0.1:34850_vlvxe%2Fei ] o.a.s.c.c.ZkStateReader Updated live nodes
from ZooKeeper... (1) -> (2)
[junit4] 2> 147342 INFO
(TEST-PeerSyncReplicationTest.test-seed#[52FFFC7F0CF16ACE])
[n:127.0.0.1:34850_vlvxe%2Fei ] o.a.s.c.CorePropertiesLocator Found 1 core
definitions underneath
/home/jenkins/workspace/Lucene-Solr-master-Linux/solr/build/solr-core/test/J1/../../../../../../../../../home/jenkins/workspace/Lucene-Solr-master-Linux/solr/build/solr-core/test/J1/temp/solr.cloud.PeerSyncReplicationTest_52FFFC7F0CF16ACE-001/shard-1-001/cores
[junit4] 2> 147342 INFO
(TEST-PeerSyncReplicationTest.test-seed#[52FFFC7F0CF16ACE])
[n:127.0.0.1:34850_vlvxe%2Fei ] o.a.s.c.CorePropertiesLocator Cores are:
[collection1]
[junit4] 2> 147345 INFO
(OverseerStateUpdate-97349737342435332-127.0.0.1:42439_vlvxe%2Fei-n_0000000000)
[n:127.0.0.1:42439_vlvxe%2Fei ] o.a.s.c.o.ReplicaMutator Assigning new node
to shard shard=shard1
[junit4] 2> 148358 INFO
(coreLoadExecutor-675-thread-1-processing-n:127.0.0.1:34850_vlvxe%2Fei)
[n:127.0.0.1:34850_vlvxe%2Fei c:collection1 x:collection1] o.a.s.c.SolrConfig
Using Lucene MatchVersion: 7.0.0
[junit4] 2> 148370 INFO
(coreLoadExecutor-675-thread-1-processing-n:127.0.0.1:34850_vlvxe%2Fei)
[n:127.0.0.1:34850_vlvxe%2Fei c:collection1 x:collection1]
o.a.s.s.IndexSchema [collection1] Schema name=test
[junit4] 2> 148478 WARN
(coreLoadExecutor-675-thread-1-processing-n:127.0.0.1:34850_vlvxe%2Fei)
[n:127.0.0.1:34850_vlvxe%2Fei c:collection1 x:collection1]
o.a.s.s.IndexSchema [collection1] default search field in schema is text.
WARNING: Deprecated, please use 'df' on request instead.
[junit4] 2> 148481 INFO
(coreLoadExecutor-675-thread-1-processing-n:127.0.0.1:34850_vlvxe%2Fei)
[n:127.0.0.1:34850_vlvxe%2Fei c:collection1 x:collection1]
o.a.s.s.IndexSchema Loaded schema test/1.0 with uniqueid field id
[junit4] 2> 148494 INFO
(coreLoadExecutor-675-thread-1-processing-n:127.0.0.1:34850_vlvxe%2Fei)
[n:127.0.0.1:34850_vlvxe%2Fei c:collection1 x:collection1]
o.a.s.c.CoreContainer Creating SolrCore 'collection1' using configuration from
collection collection1
[junit4] 2> 148495 INFO
(coreLoadExecutor-675-thread-1-processing-n:127.0.0.1:34850_vlvxe%2Fei)
[n:127.0.0.1:34850_vlvxe%2Fei c:collection1 x:collection1] o.a.s.c.SolrCore
[[collection1] ] Opening new SolrCore at
[/home/jenkins/workspace/Lucene-Solr-master-Linux/solr/build/solr-core/test/J1/temp/solr.cloud.PeerSyncReplicationTest_52FFFC7F0CF16ACE-001/shard-1-001/cores/collection1],
dataDir=[/home/jenkins/workspace/Lucene-Solr-master-Linux/solr/build/solr-core/test/J1/../../../../../../../../../home/jenkins/workspace/Lucene-Solr-master-Linux/solr/build/solr-core/test/J1/temp/solr.cloud.PeerSyncReplicationTest_52FFFC7F0CF16ACE-001/shard-1-001/cores/collection1/data/]
[junit4] 2> 148495 INFO
(coreLoadExecutor-675-thread-1-processing-n:127.0.0.1:34850_vlvxe%2Fei)
[n:127.0.0.1:34850_vlvxe%2Fei c:collection1 x:collection1]
o.a.s.c.JmxMonitoredMap JMX monitoring is enabled. Adding Solr mbeans to JMX
Server: com.sun.jmx.mbeanserver.JmxMBeanServer@86d02d1
[junit4] 2> 148497 INFO
(coreLoadExecutor-675-thread-1-processing-n:127.0.0.1:34850_vlvxe%2Fei)
[n:127.0.0.1:34850_vlvxe%2Fei c:collection1 x:collection1]
o.a.s.u.RandomMergePolicy RandomMergePolicy wrapping class
org.apache.lucene.index.TieredMergePolicy: [TieredMergePolicy:
maxMergeAtOnce=50, maxMergeAtOnceExplicit=30, maxMergedSegmentMB=83.86328125,
floorSegmentMB=1.533203125, forceMergeDeletesPctAllowed=11.300284571286749,
segmentsPerTier=43.0, maxCFSSegmentSizeMB=8.796093022207999E12,
noCFSRatio=0.17565648372055015
[junit4] 2> 148565 WARN
(coreLoadExecutor-675-thread-1-processing-n:127.0.0.1:34850_vlvxe%2Fei)
[n:127.0.0.1:34850_vlvxe%2Fei c:collection1 x:collection1]
o.a.s.c.RequestHandlers INVALID paramSet a in requestHandler {type =
requestHandler,name = /dump,class = DumpRequestHandler,attributes =
{initParams=a, name=/dump, class=DumpRequestHandler},args =
{defaults={a=A,b=B}}}
[junit4] 2> 148578 INFO
(coreLoadExecutor-675-thread-1-processing-n:127.0.0.1:34850_vlvxe%2Fei)
[n:127.0.0.1:34850_vlvxe%2Fei c:collection1 x:collection1]
o.a.s.u.UpdateHandler Using UpdateLog implementation:
org.apache.solr.update.UpdateLog
[junit4] 2> 148578 INFO
(coreLoadExecutor-675-thread-1-processing-n:127.0.0.1:34850_vlvxe%2Fei)
[n:127.0.0.1:34850_vlvxe%2Fei c:collection1 x:collection1] o.a.s.u.UpdateLog
Initializing UpdateLog: dataDir= defaultSyncLevel=FLUSH numRecordsToKeep=1000
maxNumLogsToKeep=10 numVersionBuckets=65536
[junit4] 2> 148579 INFO
(coreLoadExecutor-675-thread-1-processing-n:127.0.0.1:34850_vlvxe%2Fei)
[n:127.0.0.1:34850_vlvxe%2Fei c:collection1 x:collection1]
o.a.s.u.CommitTracker Hard AutoCommit: disabled
[junit4] 2> 148579 INFO
(coreLoadExecutor-675-thread-1-processing-n:127.0.0.1:34850_vlvxe%2Fei)
[n:127.0.0.1:34850_vlvxe%2Fei c:collection1 x:collection1]
o.a.s.u.CommitTracker Soft AutoCommit: disabled
[junit4] 2> 148580 INFO
(coreLoadExecutor-675-thread-1-processing-n:127.0.0.1:34850_vlvxe%2Fei)
[n:127.0.0.1:34850_vlvxe%2Fei c:collection1 x:collection1]
o.a.s.u.RandomMergePolicy RandomMergePolicy wrapping class
org.apache.lucene.index.LogByteSizeMergePolicy: [LogByteSizeMergePolicy:
minMergeSize=1677721, mergeFactor=38, maxMergeSize=2147483648,
maxMergeSizeForForcedMerge=9223372036854775807, calibrateSizeByDeletes=true,
maxMergeDocs=2147483647, maxCFSSegmentSizeMB=8.796093022207999E12,
noCFSRatio=1.0]
[junit4] 2> 148581 INFO
(coreLoadExecutor-675-thread-1-processing-n:127.0.0.1:34850_vlvxe%2Fei)
[n:127.0.0.1:34850_vlvxe%2Fei c:collection1 x:collection1]
o.a.s.s.SolrIndexSearcher Opening [Searcher@1d620cee[collection1] main]
[junit4] 2> 148582 INFO
(coreLoadExecutor-675-thread-1-processing-n:127.0.0.1:34850_vlvxe%2Fei)
[n:127.0.0.1:34850_vlvxe%2Fei c:collection1 x:collection1]
o.a.s.r.ManagedResourceStorage Configured ZooKeeperStorageIO with znodeBase:
/configs/conf1
[junit4] 2> 148582 INFO
(coreLoadExecutor-675-thread-1-processing-n:127.0.0.1:34850_vlvxe%2Fei)
[n:127.0.0.1:34850_vlvxe%2Fei c:collection1 x:collection1]
o.a.s.r.ManagedResourceStorage Loaded null at path _rest_managed.json using
ZooKeeperStorageIO:path=/configs/conf1
[junit4] 2> 148582 INFO
(coreLoadExecutor-675-thread-1-processing-n:127.0.0.1:34850_vlvxe%2Fei)
[n:127.0.0.1:34850_vlvxe%2Fei c:collection1 x:collection1]
o.a.s.h.ReplicationHandler Commits will be reserved for 10000
[junit4] 2> 148584 INFO
(coreLoadExecutor-675-thread-1-processing-n:127.0.0.1:34850_vlvxe%2Fei)
[n:127.0.0.1:34850_vlvxe%2Fei c:collection1 x:collection1] o.a.s.u.UpdateLog
Could not find max version in index or recent updates, using new clock
1557595801046220800
[junit4] 2> 148586 INFO
(searcherExecutor-676-thread-1-processing-n:127.0.0.1:34850_vlvxe%2Fei
x:collection1 c:collection1) [n:127.0.0.1:34850_vlvxe%2Fei c:collection1
x:collection1] o.a.s.c.SolrCore [collection1] Registered new searcher
Searcher@1d620cee[collection1]
main{ExitableDirectoryReader(UninvertingDirectoryReader())}
[junit4] 2> 148589 INFO
(coreZkRegister-670-thread-1-processing-n:127.0.0.1:34850_vlvxe%2Fei
x:collection1 c:collection1) [n:127.0.0.1:34850_vlvxe%2Fei c:collection1
s:shard1 r:core_node1 x:collection1] o.a.s.c.ShardLeaderElectionContext Enough
replicas found to continue.
[junit4] 2> 148589 INFO
(coreZkRegister-670-thread-1-processing-n:127.0.0.1:34850_vlvxe%2Fei
x:collection1 c:collection1) [n:127.0.0.1:34850_vlvxe%2Fei c:collection1
s:shard1 r:core_node1 x:collection1] o.a.s.c.ShardLeaderElectionContext I may
be the new leader - try and sync
[junit4] 2> 148590 INFO
(coreZkRegister-670-thread-1-processing-n:127.0.0.1:34850_vlvxe%2Fei
x:collection1 c:collection1) [n:127.0.0.1:34850_vlvxe%2Fei c:collection1
s:shard1 r:core_node1 x:collection1] o.a.s.c.SyncStrategy Sync replicas to
http://127.0.0.1:34850/vlvxe/ei/collection1/
[junit4] 2> 148590 INFO
(coreZkRegister-670-thread-1-processing-n:127.0.0.1:34850_vlvxe%2Fei
x:collection1 c:collection1) [n:127.0.0.1:34850_vlvxe%2Fei c:collection1
s:shard1 r:core_node1 x:collection1] o.a.s.c.SyncStrategy Sync Success - now
sync replicas to me
[junit4] 2> 148590 INFO
(coreZkRegister-670-thread-1-processing-n:127.0.0.1:34850_vlvxe%2Fei
x:collection1 c:collection1) [n:127.0.0.1:34850_vlvxe%2Fei c:collection1
s:shard1 r:core_node1 x:collection1] o.a.s.c.SyncStrategy
http://127.0.0.1:34850/vlvxe/ei/collection1/ has no replicas
[junit4] 2> 148618 INFO
(coreZkRegister-670-thread-1-processing-n:127.0.0.1:34850_vlvxe%2Fei
x:collection1 c:collection1) [n:127.0.0.1:34850_vlvxe%2Fei c:collection1
s:shard1 r:core_node1 x:collection1] o.a.s.c.ShardLeaderElectionContext I am
the new leader: http://127.0.0.1:34850/vlvxe/ei/collection1/ shard1
[junit4] 2> 148769 INFO
(coreZkRegister-670-thread-1-processing-n:127.0.0.1:34850_vlvxe%2Fei
x:collection1 c:collection1) [n:127.0.0.1:34850_vlvxe%2Fei c:collection1
s:shard1 r:core_node1 x:collection1] o.a.s.c.ZkController I am the leader, no
recovery necessary
[junit4] 2> 149053 INFO
(TEST-PeerSyncReplicationTest.test-seed#[52FFFC7F0CF16ACE]) [ ]
o.a.s.SolrTestCaseJ4 Writing core.properties file to
/home/jenkins/workspace/Lucene-Solr-master-Linux/solr/build/solr-core/test/J1/temp/solr.cloud.PeerSyncReplicationTest_52FFFC7F0CF16ACE-001/shard-2-001/cores/collection1
[junit4] 2> 149054 INFO
(TEST-PeerSyncReplicationTest.test-seed#[52FFFC7F0CF16ACE]) [ ]
o.a.s.c.AbstractFullDistribZkTestBase create jetty 2 in directory
/home/jenkins/workspace/Lucene-Solr-master-Linux/solr/build/solr-core/test/J1/temp/solr.cloud.PeerSyncReplicationTest_52FFFC7F0CF16ACE-001/shard-2-001
[junit4] 2> 149055 INFO
(TEST-PeerSyncReplicationTest.test-seed#[52FFFC7F0CF16ACE]) [ ]
o.e.j.s.Server jetty-9.3.14.v20161028
[junit4] 2> 149056 INFO
(TEST-PeerSyncReplicationTest.test-seed#[52FFFC7F0CF16ACE]) [ ]
o.e.j.s.h.ContextHandler Started
o.e.j.s.ServletContextHandler@f021e5c{/vlvxe/ei,null,AVAILABLE}
[junit4] 2> 149057 INFO
(TEST-PeerSyncReplicationTest.test-seed#[52FFFC7F0CF16ACE]) [ ]
o.e.j.s.AbstractConnector Started
ServerConnector@273cd55d{HTTP/1.1,[http/1.1]}{127.0.0.1:46668}
[junit4] 2> 149057 INFO
(TEST-PeerSyncReplicationTest.test-seed#[52FFFC7F0CF16ACE]) [ ]
o.e.j.s.Server Started @152194ms
[junit4] 2> 149057 INFO
(TEST-PeerSyncReplicationTest.test-seed#[52FFFC7F0CF16ACE]) [ ]
o.a.s.c.s.e.JettySolrRunner Jetty properties:
{solr.data.dir=/home/jenkins/workspace/Lucene-Solr-master-Linux/solr/build/solr-core/test/J1/temp/solr.cloud.PeerSyncReplicationTest_52FFFC7F0CF16ACE-001/tempDir-001/jetty2,
solrconfig=solrconfig.xml, hostContext=/vlvxe/ei, hostPort=46668,
coreRootDirectory=/home/jenkins/workspace/Lucene-Solr-master-Linux/solr/build/solr-core/test/J1/temp/solr.cloud.PeerSyncReplicationTest_52FFFC7F0CF16ACE-001/shard-2-001/cores}
[junit4] 2> 149057 ERROR
(TEST-PeerSyncReplicationTest.test-seed#[52FFFC7F0CF16ACE]) [ ]
o.a.s.s.StartupLoggingUtils Missing Java Option solr.log.dir. Logging may be
missing or incomplete.
[junit4] 2> 149057 INFO
(TEST-PeerSyncReplicationTest.test-seed#[52FFFC7F0CF16ACE]) [ ]
o.a.s.s.SolrDispatchFilter ___ _ Welcome to Apache Solr? version
7.0.0
[junit4] 2> 149057 INFO
(TEST-PeerSyncReplicationTest.test-seed#[52FFFC7F0CF16ACE]) [ ]
o.a.s.s.SolrDispatchFilter / __| ___| |_ _ Starting in cloud mode on port null
[junit4] 2> 149057 INFO
(TEST-PeerSyncReplicationTest.test-seed#[52FFFC7F0CF16ACE]) [ ]
o.a.s.s.SolrDispatchFilter \__ \/ _ \ | '_| Install dir: null
[junit4] 2> 149057 INFO
(TEST-PeerSyncReplicationTest.test-seed#[52FFFC7F0CF16ACE]) [ ]
o.a.s.s.SolrDispatchFilter |___/\___/_|_| Start time:
2017-01-26T13:58:31.273Z
[junit4] 2> 149063 INFO
(TEST-PeerSyncReplicationTest.test-seed#[52FFFC7F0CF16ACE]) [ ]
o.a.s.s.SolrDispatchFilter Loading solr.xml from SolrHome (not found in
ZooKeeper)
[junit4] 2> 149063 INFO
(TEST-PeerSyncReplicationTest.test-seed#[52FFFC7F0CF16ACE]) [ ]
o.a.s.c.SolrXmlConfig Loading container configuration from
/home/jenkins/workspace/Lucene-Solr-master-Linux/solr/build/solr-core/test/J1/temp/solr.cloud.PeerSyncReplicationTest_52FFFC7F0CF16ACE-001/shard-2-001/solr.xml
[junit4] 2> 149073 INFO
(TEST-PeerSyncReplicationTest.test-seed#[52FFFC7F0CF16ACE]) [ ]
o.a.s.c.ZkContainer Zookeeper client=127.0.0.1:42121/solr
[junit4] 2> 149104 INFO
(TEST-PeerSyncReplicationTest.test-seed#[52FFFC7F0CF16ACE])
[n:127.0.0.1:46668_vlvxe%2Fei ] o.a.s.c.c.ZkStateReader Updated live nodes
from ZooKeeper... (0) -> (2)
[junit4] 2> 149110 INFO
(TEST-PeerSyncReplicationTest.test-seed#[52FFFC7F0CF16ACE])
[n:127.0.0.1:46668_vlvxe%2Fei ] o.a.s.c.ZkController Register node as live
in ZooKeeper:/live_nodes/127.0.0.1:46668_vlvxe%2Fei
[junit4] 2> 149111 INFO (zkCallback-155-thread-1) [ ]
o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (2) -> (3)
[junit4] 2> 149111 INFO
(zkCallback-160-thread-1-processing-n:127.0.0.1:34850_vlvxe%2Fei)
[n:127.0.0.1:34850_vlvxe%2Fei ] o.a.s.c.c.ZkStateReader Updated live nodes
from ZooKeeper... (2) -> (3)
[junit4] 2> 149123 INFO
(zkCallback-166-thread-1-processing-n:127.0.0.1:46668_vlvxe%2Fei)
[n:127.0.0.1:46668_vlvxe%2Fei ] o.a.s.c.c.ZkStateReader Updated live nodes
from ZooKeeper... (2) -> (3)
[junit4] 2> 149132 INFO
(zkCallback-151-thread-1-processing-n:127.0.0.1:42439_vlvxe%2Fei)
[n:127.0.0.1:42439_vlvxe%2Fei ] o.a.s.c.c.ZkStateReader Updated live nodes
from ZooKeeper... (2) -> (3)
[junit4] 2> 149181 INFO
(TEST-PeerSyncReplicationTest.test-seed#[52FFFC7F0CF16ACE])
[n:127.0.0.1:46668_vlvxe%2Fei ] o.a.s.c.CorePropertiesLocator Found 1 core
definitions underneath
/home/jenkins/workspace/Lucene-Solr-master-Linux/solr/build/solr-core/test/J1/temp/solr.cloud.PeerSyncReplicationTest_52FFFC7F0CF16ACE-001/shard-2-001/cores
[junit4] 2> 149181 INFO
(TEST-PeerSyncReplicationTest.test-seed#[52FFFC7F0CF16ACE])
[n:127.0.0.1:46668_vlvxe%2Fei ] o.a.s.c.CorePropertiesLocator Cores are:
[collection1]
[junit4] 2> 149190 INFO
(OverseerStateUpdate-97349737342435332-127.0.0.1:42439_vlvxe%2Fei-n_0000000000)
[n:127.0.0.1:42439_vlvxe%2Fei ] o.a.s.c.o.ReplicaMutator Assigning new node
to shard shard=shard1
[junit4] 2> 150212 INFO
(coreLoadExecutor-686-thread-1-processing-n:127.0.0.1:46668_vlvxe%2Fei)
[n:127.0.0.1:46668_vlvxe%2Fei c:collection1 x:collection1] o.a.s.c.SolrConfig
Using Lucene MatchVersion: 7.0.0
[junit4] 2> 150229 INFO
(coreLoadExecutor-686-thread-1-processing-n:127.0.0.1:46668_vlvxe%2Fei)
[n:127.0.0.1:46668_vlvxe%2Fei c:collection1 x:collection1]
o.a.s.s.IndexSchema [collection1] Schema name=test
[junit4] 2> 150358 WARN
(coreLoadExecutor-686-thread-1-processing-n:127.0.0.1:46668_vlvxe%2Fei)
[n:127.0.0.1:46668_vlvxe%2Fei c:collection1 x:collection1]
o.a.s.s.IndexSchema [collection1] default search field in schema is text.
WARNING: Deprecated, please use 'df' on request instead.
[junit4] 2> 150359 INFO
(coreLoadExecutor-686-thread-1-processing-n:127.0.0.1:46668_vlvxe%2Fei)
[n:127.0.0.1:46668_vlvxe%2Fei c:collection1 x:collection1]
o.a.s.s.IndexSchema Loaded schema test/1.0 with uniqueid field id
[junit4] 2> 150378 INFO
(coreLoadExecutor-686-thread-1-processing-n:127.0.0.1:46668_vlvxe%2Fei)
[n:127.0.0.1:46668_vlvxe%2Fei c:collection1 x:collection1]
o.a.s.c.CoreContainer Creating SolrCore 'collection1' using configuration from
collection collection1
[junit4] 2> 150378 INFO
(coreLoadExecutor-686-thread-1-processing-n:127.0.0.1:46668_vlvxe%2Fei)
[n:127.0.0.1:46668_vlvxe%2Fei c:collection1 x:collection1] o.a.s.c.SolrCore
[[collection1] ] Opening new SolrCore at
[/home/jenkins/workspace/Lucene-Solr-master-Linux/solr/build/solr-core/test/J1/temp/solr.cloud.PeerSyncReplicationTest_52FFFC7F0CF16ACE-001/shard-2-001/cores/collection1],
dataDir=[/home/jenkins/workspace/Lucene-Solr-master-Linux/solr/build/solr-core/test/J1/temp/solr.cloud.PeerSyncReplicationTest_52FFFC7F0CF16ACE-001/shard-2-001/cores/collection1/data/]
[junit4] 2> 150379 INFO
(coreLoadExecutor-686-thread-1-processing-n:127.0.0.1:46668_vlvxe%2Fei)
[n:127.0.0.1:46668_vlvxe%2Fei c:collection1 x:collection1]
o.a.s.c.JmxMonitoredMap JMX monitoring is enabled. Adding Solr mbeans to JMX
Server: com.sun.jmx.mbeanserver.JmxMBeanServer@86d02d1
[junit4] 2> 150381 INFO
(coreLoadExecutor-686-thread-1-processing-n:127.0.0.1:46668_vlvxe%2Fei)
[n:127.0.0.1:46668_vlvxe%2Fei c:collection1 x:collection1]
o.a.s.u.RandomMergePolicy RandomMergePolicy wrapping class
org.apache.lucene.index.TieredMergePolicy: [TieredMergePolicy:
maxMergeAtOnce=50, maxMergeAtOnceExplicit=30, maxMergedSegmentMB=83.86328125,
floorSegmentMB=1.533203125, forceMergeDeletesPctAllowed=11.300284571286749,
segmentsPerTier=43.0, maxCFSSegmentSizeMB=8.796093022207999E12,
noCFSRatio=0.17565648372055015
[junit4] 2> 150442 WARN
(coreLoadExecutor-686-thread-1-processing-n:127.0.0.1:46668_vlvxe%2Fei)
[n:127.0.0.1:46668_vlvxe%2Fei c:collection1 x:collection1]
o.a.s.c.RequestHandlers INVALID paramSet a in requestHandler {type =
requestHandler,name = /dump,class = DumpRequestHandler,attributes =
{initParams=a, name=/dump, class=DumpRequestHandler},args =
{defaults={a=A,b=B}}}
[junit4] 2> 150462 INFO
(coreLoadExecutor-686-thread-1-processing-n:127.0.0.1:46668_vlvxe%2Fei)
[n:127.0.0.1:46668_vlvxe%2Fei c:collection1 x:collection1]
o.a.s.u.UpdateHandler Using UpdateLog implementation:
org.apache.solr.update.UpdateLog
[junit4] 2> 150462 INFO
(coreLoadExecutor-686-thread-1-processing-n:127.0.0.1:46668_vlvxe%2Fei)
[n:127.0.0.1:46668_vlvxe%2Fei c:collection1 x:collection1] o.a.s.u.UpdateLog
Initializing UpdateLog: dataDir= defaultSyncLevel=FLUSH numRecordsToKeep=1000
maxNumLogsToKeep=10 numVersionBuckets=65536
[junit4] 2> 150463 INFO
(coreLoadExecutor-686-thread-1-processing-n:127.0.0.1:46668_vlvxe%2Fei)
[n:127.0.0.1:46668_vlvxe%2Fei c:collection1 x:collection1]
o.a.s.u.CommitTracker Hard AutoCommit: disabled
[junit4] 2> 150463 INFO
(coreLoadExecutor-686-thread-1-processing-n:127.0.0.1:46668_vlvxe%2Fei)
[n:127.0.0.1:46668_vlvxe%2Fei c:collection1 x:collection1]
o.a.s.u.CommitTracker Soft AutoCommit: disabled
[junit4] 2> 150464 INFO
(coreLoadExecutor-686-thread-1-processing-n:127.0.0.1:46668_vlvxe%2Fei)
[n:127.0.0.1:46668_vlvxe%2Fei c:collection1 x:collection1]
o.a.s.u.RandomMergePolicy RandomMergePolicy wrapping class
org.apache.lucene.index.LogByteSizeMergePolicy: [LogByteSizeMergePolicy:
minMergeSize=1677721, mergeFactor=38, maxMergeSize=2147483648,
maxMergeSizeForForcedMerge=9223372036854775807, calibrateSizeByDeletes=true,
maxMergeDocs=2147483647, maxCFSSegmentSizeMB=8.796093022207999E12,
noCFSRatio=1.0]
[junit4] 2> 150465 INFO
(coreLoadExecutor-686-thread-1-processing-n:127.0.0.1:46668_vlvxe%2Fei)
[n:127.0.0.1:46668_vlvxe%2Fei c:collection1 x:collection1]
o.a.s.s.SolrIndexSearcher Opening [Searcher@b0633cf[collection1] main]
[junit4] 2> 150467 INFO
(coreLoadExecutor-686-thread-1-processing-n:127.0.0.1:46668_vlvxe%2Fei)
[n:127.0.0.1:46668_vlvxe%2Fei c:collection1 x:collection1]
o.a.s.r.ManagedResourceStorage Configured ZooKeeperStorageIO with znodeBase:
/configs/conf1
[junit4] 2> 150468 INFO
(coreLoadExecutor-686-thread-1-processing-n:127.0.0.1:46668_vlvxe%2Fei)
[n:127.0.0.1:46668_vlvxe%2Fei c:collection1 x:collection1]
o.a.s.r.ManagedResourceStorage Loaded null at path _rest_managed.json using
ZooKeeperStorageIO:path=/configs/conf1
[junit4] 2> 150468 INFO
(coreLoadExecutor-686-thread-1-processing-n:127.0.0.1:46668_vlvxe%2Fei)
[n:127.0.0.1:46668_vlvxe%2Fei c:collection1 x:collection1]
o.a.s.h.ReplicationHandler Commits will be reserved for 10000
[junit4] 2> 150470 INFO
(searcherExecutor-687-thread-1-processing-n:127.0.0.1:46668_vlvxe%2Fei
x:collection1 c:collection1) [n:127.0.0.1:46668_vlvxe%2Fei c:collection1
x:collection1] o.a.s.c.SolrCore [collection1] Registered new searcher
Searcher@b0633cf[collection1]
main{ExitableDirectoryReader(UninvertingDirectoryReader())}
[junit4] 2> 150470 INFO
(coreLoadExecutor-686-thread-1-processing-n:127.0.0.1:46668_vlvxe%2Fei)
[n:127.0.0.1:46668_vlvxe%2Fei c:collection1 x:collection1] o.a.s.u.UpdateLog
Could not find max version in index or recent updates, using new clock
1557595803023835136
[junit4] 2> 150475 INFO
(coreZkRegister-681-thread-1-processing-n:127.0.0.1:46668_vlvxe%2Fei
x:collection1 c:collection1) [n:127.0.0.1:46668_vlvxe%2Fei c:collection1
s:shard1 r:core_node2 x:collection1] o.a.s.c.ZkController Core needs to
recover:collection1
[junit4] 2> 150476 INFO
(updateExecutor-163-thread-1-processing-n:127.0.0.1:46668_vlvxe%2Fei
x:collection1 s:shard1 c:collection1 r:core_node2)
[n:127.0.0.1:46668_vlvxe%2Fei c:collection1 s:shard1 r:core_node2
x:collection1] o.a.s.u.DefaultSolrCoreState Running recovery
[junit4] 2> 150480 INFO
(recoveryExecutor-164-thread-1-processing-n:127.0.0.1:46668_vlvxe%2Fei
x:collection1 s:shard1 c:collection1 r:core_node2)
[n:127.0.0.1:46668_vlvxe%2Fei c:collection1 s:shard1 r:core_node2
x:collection1] o.a.s.c.RecoveryStrategy Starting recovery process.
recoveringAfterStartup=true
[junit4] 2> 150480 INFO
(recoveryExecutor-164-thread-1-processing-n:127.0.0.1:46668_vlvxe%2Fei
x:collection1 s:shard1 c:collection1 r:core_node2)
[n:127.0.0.1:46668_vlvxe%2Fei c:collection1 s:shard1 r:core_node2
x:collection1] o.a.s.c.RecoveryStrategy ###### startupVersions=[[]]
[junit4] 2> 150480 INFO
(recoveryExecutor-164-thread-1-processing-n:127.0.0.1:46668_vlvxe%2Fei
x:collection1 s:shard1 c:collection1 r:core_node2)
[n:127.0.0.1:46668_vlvxe%2Fei c:collection1 s:shard1 r:core_node2
x:collection1] o.a.s.c.RecoveryStrategy Begin buffering updates.
core=[collection1]
[junit4] 2> 150480 INFO
(recoveryExecutor-164-thread-1-processing-n:127.0.0.1:46668_vlvxe%2Fei
x:collection1 s:shard1 c:collection1 r:core_node2)
[n:127.0.0.1:46668_vlvxe%2Fei c:collection1 s:shard1 r:core_node2
x:collection1] o.a.s.u.UpdateLog Starting to buffer updates.
FSUpdateLog{state=ACTIVE, tlog=null}
[junit4] 2> 150480 INFO
(recoveryExecutor-164-thread-1-processing-n:127.0.0.1:46668_vlvxe%2Fei
x:collection1 s:shard1 c:collection1 r:core_node2)
[n:127.0.0.1:46668_vlvxe%2Fei c:collection1 s:shard1 r:core_node2
x:collection1] o.a.s.c.RecoveryStrategy Publishing state of core [collection1]
as recovering, leader is [http://127.0.0.1:34850/vlvxe/ei/collection1/] and I
am [http://127.0.0.1:46668/vlvxe/ei/collection1/]
[junit4] 2> 150495 INFO
(recoveryExecutor-164-thread-1-processing-n:127.0.0.1:46668_vlvxe%2Fei
x:collection1 s:shard1 c:collection1 r:core_node2)
[n:127.0.0.1:46668_vlvxe%2Fei c:collection1 s:shard1 r:core_node2
x:collection1] o.a.s.c.RecoveryStrategy Sending prep recovery command to
[http://127.0.0.1:34850/vlvxe/ei]; [WaitForState:
action=PREPRECOVERY&core=collection1&nodeName=127.0.0.1:46668_vlvxe%252Fei&coreNodeName=core_node2&state=recovering&checkLive=true&onlyIfLeader=true&onlyIfLeaderActive=true]
[junit4] 2> 150520 INFO (qtp1113643123-1234)
[n:127.0.0.1:34850_vlvxe%2Fei ] o.a.s.h.a.PrepRecoveryOp Going to wait for
coreNodeName: core_node2, state: recovering, checkLive: true, onlyIfLeader:
true, onlyIfLeaderActive: true
[junit4] 2> 150520 INFO (qtp1113643123-1234)
[n:127.0.0.1:34850_vlvxe%2Fei ] o.a.s.h.a.PrepRecoveryOp Will wait a max of
183 seconds to see collection1 (shard1 of collection1) have state: recovering
[junit4] 2> 150520 INFO (qtp1113643123-1234)
[n:127.0.0.1:34850_vlvxe%2Fei ] o.a.s.h.a.PrepRecoveryOp In
WaitForState(recovering): collection=collection1, shard=shard1,
thisCore=collection1, leaderDoesNotNeedRecovery=false, isLeader? true,
live=true, checkLive=true, currentState=down, localState=active,
nodeName=127.0.0.1:46668_vlvxe%2Fei, coreNodeName=core_node2,
onlyIfActiveCheckResult=false, nodeProps:
core_node2:{"core":"collection1","base_url":"http://127.0.0.1:46668/vlvxe/ei","node_name":"127.0.0.1:46668_vlvxe%2Fei","state":"down"}
[junit4] 2> 150821 INFO
(TEST-PeerSyncReplicationTest.test-seed#[52FFFC7F0CF16ACE]) [ ]
o.a.s.SolrTestCaseJ4 Writing core.properties file to
/home/jenkins/workspace/Lucene-Solr-master-Linux/solr/build/solr-core/test/J1/temp/solr.cloud.PeerSyncReplicationTest_52FFFC7F0CF16ACE-001/shard-3-001/cores/collection1
[junit4] 2> 150821 INFO
(TEST-PeerSyncReplicationTest.test-seed#[52FFFC7F0CF16ACE]) [ ]
o.a.s.c.AbstractFullDistribZkTestBase create jetty 3 in directory
/home/jenkins/workspace/Lucene-Solr-master-Linux/solr/build/solr-core/test/J1/temp/solr.cloud.PeerSyncReplicationTest_52FFFC7F0CF16ACE-001/shard-3-001
[junit4] 2> 150822 INFO
(TEST-PeerSyncReplicationTest.test-seed#[52FFFC7F0CF16ACE]) [ ]
o.e.j.s.Server jetty-9.3.14.v20161028
[junit4] 2> 150828 INFO
(TEST-PeerSyncReplicationTest.test-seed#[52FFFC7F0CF16ACE]) [ ]
o.e.j.s.h.ContextHandler Started
o.e.j.s.ServletContextHandler@335142c9{/vlvxe/ei,null,AVAILABLE}
[junit4] 2> 150832 INFO
(TEST-PeerSyncReplicationTest.test-seed#[52FFFC7F0CF16ACE]) [ ]
o.e.j.s.AbstractConnector Started
ServerConnector@55521d56{HTTP/1.1,[http/1.1]}{127.0.0.1:36663}
[junit4] 2> 150832 INFO
(TEST-PeerSyncReplicationTest.test-seed#[52FFFC7F0CF16ACE]) [ ]
o.e.j.s.Server Started @153969ms
[junit4] 2> 150832 INFO
(TEST-PeerSyncReplicationTest.test-seed#[52FFFC7F0CF16ACE]) [ ]
o.a.s.c.s.e.JettySolrRunner Jetty properties:
{solr.data.dir=/home/jenkins/workspace/Lucene-Solr-master-Linux/solr/build/solr-core/test/J1/temp/solr.cloud.PeerSyncReplicationTest_52FFFC7F0CF16ACE-001/tempDir-001/jetty3,
solrconfig=solrconfig.xml, hostContext=/vlvxe/ei, hostPort=36663,
coreRootDirectory=/home/jenkins/workspace/Lucene-Solr-master-Linux/solr/build/solr-core/test/J1/../../../../../../../../../home/jenkins/workspace/Lucene-Solr-master-Linux/solr/build/solr-core/test/J1/temp/solr.cloud.PeerSyncReplicationTest_52FFFC7F0CF16ACE-001/shard-3-001/cores}
[junit4] 2> 150832 ERROR
(TEST-PeerSyncReplicationTest.test-seed#[52FFFC7F0CF16ACE]) [ ]
o.a.s.s.StartupLoggingUtils Missing Java Option solr.log.dir. Logging may be
missing or incomplete.
[junit4] 2> 150834 INFO
(TEST-PeerSyncReplicationTest.test-seed#[52FFFC7F0CF16ACE]) [ ]
o.a.s.s.SolrDispatchFilter ___ _ Welcome to Apache Solr? version
7.0.0
[junit4] 2> 150834 INFO
(TEST-PeerSyncReplicationTest.test-seed#[52FFFC7F0CF16ACE]) [ ]
o.a.s.s.SolrDispatchFilter / __| ___| |_ _ Starting in cloud mode on port null
[junit4] 2> 150834 INFO
(TEST-PeerSyncReplicationTest.test-seed#[52FFFC7F0CF16ACE]) [ ]
o.a.s.s.SolrDispatchFilter \__ \/ _ \ | '_| Install dir: null
[junit4] 2> 150834 INFO
(TEST-PeerSyncReplicationTest.test-seed#[52FFFC7F0CF16ACE]) [ ]
o.a.s.s.SolrDispatchFilter |___/\___/_|_| Start time:
2017-01-26T13:58:33.050Z
[junit4] 2> 150848 INFO
(TEST-PeerSyncReplicationTest.test-seed#[52FFFC7F0CF16ACE]) [ ]
o.a.s.s.SolrDispatchFilter Loading solr.xml from SolrHome (not found in
ZooKeeper)
[junit4] 2> 150848 INFO
(TEST-PeerSyncReplicationTest.test-seed#[52FFFC7F0CF16ACE]) [ ]
o.a.s.c.SolrXmlConfig Loading container configuration from
/home/jenkins/workspace/Lucene-Solr-master-Linux/solr/build/solr-core/test/J1/temp/solr.cloud.PeerSyncReplicationTest_52FFFC7F0CF16ACE-001/shard-3-001/solr.xml
[junit4] 2> 150857 INFO
(TEST-PeerSyncReplicationTest.test-seed#[52FFFC7F0CF16ACE]) [ ]
o.a.s.c.ZkContainer Zookeeper client=127.0.0.1:42121/solr
[junit4] 2> 150884 INFO
(TEST-PeerSyncReplicationTest.test-seed#[52FFFC7F0CF16ACE])
[n:127.0.0.1:36663_vlvxe%2Fei ] o.a.s.c.c.ZkStateReader Updated live nodes
from ZooKeeper... (0) -> (3)
[junit4] 2> 150887 INFO
(TEST-PeerSyncReplicationTest.test-seed#[52FFFC7F0CF16ACE])
[n:127.0.0.1:36663_vlvxe%2Fei ] o.a.s.c.ZkController Register node as live
in ZooKeeper:/live_nodes/127.0.0.1:36663_vlvxe%2Fei
[junit4] 2> 150888 INFO
(zkCallback-166-thread-1-processing-n:127.0.0.1:46668_vlvxe%2Fei)
[n:127.0.0.1:46668_vlvxe%2Fei ] o.a.s.c.c.ZkStateReader Updated live nodes
from ZooKeeper... (3) -> (4)
[junit4] 2> 150889 INFO
(zkCallback-160-thread-1-processing-n:127.0.0.1:34850_vlvxe%2Fei)
[n:127.0.0.1:34850_vlvxe%2Fei ] o.a.s.c.c.ZkStateReader Updated live nodes
from ZooKeeper... (3) -> (4)
[junit4] 2> 150889 INFO (zkCallback-155-thread-1) [ ]
o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (3) -> (4)
[junit4] 2> 150896 INFO
(zkCallback-151-thread-3-processing-n:127.0.0.1:42439_vlvxe%2Fei)
[n:127.0.0.1:42439_vlvxe%2Fei ] o.a.s.c.c.ZkStateReader Updated live nodes
from ZooKeeper... (3) -> (4)
[junit4] 2> 150908 INFO
(zkCallback-173-thread-1-processing-n:127.0.0.1:36663_vlvxe%2Fei)
[n:127.0.0.1:36663_vlvxe%2Fei ] o.a.s.c.c.ZkStateReader Updated live nodes
from ZooKeeper... (3) -> (4)
[junit4] 2> 150979 INFO
(TEST-PeerSyncReplicationTest.test-seed#[52FFFC7F0CF16ACE])
[n:127.0.0.1:36663_vlvxe%2Fei ] o.a.s.c.CorePropertiesLocator Found 1 core
definitions underneath
/home/jenkins/workspace/Lucene-Solr-master-Linux/solr/build/solr-core/test/J1/../../../../../../../../../home/jenkins/workspace/Lucene-Solr-master-Linux/solr/build/solr-core/test/J1/temp/solr.cloud.PeerSyncReplicationTest_52FFFC7F0CF16ACE-001/shard-3-001/cores
[junit4] 2> 150979 INFO
(TEST-PeerSyncReplicationTest.test-seed#[52FFFC7F0CF16ACE])
[n:127.0.0.1:36663_vlvxe%2Fei ] o.a.s.c.CorePropertiesLocator Cores are:
[collection1]
[junit4] 2> 150981 INFO
(OverseerStateUpdate-97349737342435332-127.0.0.1:42439_vlvxe%2Fei-n_0000000000)
[n:127.0.0.1:42439_vlvxe%2Fei ] o.a.s.c.o.ReplicaMutator Assigning new node
to shard shard=shard1
[junit4] 2> 151520 INFO (qtp1113643123-1234)
[n:127.0.0.1:34850_vlvxe%2Fei ] o.a.s.h.a.PrepRecoveryOp In
WaitForState(recovering): collection=collection1, shard=shard1,
thisCore=collection1, leaderDoesNotNeedRecovery=false, isLeader? true,
live=true, checkLive=true, currentState=recovering, localState=active,
nodeName=127.0.0.1:46668_vlvxe%2Fei, coreNodeName=core_node2,
onlyIfActiveCheckResult=false, nodeProps:
core_node2:{"core":"collection1","base_url":"http://127.0.0.1:46668/vlvxe/ei","node_name":"127.0.0.1:46668_vlvxe%2Fei","state":"recovering"}
[junit4] 2> 151520 INFO (qtp1113643123-1234)
[n:127.0.0.1:34850_vlvxe%2Fei ] o.a.s.h.a.PrepRecoveryOp Waited
coreNodeName: core_node2, state: recovering, checkLive: true, onlyIfLeader:
true for: 1 seconds.
[junit4] 2> 151521 INFO (qtp1113643123-1234)
[n:127.0.0.1:34850_vlvxe%2Fei ] o.a.s.s.HttpSolrCall [admin] webapp=null
path=/admin/cores
params={nodeName=127.0.0.1:46668_vlvxe%252Fei&onlyIfLeaderActive=true&core=collection1&coreNodeName=core_node2&action=PREPRECOVERY&checkLive=true&state=recovering&onlyIfLeader=true&wt=javabin&version=2}
status=0 QTime=1000
[junit4] 2> 152007 INFO
(coreLoadExecutor-697-thread-1-processing-n:127.0.0.1:36663_vlvxe%2Fei)
[n:127.0.0.1:36663_vlvxe%2Fei c:collection1 x:collection1] o.a.s.c.SolrConfig
Using Lucene MatchVersion: 7.0.0
[junit4] 2> 152095 INFO
(coreLoadExecutor-697-thread-1-processing-n:127.0.0.1:36663_vlvxe%2Fei)
[n:127.0.0.1:36663_vlvxe%2Fei c:collection1 x:collection1]
o.a.s.s.IndexSchema [collection1] Schema name=test
[junit4] 2> 152253 WARN
(coreLoadExecutor-697-thread-1-processing-n:127.0.0.1:36663_vlvxe%2Fei)
[n:127.0.0.1:36663_vlvxe%2Fei c:collection1 x:collection1]
o.a.s.s.IndexSchema [collection1] default search field in schema is text.
WARNING: Deprecated, please use 'df' on request instead.
[junit4] 2> 152267 INFO
(coreLoadExecutor-697-thread-1-processing-n:127.0.0.1:36663_vlvxe%2Fei)
[n:127.0.0.1:36663_vlvxe%2Fei c:collection1 x:collection1]
o.a.s.s.IndexSchema Loaded schema test/1.0 with uniqueid field id
[junit4] 2> 152280 INFO
(coreLoadExecutor-697-thread-1-processing-n:127.0.0.1:36663_vlvxe%2Fei)
[n:127.0.0.1:36663_vlvxe%2Fei c:collection1 x:collection1]
o.a.s.c.CoreContainer Creating SolrCore 'collection1' using configuration from
collection collection1
[junit4] 2> 152281 INFO
(coreLoadExecutor-697-thread-1-processing-n:127.0.0.1:36663_vlvxe%2Fei)
[n:127.0.0.1:36663_vlvxe%2Fei c:collection1 x:collection1] o.a.s.c.SolrCore
[[collection1] ] Opening new SolrCore at
[/home/jenkins/workspace/Lucene-Solr-master-Linux/solr/build/solr-core/test/J1/temp/solr.cloud.PeerSyncReplicationTest_52FFFC7F0CF16ACE-001/shard-3-001/cores/collection1],
dataDir=[/home/jenkins/workspace/Lucene-Solr-master-Linux/solr/build/solr-core/test/J1/../../../../../../../../../home/jenkins/workspace/Lucene-Solr-master-Linux/solr/build/solr-core/test/J1/temp/solr.cloud.PeerSyncReplicationTest_52FFFC7F0CF16ACE-001/shard-3-001/cores/collection1/data/]
[junit4] 2> 152281 INFO
(coreLoadExecutor-697-thread-1-processing-n:127.0.0.1:36663_vlvxe%2Fei)
[n:127.0.0.1:36663_vlvxe%2Fei c:collection1 x:collection1]
o.a.s.c.JmxMonitoredMap JMX monitoring is enabled. Adding Solr mbeans to JMX
Server: com.sun.jmx.mbeanserver.JmxMBeanServer@86d02d1
[junit4] 2> 152284 INFO
(coreLoadExecutor-697-thread-1-processing-n:127.0.0.1:36663_vlvxe%2Fei)
[n:127.0.0.1:36663_vlvxe%2Fei c:collection1 x:collection1]
o.a.s.u.RandomMergePolicy RandomMergePolicy wrapping class
org.apache.lucene.index.TieredMergePolicy: [TieredMergePolicy:
maxMergeAtOnce=50, maxMergeAtOnceExplicit=30, maxMergedSegmentMB=83.86328125,
floorSegmentMB=1.533203125, forceMergeDeletesPctAllowed=11.300284571286749,
segmentsPerTier=43.0, maxCFSSegmentSizeMB=8.796093022207999E12,
noCFSRatio=0.17565648372055015
[junit4] 2> 152314 WARN
(coreLoadExecutor-697-thread-1-processing-n:127.0.0.1:36663_vlvxe%2Fei)
[n:127.0.0.1:36663_vlvxe%2Fei c:collection1 x:collection1]
o.a.s.c.RequestHandlers INVALID paramSet a in requestHandler {type =
requestHandler,name = /dump,class = DumpRequestHandler,attributes =
{initParams=a, name=/dump, class=DumpRequestHandler},args =
{defaults={a=A,b=B}}}
[junit4] 2> 152331 INFO
(coreLoadExecutor-697-thread-1-processing-n:127.0.0.1:36663_vlvxe%2Fei)
[n:127.0.0.1:36663_vlvxe%2Fei c:collection1 x:collection1]
o.a.s.u.UpdateHandler Using UpdateLog implementation:
org.apache.solr.update.UpdateLog
[junit4] 2> 152331 INFO
(coreLoadExecutor-697-thread-1-processing-n:127.0.0.1:36663_vlvxe%2Fei)
[n:127.0.0.1:36663_vlvxe%2Fei c:collection1 x:collection1] o.a.s.u.UpdateLog
Initializing UpdateLog: dataDir= defaultSyncLevel=FLUSH numRecordsToKeep=1000
maxNumLogsToKeep=10 numVersionBuckets=65536
[junit4] 2> 152332 INFO
(coreLoadExecutor-697-thread-1-processing-n:127.0.0.1:36663_vlvxe%2Fei)
[n:127.0.0.1:36663_vlvxe%2Fei c:collection1 x:collection1]
o.a.s.u.CommitTracker Hard AutoCommit: disabled
[junit4] 2> 152332 INFO
(coreLoadExecutor-697-thread-1-processing-n:127.0.0.1:36663_vlvxe%2Fei)
[n:127.0.0.1:36663_vlvxe%2Fei c:collection1 x:collection1]
o.a.s.u.CommitTracker Soft AutoCommit: disabled
[junit4] 2> 152333 INFO
(coreLoadExecutor-697-thread-1-processing-n:127.0.0.1:36663_vlvxe%2Fei)
[n:127.0.0.1:36663_vlvxe%2Fei c:collection1 x:collection1]
o.a.s.u.RandomMergePolicy RandomMergePolicy wrapping class
org.apache.lucene.index.LogByteSizeMergePolicy: [LogByteSizeMergePolicy:
minMergeSize=1677721, mergeFactor=38, maxMergeSize=2147483648,
maxMergeSizeForForcedMerge=9223372036854775807, calibrateSizeByDeletes=true,
maxMergeDocs=2147483647, maxCFSSegmentSizeMB=8.796093022207999E12,
noCFSRatio=1.0]
[junit4] 2> 152334 INFO
(coreLoadExecutor-697-thread-1-processing-n:127.0.0.1:36663_vlvxe%2Fei)
[n:127.0.0.1:36663_vlvxe%2Fei c:collection1 x:collection1]
o.a.s.s.SolrIndexSearcher Opening [Searcher@564c6c61[collection1] main]
[junit4] 2> 152335 INFO
(coreLoadExecutor-697-thread-1-processing-n:127.0.0.1:36663_vlvxe%2Fei)
[n:127.0.0.1:36663_vlvxe%2Fei c:collection1 x:collection1]
o.a.s.r.ManagedResourceStorage Configured ZooKeeperStorageIO with znodeBase:
/configs/conf1
[junit4] 2> 152336 INFO
(coreLoadExecutor-697-thread-1-processing-n:127.0.0.1:36663_vlvxe%2Fei)
[n:127.0.0.1:36663_vlvxe%2Fei c:collection1 x:collection1]
o.a.s.r.ManagedResourceStorage Loaded null at path _rest_managed.json using
ZooKeeperStorageIO:path=/configs/conf1
[junit4] 2> 152336 INFO
(coreLoadExecutor-697-thread-1-processing-n:127.0.0.1:36663_vlvxe%2Fei)
[n:127.0.0.1:36663_vlvxe%2Fei c:collection1 x:collection1]
o.a.s.h.ReplicationHandler Commits will be reserved for 10000
[junit4] 2> 152339 INFO
(searcherExecutor-698-thread-1-processing-n:127.0.0.1:36663_vlvxe%2Fei
x:collection1 c:collection1) [n:127.0.0.1:36663_vlvxe%2Fei c:collection1
x:collection1] o.a.s.c.SolrCore [collection1] Registered new searcher
Searcher@564c6c61[collection1]
main{ExitableDirectoryReader(UninvertingDirectoryReader())}
[junit4] 2> 152341 INFO
(coreLoadExecutor-697-thread-1-processing-n:127.0.0.1:36663_vlvxe%2Fei)
[n:127.0.0.1:36663_vlvxe%2Fei c:collection1 x:collection1] o.a.s.u.UpdateLog
Could not find max version in index or recent updates, using new clock
1557595804985720832
[junit4] 2> 152346 INFO
(coreZkRegister-692-thread-1-processing-n:127.0.0.1:36663_vlvxe%2Fei
x:collection1 c:collection1) [n:127.0.0.1:36663_vlvxe%2Fei c:collection1
s:shard1 r:core_node3 x:collection1] o.a.s.c.ZkController Core needs to
recover:collection1
[junit4] 2> 152347 INFO
(updateExecutor-170-thread-1-processing-n:127.0.0.1:36663_vlvxe%2Fei
x:collection1 s:shard1 c:collection1 r:core_node3)
[n:127.0.0.1:36663_vlvxe%2Fei c:collection1 s:shard1 r:core_node3
x:collection1] o.a.s.u.DefaultSolrCoreState Running recovery
[junit4] 2> 152347 INFO
(recoveryExecutor-171-thread-1-processing-n:127.0.0.1:36663_vlvxe%2Fei
x:collection1 s:shard1 c:collection1 r:core_node3)
[n:127.0.0.1:36663_vlvxe%2Fei c:collection1 s:shard1 r:core_node3
x:collection1] o.a.s.c.RecoveryStrategy Starting recovery process.
recoveringAfterStartup=true
[junit4] 2> 152347 INFO
(recoveryExecutor-171-thread-1-processing-n:127.0.0.1:36663_vlvxe%2Fei
x:collection1 s:shard1 c:collection1 r:core_node3)
[n:127.0.0.1:36663_vlvxe%2Fei c:collection1 s:shard1 r:core_node3
x:collection1] o.a.s.c.RecoveryStrategy ###### startupVersions=[[]]
[junit4] 2> 152347 INFO
(recoveryExecutor-171-thread-1-processing-n:127.0.0.1:36663_vlvxe%2Fei
x:collection1 s:shard1 c:collection1 r:core_node3)
[n:127.0.0.1:36663_vlvxe%2Fei c:collection1 s:shard1 r:core_node3
x:collection1] o.a.s.c.RecoveryStrategy Begin buffering updates.
core=[collection1]
[junit4] 2> 152347 INFO
(recoveryExecutor-171-thread-1-processing-n:127.0.0.1:36663_vlvxe%2Fei
x:collection1 s:shard1 c:collection1 r:core_node3)
[n:127.0.0.1:36663_vlvxe%2Fei c:collection1 s:shard1 r:core_node3
x:collection1] o.a.s.u.UpdateLog Starting to buffer updates.
FSUpdateLog{state=ACTIVE, tlog=null}
[junit4] 2> 152347 INFO
(recoveryExecutor-171-thread-1-processing-n:127.0.0.1:36663_vlvxe%2Fei
x:collection1 s:shard1 c:collection1 r:core_node3)
[n:127.0.0.1:36663_vlvxe%2Fei c:collection1 s:shard1 r:core_node3
x:collection1] o.a.s.c.RecoveryStrategy Publishing state of core [collection1]
as recovering, leader is [http://127.0.0.1:34850/vlvxe/ei/collection1/] and I
am [http://127.0.0.1:36663/vlvxe/ei/collection1/]
[junit4] 2> 152350 INFO
(recoveryExecutor-171-thread-1-processing-n:127.0.0.1:36663_vlvxe%2Fei
x:collection1 s:shard1 c:collection1 r:core_node3)
[n:127.0.0.1:36663_vlvxe%2Fei c:collection1 s:shard1 r:core_node3
x:collection1] o.a.s.c.RecoveryStrategy Sending prep recovery command to
[http://127.0.0.1:34850/vlvxe/ei]; [WaitForState:
action=PREPRECOVERY&core=collection1&nodeName=127.0.0.1:36663_vlvxe%252Fei&coreNodeName=core_node3&state=recovering&checkLive=true&onlyIfLeader=true&onlyIfLeaderActive=true]
[junit4] 2> 152352 INFO (qtp1113643123-1234)
[n:127.0.0.1:34850_vlvxe%2Fei ] o.a.s.h.a.PrepRecoveryOp Going to wait for
coreNodeName: core_node3, state: recovering, checkLive: true, onlyIfLeader:
true, onlyIfLeaderActive: true
[junit4] 2> 152352 INFO (qtp1113643123-1234)
[n:127.0.0.1:34850_vlvxe%2Fei ] o.a.s.h.a.PrepRecoveryOp Will wait a max of
183 seconds to see collection1 (shard1 of collection1) have state: recovering
[junit4] 2> 152352 INFO (qtp1113643123-1234)
[n:127.0.0.1:34850_vlvxe%2Fei ] o.a.s.h.a.PrepRecoveryOp In
WaitForState(recovering): collection=collection1, shard=shard1,
thisCore=collection1, leaderDoesNotNeedRecovery=false, isLeader? true,
live=true, checkLive=true, currentState=down, localState=active,
nodeName=127.0.0.1:36663_vlvxe%2Fei, coreNodeName=core_node3,
onlyIfActiveCheckResult=false, nodeProps:
core_node3:{"core":"collection1","base_url":"http://127.0.0.1:36663/vlvxe/ei","node_name":"127.0.0.1:36663_vlvxe%2Fei","state":"down"}
[junit4] 2> 152573 INFO
(TEST-PeerSyncReplicationTest.test-seed#[52FFFC7F0CF16ACE]) [ ]
o.a.s.SolrTestCaseJ4 ###Starting test
[junit4] 2> 152573 INFO
(TEST-PeerSyncReplicationTest.test-seed#[52FFFC7F0CF16ACE]) [ ]
o.a.s.c.AbstractFullDistribZkTestBase Wait for recoveries to finish - wait 30
for each attempt
[junit4] 2> 152573 INFO
(TEST-PeerSyncReplicationTest.test-seed#[52FFFC7F0CF16ACE]) [ ]
o.a.s.c.AbstractDistribZkTestBase Wait for recoveries to finish - collection:
collection1 failOnTimeout:true timeout (sec):30
[junit4] 2> 153352 INFO (qtp1113643123-1234)
[n:127.0.0.1:34850_vlvxe%2Fei ] o.a.s.h.a.PrepRecoveryOp In
WaitForState(recovering): collection=collection1, shard=shard1,
thisCore=collection1, leaderDoesNotNeedRecovery=false, isLeader? true,
live=true, checkLive=true, currentState=recovering, localState=active,
nodeName=127.0.0.1:36663_vlvxe%2Fei, coreNodeName=core_node3,
onlyIfActiveCheckResult=false, nodeProps:
core_node3:{"core":"collection1","base_url":"http://127.0.0.1:36663/vlvxe/ei","node_name":"127.0.0.1:36663_vlvxe%2Fei","state":"recovering"}
[junit4] 2> 153353 INFO (qtp1113643123-1234)
[n:127.0.0.1:34850_vlvxe%2Fei ] o.a.s.h.a.PrepRecoveryOp Waited
coreNodeName: core_node3, state: recovering, checkLive: true, onlyIfLeader:
true for: 1 seconds.
[junit4] 2> 153353 INFO (qtp1113643123-1234)
[n:127.0.0.1:34850_vlvxe%2Fei ] o.a.s.s.HttpSolrCall [admin] webapp=null
path=/admin/cores
params={nodeName=127.0.0.1:36663_vlvxe%252Fei&onlyIfLeaderActive=true&core=collection1&coreNodeName=core_node3&action=PREPRECOVERY&checkLive=true&state=recovering&onlyIfLeader=true&wt=javabin&version=2}
status=0 QTime=1000
[junit4] 2> 158521 INFO
(recoveryExecutor-164-thread-1-processing-n:127.0.0.1:46668_vlvxe%2Fei
x:collection1 s:shard1 c:collection1 r:core_node2)
[n:127.0.0.1:46668_vlvxe%2Fei c:collection1 s:shard1 r:core_node2
x:collection1] o.a.s.c.RecoveryStrategy Attempting to PeerSync from
[http://127.0.0.1:34850/vlvxe/ei/collection1/] - recoveringAfterStartup=[true]
[junit4] 2> 158521 INFO
(recoveryExecutor-164-thread-1-processing-n:127.0.0.1:46668_vlvxe%2Fei
x:collection1 s:shard1 c:collection1 r:core_node2)
[n:127.0.0.1:46668_vlvxe%2Fei c:collection1 s:shard1 r:core_node2
x:collection1] o.a.s.u.PeerSync PeerSync: core=collection1
url=http://127.0.0.1:46668/vlvxe/ei START
replicas=[http://127.0.0.1:34850/vlvxe/ei/collection1/] nUpdates=1000
[junit4] 2> 158524 INFO (qtp1113643123-1230)
[n:127.0.0.1:34850_vlvxe%2Fei c:collection1 s:shard1 r:core_node1
x:collection1] o.a.s.u.IndexFingerprint IndexFingerprint millis:0.0
result:{maxVersionSpecified=9223372036854775807, maxVersionEncountered=0,
maxInHash=0, versionsHash=0, numVersions=0, numDocs=0, maxDoc=0}
[junit4] 2> 158524 INFO (qtp1113643123-1230)
[n:127.0.0.1:34850_vlvxe%2Fei c:collection1 s:shard1 r:core_node1
x:collection1] o.a.s.c.S.Request [collection1] webapp=/vlvxe/ei path=/get
params={distrib=false&qt=/get&getFingerprint=9223372036854775807&wt=javabin&version=2}
status=0 QTime=0
[junit4] 2> 158525 INFO
(recoveryExecutor-164-thread-1-processing-n:127.0.0.1:46668_vlvxe%2Fei
x:collection1 s:shard1 c:collection1 r:core_node2)
[n:127.0.0.1:46668_vlvxe%2Fei c:collection1 s:shard1 r:core_node2
x:collection1] o.a.s.u.IndexFingerprint IndexFingerprint millis:0.0
result:{maxVersionSpecified=9223372036854775807, maxVersionEncountered=0,
maxInHash=0, versionsHash=0, numVersions=0, numDocs=0, maxDoc=0}
[junit4] 2> 158525 INFO
(recoveryExecutor-164-thread-1-processing-n:127.0.0.1:46668_vlvxe%2Fei
x:collection1 s:shard1 c:collection1 r:core_node2)
[n:127.0.0.1:46668_vlvxe%2Fei c:collection1 s:shard1 r:core_node2
x:collection1] o.a.s.u.PeerSync We are already in sync. No need to do a
PeerSync
[junit4] 2> 158525 INFO
(recoveryExecutor-164-thread-1-processing-n:127.0.0.1:46668_vlvxe%2Fei
x:collection1 s:shard1 c:collection1 r:core_node2)
[n:127.0.0.1:46668_vlvxe%2Fei c:collection1 s:shard1 r:core_node2
x:collection1] o.a.s.u.DirectUpdateHandler2 start
commit{,optimize=false,openSearcher=true,waitSearcher=true,expungeDeletes=false,softCommit=false,prepareCommit=false}
[junit4] 2> 158525 INFO
(recoveryExecutor-164-thread-1-processing-n:127.0.0.1:46668_vlvxe%2Fei
x:collection1 s:shard1 c:collection1 r:core_node2)
[n:127.0.0.1:46668_vlvxe%2Fei c:collection1 s:shard1 r:core_node2
x:collection1] o.a.s.u.DirectUpdateHandler2 No uncommitted changes. Skipping
IW.commit.
[junit4] 2> 158525 INFO
(recoveryExecutor-164-thread-1-processing-n:127.0.0.1:46668_vlvxe%2Fei
x:collection1 s:shard1 c:collection1 r:core_node2)
[n:127.0.0.1:46668_vlvxe%2Fei c:collection1 s:shard1 r:core_node2
x:collection1] o.a.s.u.DirectUpdateHandler2 end_commit_flush
[junit4] 2> 158525 INFO
(recoveryExecutor-164-thread-1-processing-n:127.0.0.1:46668_vlvxe%2Fei
x:collection1 s:shard1 c:collection1 r:core_node2)
[n:127.0.0.1:46668_vlvxe%2Fei c:collection1 s:shard1 r:core_node2
x:collection1] o.a.s.c.RecoveryStrategy PeerSync stage of recovery was
successful.
[junit4] 2> 158525 INFO
(recoveryExecutor-164-thread-1-processing-n:127.0.0.1:46668_vlvxe%2Fei
x:collection1 s:shard1 c:collection1 r:core_node2)
[n:127.0.0.1:46668_vlvxe%2Fei c:collection1 s:shard1 r:core_node2
x:collection1] o.a.s.c.RecoveryStrategy Replaying updates buffered during
PeerSync.
[junit4] 2> 158525 INFO
(recoveryExecutor-164-thread-1-processing-n:127.0.0.1:46668_vlvxe%2Fei
x:collection1 s:shard1 c:collection1 r:core_node2)
[n:127.0.0.1:46668_vlvxe%2Fei c:collection1 s:shard1 r:core_node2
x:collection1] o.a.s.c.RecoveryStrategy No replay needed.
[junit4] 2> 158525 INFO
(recoveryExecutor-164-thread-1-processing-n:127.0.0.1:46668_vlvxe%2Fei
x:collection1 s:shard1 c:collection1 r:core_node2)
[n:127.0.0.1:46668_vlvxe%2Fei c:collection1 s:shard1 r:core_node2
x:collection1] o.a.s.c.RecoveryStrategy Registering as Active after recovery.
[junit4] 2> 160353 INFO
(recoveryExecutor-171-thread-1-processing-n:127.0.0.1:36663_vlvxe%2Fei
x:collection1 s:shard1 c:collection1 r:core_node3)
[n:127.0.0.1:36663_vlvxe%2Fei c:collection1 s:shard1 r:core_node3
x:collection1] o.a.s.c.RecoveryStrategy Attempting to PeerSync from
[http://127.0.0.1:34850/vlvxe/ei/collection1/] - recoveringAfterStartup=[true]
[junit4] 2> 160353 INFO
(recoveryExecutor-171-thread-1-processing-n:127.0.0.1:36663_vlvxe%2Fei
x:collection1 s:shard1 c:collection1 r:core_node3)
[n:127.0.0.1:36663_vlvxe%2Fei c:collection1 s:shard1 r:core_node3
x:collection1] o.a.s.u.PeerSync PeerSync: core=collection1
url=http://127.0.0.1:36663/vlvxe/ei START
replicas=[http://127.0.0.1:34850/vlvxe/ei/collection1/] nUpdates=1000
[junit4] 2> 160355 INFO (qtp1113643123-1229)
[n:127.0.0.1:34850_vlvxe%2Fei c:collection1 s:shard1 r:core_node1
x:collection1] o.a.s.u.IndexFingerprint IndexFingerprint millis:0.0
result:{maxVersionSpecified=9223372036854775807, maxVersionEncountered=0,
maxInHash=0, versionsHash=0, numVersions=0, numDocs=0, maxDoc=0}
[junit4] 2> 160355 INFO (qtp1113643123-1229)
[n:127.0.0.1:34850_vlvxe%2Fei c:collection1 s:shard1 r:core_node1
x:collection1] o.a.s.c.S.Request [collection1] webapp=/vlvxe/ei path=/get
params={distrib=false&qt=/get&getFingerprint=9223372036854775807&wt=javabin&version=2}
status=0 QTime=0
[junit4] 2> 160356 INFO
(recoveryExecutor-171-thread-1-processing-n:127.0.0.1:36663_vlvxe%2Fei
x:collection1 s:shard1 c:collection1 r:core_node3)
[n:127.0.0.1:36663_vlvxe%2Fei c:collection1 s:shard1 r:core_node3
x:collection1] o.a.s.u.IndexFingerprint IndexFingerprint millis:0.0
result:{maxVersionSpecified=9223372036854775807, maxVersionEncountered=0,
maxInHash=0, versionsHash=0, numVersions=0, numDocs=0, maxDoc=0}
[junit4] 2> 160356 INFO
(recoveryExecutor-171-thread-1-processing-n:127.0.0.1:36663_vlvxe%2Fei
x:collection1 s:shard1 c:collection1 r:core_node3)
[n:127.0.0.1:36663_vlvxe%2Fei c:collection1 s:shard1 r:core_node3
x:collection1] o.a.s.u.PeerSync We are already in sync. No need to do a
PeerSync
[junit4] 2> 160356 INFO
(recoveryExecutor-171-thread-1-processing-n:127.0.0.1:36663_vlvxe%2Fei
x:collection1 s:shard1 c:collection1 r:core_node3)
[n:127.0.0.1:36663_vlvxe%2Fei c:collection1 s:shard1 r:core_node3
x:collection1] o.a.s.u.DirectUpdateHandler2 start
commit{,optimize=false,openSearcher=true,waitSearcher=true,expungeDeletes=false,softCommit=false,prepareCommit=false}
[junit4] 2> 160356 INFO
(recoveryExecutor-171-thread-1-processing-n:127.0.0.1:36663_vlvxe%2Fei
x:collection1 s:shard1 c:collection1 r:core_node3)
[n:127.0.0.1:36663_vlvxe%2Fei c:collection1 s:shard1 r:core_node3
x:collection1] o.a.s.u.DirectUpdateHandler2 No uncommitted changes. Skipping
IW.commit.
[junit4] 2> 160356 INFO
(recoveryExecutor-171-thread-1-processing-n:127.0.0.1:36663_vlvxe%2Fei
x:collection1 s:shard1 c:collection1 r:core_node3)
[n:127.0.0.1:36663_vlvxe%2Fei c:collection1 s:shard1 r:core_node3
x:collection1] o.a.s.u.DirectUpdateHandler2 end_commit_flush
[junit4] 2> 160356 INFO
(recoveryExecutor-171-thread-1-processing-n:127.0.0.1:36663_vlvxe%2Fei
x:collection1 s:shard1 c:collection1 r:core_node3)
[n:127.0.0.1:36663_vlvxe%2Fei c:collection1 s:shard1 r:core_node3
x:collection1] o.a.s.c.RecoveryStrategy PeerSync stage of recovery was
successful.
[junit4] 2> 160356 INFO
(recoveryExecutor-171-thread-1-processing-n:127.0.0.1:36663_vlvxe%2Fei
x:collection1 s:shard1 c:collection1 r:core_node3)
[n:127.0.0.1:36663_vlvxe%2Fei c:collection1 s:shard1 r:core_node3
x:collection1] o.a.s.c.RecoveryStrategy Replaying updates buffered during
PeerSync.
[junit4] 2> 160356 INFO
(recoveryExecutor-171-thread-1-processing-n:127.0.0.1:36663_vlvxe%2Fei
x:collection1 s:shard1 c:collection1 r:core_node3)
[n:127.0.0.1:36663_vlvxe%2Fei c:collection1 s:shard1 r:core_node3
x:collection1] o.a.s.c.RecoveryStrategy No replay needed.
[junit4] 2> 160356 INFO
(recoveryExecutor-171-thread-1-processing-n:127.0.0.1:36663_vlvxe%2Fei
x:collection1 s:shard1 c:collection1 r:core_node3)
[n:127.0.0.1:36663_vlvxe%2Fei c:collection1 s:shard1 r:core_node3
x:collection1] o.a.s.c.RecoveryStrategy Registering as Active after recovery.
[junit4] 2> 160574 INFO
(TEST-PeerSyncReplicationTest.test-seed#[52FFFC7F0CF16ACE]) [ ]
o.a.s.c.AbstractDistribZkTestBase Recoveries finished - collection: collection1
[junit4] 2> 160587 INFO (qtp1339177317-1191)
[n:127.0.0.1:42439_vlvxe%2Fei c:control_collection s:shard1 r:core_node1
x:collection1] o.a.s.u.DirectUpdateHandler2 start
commit{,optimize=false,openSearcher=true,waitSearcher=true,expungeDeletes=false,softCommit=false,prepareCommit=false}
[junit4] 2> 160587 INFO (qtp1339177317-1191)
[n:127.0.0.1:42439_vlvxe%2Fei c:control_collection s:shard1 r:core_node1
x:collection1] o.a.s.u.DirectUpdateHandler2 No uncommitted changes. Skipping
IW.commit.
[junit4] 2> 160588 INFO (qtp1339177317-1191)
[n:127.0.0.1:42439_vlvxe%2Fei c:control_collection s:shard1 r:core_node1
x:collection1] o.a.s.u.DirectUpdateHandler2 end_commit_flush
[junit4] 2> 160588 INFO (qtp1339177317-1191)
[n:127.0.0.1:42439_vlvxe%2Fei c:control_collection s:shard1 r:core_node1
x:collection1] o.a.s.u.p.LogUpdateProcessorFactory [collection1]
webapp=/vlvxe/ei path=/update
params={waitSearcher=true&commit=true&softCommit=false&wt=javabin&version=2}{commit=}
0 0
[junit4] 2> 160616 INFO (qtp931799052-1297) [n:127.0.0.1:36663_vlvxe%2Fei
c:collection1 s:shard1 r:core_node3 x:collection1] o.a.s.u.DirectUpdateHandler2
start
commit{,optimize=false,openSearcher=true,waitSearcher=true,expungeDeletes=false,softCommit=false,prepareCommit=false}
[junit4] 2> 160616 INFO (qtp931799052-1297) [n:127.0.0.1:36663_vlvxe%2Fei
c:collection1 s:shard1 r:core_node3 x:collection1] o.a.s.u.DirectUpdateHandler2
No uncommitted changes. Skipping IW.commit.
[junit4] 2> 160616 INFO (qtp260875601-1262) [n:127.0.0.1:46668_vlvxe%2Fei
c:collection1 s:shard1 r:core_node2 x:collection1] o.a.s.u.DirectUpdateHandler2
start
commit{,optimize=false,openSearcher=true,waitSearcher=true,expungeDeletes=false,softCommit=false,prepareCommit=false}
[junit4] 2> 160616 INFO (qtp260875601-1262) [n:127.0.0.1:46668_vlvxe%2Fei
c:collection1 s:shard1 r:core_node2 x:collection1] o.a.s.u.DirectUpdateHandler2
No uncommitted changes. Skipping IW.commit.
[junit4] 2> 160616 INFO (qtp931799052-1297) [n:127.0.0.1:36663_vlvxe%2Fei
c:collection1 s:shard1 r:core_node3 x:collection1] o.a.s.u.DirectUpdateHandler2
end_commit_flush
[junit4] 2> 160616 INFO (qtp260875601-1262) [n:127.0.0.1:46668_vlvxe%2Fei
c:collection1 s:shard1 r:core_node2 x:collection1] o.a.s.u.DirectUpdateHandler2
end_commit_flush
[junit4] 2> 160616 INFO (qtp931799052-1297) [n:127.0.0.1:36663_vlvxe%2Fei
c:collection1 s:shard1 r:core_node3 x:collection1]
o.a.s.u.p.LogUpdateProcessorFactory [collection1] webapp=/vlvxe/ei
path=/update
params={update.distrib=FROMLEADER&waitSearcher=true&openSearcher=true&commit=true&softCommit=false&distrib.from=http://127.0.0.1:34850/vlvxe/ei/collection1/&commit_end_point=true&wt=javabin&version=2&expungeDeletes=false}{commit=}
0 1
[junit4] 2> 160616 INFO (qtp260875601-1262) [n:127.0.0.1:46668_vlvxe%2Fei
c:collection1 s:shard1 r:core_node2 x:collection1]
o.a.s.u.p.LogUpdateProcessorFactory [collection1] webapp=/vlvxe/ei
path=/update
params={update.distrib=FROMLEADER&waitSearcher=true&openSearcher=true&commit=true&softCommit=false&distrib.from=http://127.0.0.1:34850/vlvxe/ei/collection1/&commit_end_point=true&wt=javabin&version=2&expungeDeletes=false}{commit=}
0 0
[junit4] 2> 160619 INFO (qtp1113643123-1233)
[n:127.0.0.1:34850_vlvxe%2Fei c:collection1 s:shard1 r:core_node1
x:collection1] o.a.s.u.DirectUpdateHandler2 start
commit{,optimize=false,openSearcher=true,waitSearcher=true,expungeDeletes=false,softCommit=false,prepareCommit=false}
[junit4] 2> 160619 INFO (qtp1113643123-1233)
[n:127.0.0.1:34850_vlvxe%2Fei c:collection1 s:shard1 r:core_node1
x:collection1] o.a.s.u.DirectUpdateHandler2 No uncommitted changes. Skipping
IW.commit.
[junit4] 2> 160620 INFO (qtp1113643123-1233)
[n:127.0.0.1:34850_vlvxe%2Fei c:collection1 s:shard1 r:core_node1
x:collection1] o.a.s.u.DirectUpdateHandler2 end_commit_flush
[junit4] 2> 160620 INFO (qtp1113643123-1233)
[n:127.0.0.1:34850_vlvxe%2Fei c:collection1 s:shard1 r:core_node1
x:collection1] o.a.s.u.p.LogUpdateProcessorFactory [collection1]
webapp=/vlvxe/ei path=/update
params={update.distrib=FROMLEADER&waitSearcher=true&openSearcher=true&commit=true&softCommit=false&distrib.from=http://127.0.0.1:34850/vlvxe/ei/collection1/&commit_end_point=true&wt=javabin&version=2&expungeDeletes=false}{commit=}
0 4
[junit4] 2> 160620 INFO (qtp1113643123-1236)
[n:127.0.0.1:34850_vlvxe%2Fei c:collection1 s:shard1 r:core_node1
x:collection1] o.a.s.u.p.LogUpdateProcessorFactory [collection1]
webapp=/vlvxe/ei path=/update
params={waitSearcher=true&commit=true&softCommit=false&wt=javabin&version=2}{commit=}
0 30
[junit4] 2> 160623 INFO (qtp1113643123-1229)
[n:127.0.0.1:34850_vlvxe%2Fei c:collection1 s:shard1 r:core_node1
x:collection1] o.a.s.c.S.Request [collection1] webapp=/vlvxe/ei path=/select
params={q=*:*&distrib=false&tests=checkShardConsistency&rows=0&wt=javabin&version=2}
hits=0 status=0 QTime=0
[junit4] 2> 160625 INFO (qtp260875601-1258) [n:127.0.0.1:46668_vlvxe%2Fei
c:collection1 s:shard1 r:core_node2 x:collection1] o.a.s.c.S.Request
[collection1] webapp=/vlvxe/ei path=/select
params={q=*:*&distrib=false&tests=checkShardConsistency&rows=0&wt=javabin&version=2}
hits=0 status=0 QTime=0
[junit4] 2> 160626 INFO (qtp931799052-1293) [n:127.0.0.1:36663_vlvxe%2Fei
c:collection1 s:shard1 r:core_node3 x:collection1] o.a.s.c.S.Request
[collection1] webapp=/vlvxe/ei path=/select
params={q=*:*&distrib=false&tests=checkShardConsistency&rows=0&wt=javabin&version=2}
hits=0 status=0 QTime=0
[junit4] 2> 162629 INFO (qtp1339177317-1196)
[n:127.0.0.1:42439_vlvxe%2Fei c:control_collection s:shard1 r:core_node1
x:collection1] o.a.s.u.p.LogUpdateProcessorFactory [collection1]
webapp=/vlvxe/ei path=/update params={wt=javabin&version=2}{deleteByQuery=*:*
(-1557595815771373568)} 0 2
[junit4] 2> 162635 INFO (qtp931799052-1298) [n:127.0.0.1:36663_vlvxe%2Fei
c:collection1 s:shard1 r:core_node3 x:collection1]
o.a.s.u.p.LogUpdateProcessorFactory [collection1] webapp=/vlvxe/ei
path=/update
params={update.distrib=FROMLEADER&_version_=-1557595815774519296&distrib.from=http://127.0.0.1:34850/vlvxe/ei/collection1/&wt=javabin&version=2}{deleteByQuery=*:*
(-1557595815774519296)} 0 2
[junit4] 2> 162635 INFO (qtp260875601-1262) [n:127.0.0.1:46668_vlvxe%2Fei
c:collection1 s:shard1 r:core_node2 x:collection1]
o.a.s.u.p.LogUpdateProcessorFactory [collection1] webapp=/vlvxe/ei
path=/update
params={update.distrib=FROMLEADER&_version_=-1557595815774519296&distrib.from=http://127.0.0.1:34850/vlvxe/ei/collection1/&wt=javabin&version=2}{deleteByQuery=*:*
(-1557595815774519296)} 0 1
[junit4] 2> 162635 INFO (qtp1113643123-1230)
[n:127.0.0.1:34850_vlvxe%2Fei c:collection1 s:shard1 r:core_node1
x:collection1] o.a.s.u.p.LogUpdateProcessorFactory [collection1]
webapp=/vlvxe/ei path=/update params={wt=javabin&version=2}{deleteByQuery=*:*
(-1557595815774519296)} 0 5
[junit4] 2> 162642 INFO (qtp260875601-1257) [n:127.0.0.1:46668_vlvxe%2Fei
c:collection1 s:shard1 r:core_node2 x:collection1]
o.a.s.u.p.LogUpdateProcessorFactory [collection1] webapp=/vlvxe/ei
path=/update
params={update.distrib=FROMLEADER&distrib.from=http://127.0.0.1:34850/vlvxe/ei/collection1/&wt=javabin&version=2}{add=[0
(1557595815782907904)]} 0 1
[junit4] 2> 162648 INFO (qtp931799052-1292) [n:127.0.0.1:36663_vlvxe%2Fei
c:collection1 s:shard1 r:core_node3 x:collection1]
o.a.s.u.p.LogUpdateProcessorFactory [collection1] webapp=/vlvxe/ei
path=/update
params={update.distrib=FROMLEADER&distrib.from=http://127.0.0.1:34850/vlvxe/ei/collection1/&wt=javabin&version=2}{add=[0
(1557595815782907904)]} 0 7
[junit4] 2> 162648 INFO (qtp1113643123-1236)
[n:127.0.0.1:34850_vlvxe%2Fei c:collection1 s:shard1 r:core_node1
x:collection1] o.a.s.u.p.LogUpdateProcessorFactory [collection1]
webapp=/vlvxe/ei path=/update params={wt=javabin&version=2}{add=[0
(1557595815782907904)]} 0 10
[junit4] 2> 162652 INFO (qtp260875601-1263) [n:127.0.0.1:46668_vlvxe%2Fei
c:collection1 s:shard1 r:core_node2 x:collection1]
o.a.s.u.p.LogUpdateProcessorFactory [collection1] webapp=/vlvxe/ei
path=/update
params={update.distrib=FROMLEADER&distrib.from=http://127.0.0.1:34850/vlvxe/ei/collection1/&wt=javabin&version=2}{add=[1
(1557595815795490816)]} 0 1
[junit4] 2> 162652 INFO (qtp931799052-1297) [n:127.0.0.1:36663_vlvxe%2Fei
c:collection1 s:shard1 r:core_node3 x:collection1]
o.a.s.u.p.LogUpdateProcessorFactory [collection1] webapp=/vlvxe/ei
path=/update
params={update.distrib=FROMLEADER&distrib.from=http://127.0.0.1:34850/vlvxe/ei/collection1/&wt=javabin&version=2}{add=[1
(1557595815795490816)]} 0 1
[junit4] 2> 162652 INFO (qtp1113643123-1232)
[n:127.0.0.1:34850_vlvxe%2Fei c:collection1 s:shard1 r:core_node1
x:collection1] o.a.s.u.p.LogUpdateProcessorFactory [collection1]
webapp=/vlvxe/ei path=/update params={wt=javabin&version=2}{add=[1
(1557595815795490816)]} 0 3
[junit4] 2> 162655 INFO (qtp931799052-1299) [n:127.0.0.1:36663_vlvxe%2Fei
c:collection1 s:shard1 r:core_node3 x:collection1]
o.a.s.u.p.LogUpdateProcessorFactory [collection1] webapp=/vlvxe/ei
path=/update
params={update.distrib=FROMLEADER&distrib.from=http://127.0.0.1:34850/vlvxe/ei/collection1/&wt=javabin&version=2}{add=[2
(1557595815798636544)]} 0 0
[junit4] 2> 162657 INFO (qtp260875601-1264) [n:127.0.0.1:46668_vlvxe%2Fei
c:collection1 s:shard1 r:core_node2 x:collection1]
o.a.s.u.p.LogUpdateProcessorFactory [collection1] webapp=/vlvxe/ei
path=/update
params={update.distrib=FROMLEADER&distrib.from=http://127.0.0.1:34850/vlvxe/ei/collection1/&wt=javabin&version=2}{add=[2
(1557595815798636544)]} 0 0
[junit4] 2> 162657 INFO (qtp1113643123-1229)
[n:127.0.0.1:34850_vlvxe%2Fei c:collection1 s:shard1 r:core_node1
x:collection1] o.a.s.u.p.LogUpdateProcessorFactory [collection1]
webapp=/vlvxe/ei path=/update params={wt=javabin&version=2}{add=[2
(1557595815798636544)]} 0 4
[junit4] 2> 162659 INFO (qtp260875601-1262) [n:127.0.0.1:46668_vlvxe%2Fei
c:collection1 s:shard1 r:core_node2 x:collection1]
o.a.s.u.p.LogUpdateProcessorFactory [collection1] webapp=/vlvxe/ei
path=/update
params={update.distrib=FROMLEADER&distrib.from=http://127.0.0.1:34850/vlvxe/ei/collection1/&wt=javabin&version=2}{add=[3
(1557595815803879424)]} 0 0
[junit4] 2> 162659 INFO (qtp931799052-1299) [n:127.0.0.1:36663_vlvxe%2Fei
c:collection1 s:shard1 r:core_node3 x:collection1]
o.a.s.u.p.LogUpdateProcessorFactory [collection1] webapp=/vlvxe/ei
path=/update
params={update.distrib=FROMLEADER&distrib.from=http://127.0.0.1:34850/vlvxe/ei/collection1/&wt=javabin&version=2}{add=[3
(1557595815803879424)]} 0 0
[junit4] 2> 162659 INFO (qtp1113643123-1230)
[n:127.0.0.1:34850_vlvxe%2Fei c:collection1 s:shard1 r:core_node1
x:collection1] o.a.s.u.p.LogUpdateProcessorFactory [collection1] webapp
[...truncated too long message...]
.0.1:46668_vlvxe%252Fei&onlyIfLeaderActive=true&core=collection1&coreNodeName=core_node2&action=PREPRECOVERY&checkLive=true&state=recovering&onlyIfLeader=true&wt=javabin&version=2}
status=400 QTime=132630
[junit4] 2> 348509 INFO (qtp1113643123-1384)
[n:127.0.0.1:34850_vlvxe%2Fei ] o.a.s.s.HttpSolrCall [admin] webapp=null
path=/admin/cores
params={nodeName=127.0.0.1:46668_vlvxe%252Fei&onlyIfLeaderActive=true&core=collection1&coreNodeName=core_node2&action=PREPRECOVERY&checkLive=true&state=recovering&onlyIfLeader=true&wt=javabin&version=2}
status=400 QTime=112612
[junit4] 2> 348509 INFO (qtp1113643123-1383)
[n:127.0.0.1:34850_vlvxe%2Fei ] o.a.s.s.HttpSolrCall [admin] webapp=null
path=/admin/cores
params={nodeName=127.0.0.1:46668_vlvxe%252Fei&onlyIfLeaderActive=true&core=collection1&coreNodeName=core_node2&action=PREPRECOVERY&checkLive=true&state=recovering&onlyIfLeader=true&wt=javabin&version=2}
status=400 QTime=152654
[junit4] 2> 349380 WARN
(zkCallback-160-thread-3-processing-n:127.0.0.1:34850_vlvxe%2Fei)
[n:127.0.0.1:34850_vlvxe%2Fei ] o.a.s.c.c.ZkStateReader ZooKeeper watch
triggered, but Solr cannot talk to ZK: [KeeperErrorCode = Session expired for
/live_nodes]
[junit4] 2> 349381 INFO
(TEST-PeerSyncReplicationTest.test-seed#[52FFFC7F0CF16ACE]) [ ]
o.a.s.m.SolrMetricManager Closing metric reporters for: solr.node
[junit4] 2> 349381 INFO
(TEST-PeerSyncReplicationTest.test-seed#[52FFFC7F0CF16ACE]) [ ]
o.e.j.s.AbstractConnector Stopped
ServerConnector@1b4de1e6{HTTP/1.1,[http/1.1]}{127.0.0.1:0}
[junit4] 2> 349382 INFO
(TEST-PeerSyncReplicationTest.test-seed#[52FFFC7F0CF16ACE]) [ ]
o.e.j.s.h.ContextHandler Stopped
o.e.j.s.ServletContextHandler@35168835{/vlvxe/ei,null,UNAVAILABLE}
[junit4] 2> 349386 INFO
(TEST-PeerSyncReplicationTest.test-seed#[52FFFC7F0CF16ACE]) [ ]
o.a.s.c.ChaosMonkey monkey: stop shard! 46668
[junit4] 2> 349386 INFO
(TEST-PeerSyncReplicationTest.test-seed#[52FFFC7F0CF16ACE]) [ ]
o.a.s.c.CoreContainer Shutting down CoreContainer instance=917253553
[junit4] 2> 349386 WARN
(TEST-PeerSyncReplicationTest.test-seed#[52FFFC7F0CF16ACE]) [ ]
o.a.s.c.RecoveryStrategy Stopping recovery for core=[collection1]
coreNodeName=[core_node2]
[junit4] 2> 349389 WARN
(updateExecutor-177-thread-2-processing-n:127.0.0.1:46668_vlvxe%2Fei)
[n:127.0.0.1:46668_vlvxe%2Fei c:collection1 s:shard1 r:core_node2
x:collection1] o.a.s.u.DefaultSolrCoreState Skipping recovery because Solr is
shutdown
[junit4] 2> 352998 INFO
(recoveryExecutor-178-thread-1-processing-n:127.0.0.1:46668_vlvxe%2Fei
x:collection1 s:shard1 c:collection1 r:core_node2)
[n:127.0.0.1:46668_vlvxe%2Fei c:collection1 s:shard1 r:core_node2
x:collection1] o.a.s.c.RecoveryStrategy RecoveryStrategy has been closed
[junit4] 2> 352998 INFO
(recoveryExecutor-178-thread-1-processing-n:127.0.0.1:46668_vlvxe%2Fei
x:collection1 s:shard1 c:collection1 r:core_node2)
[n:127.0.0.1:46668_vlvxe%2Fei c:collection1 s:shard1 r:core_node2
x:collection1] o.a.s.c.RecoveryStrategy Finished recovery process,
successful=[false]
[junit4] 2> 352998 INFO
(recoveryExecutor-178-thread-1-processing-n:127.0.0.1:46668_vlvxe%2Fei
x:collection1 s:shard1 c:collection1 r:core_node2)
[n:127.0.0.1:46668_vlvxe%2Fei c:collection1 s:shard1 r:core_node2
x:collection1] o.a.s.c.SolrCore [collection1] CLOSING SolrCore
org.apache.solr.core.SolrCore@433cf75e
[junit4] 2> 352998 WARN
(recoveryExecutor-178-thread-1-processing-n:127.0.0.1:46668_vlvxe%2Fei
x:collection1 s:shard1 c:collection1 r:core_node2)
[n:127.0.0.1:46668_vlvxe%2Fei c:collection1 s:shard1 r:core_node2
x:collection1] o.a.s.c.RecoveryStrategy Stopping recovery for
core=[collection1] coreNodeName=[core_node2]
[junit4] 2> 353031 INFO
(recoveryExecutor-178-thread-1-processing-n:127.0.0.1:46668_vlvxe%2Fei
x:collection1 s:shard1 c:collection1 r:core_node2)
[n:127.0.0.1:46668_vlvxe%2Fei c:collection1 s:shard1 r:core_node2
x:collection1] o.a.s.m.SolrMetricManager Closing metric reporters for:
solr.core.collection1
[junit4] 2> 353031 INFO
(TEST-PeerSyncReplicationTest.test-seed#[52FFFC7F0CF16ACE]) [ ]
o.a.s.c.Overseer Overseer
(id=97349737342435345-127.0.0.1:46668_vlvxe%2Fei-n_0000000004) closing
[junit4] 2> 353031 INFO
(OverseerStateUpdate-97349737342435345-127.0.0.1:46668_vlvxe%2Fei-n_0000000004)
[n:127.0.0.1:46668_vlvxe%2Fei ] o.a.s.c.Overseer Overseer Loop exiting :
127.0.0.1:46668_vlvxe%2Fei
[junit4] 2> 353033 WARN
(zkCallback-180-thread-2-processing-n:127.0.0.1:46668_vlvxe%2Fei)
[n:127.0.0.1:46668_vlvxe%2Fei ] o.a.s.c.c.ZkStateReader ZooKeeper watch
triggered, but Solr cannot talk to ZK: [KeeperErrorCode = Session expired for
/live_nodes]
[junit4] 2> 353033 INFO
(TEST-PeerSyncReplicationTest.test-seed#[52FFFC7F0CF16ACE]) [ ]
o.a.s.m.SolrMetricManager Closing metric reporters for: solr.node
[junit4] 2> 353033 INFO
(TEST-PeerSyncReplicationTest.test-seed#[52FFFC7F0CF16ACE]) [ ]
o.e.j.s.AbstractConnector Stopped
ServerConnector@e7a8796{HTTP/1.1,[http/1.1]}{127.0.0.1:46668}
[junit4] 2> 353033 INFO
(TEST-PeerSyncReplicationTest.test-seed#[52FFFC7F0CF16ACE]) [ ]
o.e.j.s.h.ContextHandler Stopped
o.e.j.s.ServletContextHandler@6b852f6{/vlvxe/ei,null,UNAVAILABLE}
[junit4] 2> 353034 INFO
(TEST-PeerSyncReplicationTest.test-seed#[52FFFC7F0CF16ACE]) [ ]
o.a.s.c.ChaosMonkey monkey: stop shard! 36663
[junit4] 2> 353034 INFO
(TEST-PeerSyncReplicationTest.test-seed#[52FFFC7F0CF16ACE]) [ ]
o.a.s.c.ZkTestServer connecting to 127.0.0.1:42121 42121
[junit4] 2> 353060 INFO (Thread-346) [ ] o.a.s.c.ZkTestServer
connecting to 127.0.0.1:42121 42121
[junit4] 2> 353060 WARN (Thread-346) [ ] o.a.s.c.ZkTestServer Watch
limit violations:
[junit4] 2> Maximum concurrent create/delete watches above limit:
[junit4] 2>
[junit4] 2> 6 /solr/aliases.json
[junit4] 2> 6 /solr/clusterprops.json
[junit4] 2> 5 /solr/security.json
[junit4] 2> 5 /solr/configs/conf1
[junit4] 2> 4 /solr/collections/collection1/state.json
[junit4] 2>
[junit4] 2> Maximum concurrent data watches above limit:
[junit4] 2>
[junit4] 2> 6 /solr/clusterstate.json
[junit4] 2> 2
/solr/collections/collection1/leader_elect/shard1/election/97349737342435336-core_node1-n_0000000000
[junit4] 2> 2
/solr/overseer_elect/election/97349737342435336-127.0.0.1:34850_vlvxe%2Fei-n_0000000001
[junit4] 2>
[junit4] 2> Maximum concurrent children watches above limit:
[junit4] 2>
[junit4] 2> 207 /solr/overseer/collection-queue-work
[junit4] 2> 31 /solr/overseer/queue
[junit4] 2> 7 /solr/overseer/queue-work
[junit4] 2> 6 /solr/collections
[junit4] 2> 5 /solr/live_nodes
[junit4] 2>
[junit4] 2> NOTE: reproduce with: ant test
-Dtestcase=PeerSyncReplicationTest -Dtests.method=test
-Dtests.seed=52FFFC7F0CF16ACE -Dtests.multiplier=3 -Dtests.slow=true
-Dtests.locale=da-DK -Dtests.timezone=Europe/Vienna -Dtests.asserts=true
-Dtests.file.encoding=ISO-8859-1
[junit4] FAILURE 208s J1 | PeerSyncReplicationTest.test <<<
[junit4] > Throwable #1: java.lang.AssertionError: timeout waiting to see
all nodes active
[junit4] > at
__randomizedtesting.SeedInfo.seed([52FFFC7F0CF16ACE:DAABC3A5A20D0736]:0)
[junit4] > at
org.apache.solr.cloud.PeerSyncReplicationTest.waitTillNodesActive(PeerSyncReplicationTest.java:326)
[junit4] > at
org.apache.solr.cloud.PeerSyncReplicationTest.bringUpDeadNodeAndEnsureNoReplication(PeerSyncReplicationTest.java:277)
[junit4] > at
org.apache.solr.cloud.PeerSyncReplicationTest.forceNodeFailureAndDoPeerSync(PeerSyncReplicationTest.java:259)
[junit4] > at
org.apache.solr.cloud.PeerSyncReplicationTest.test(PeerSyncReplicationTest.java:138)
[junit4] > at
org.apache.solr.BaseDistributedSearchTestCase$ShardsRepeatRule$ShardsFixedStatement.callStatement(BaseDistributedSearchTestCase.java:985)
[junit4] > at
org.apache.solr.BaseDistributedSearchTestCase$ShardsRepeatRule$ShardsStatement.evaluate(BaseDistributedSearchTestCase.java:960)
[junit4] > at java.lang.Thread.run(Thread.java:745)
[junit4] 2> 353063 INFO
(SUITE-PeerSyncReplicationTest-seed#[52FFFC7F0CF16ACE]-worker) [ ]
o.a.s.SolrTestCaseJ4 ###deleteCore
[junit4] 2> NOTE: leaving temporary files on disk at:
/home/jenkins/workspace/Lucene-Solr-master-Linux/solr/build/solr-core/test/J1/temp/solr.cloud.PeerSyncReplicationTest_52FFFC7F0CF16ACE-001
[junit4] 2> Jan 26, 2017 2:01:55 PM
com.carrotsearch.randomizedtesting.ThreadLeakControl checkThreadLeaks
[junit4] 2> WARNING: Will linger awaiting termination of 2 leaked
thread(s).
[junit4] 2> NOTE: test params are:
codec=FastDecompressionCompressingStoredFields(storedFieldsFormat=CompressingStoredFieldsFormat(compressionMode=FAST_DECOMPRESSION,
chunkSize=20939, maxDocsPerChunk=594, blockSize=5),
termVectorsFormat=CompressingTermVectorsFormat(compressionMode=FAST_DECOMPRESSION,
chunkSize=20939, blockSize=5)), sim=RandomSimilarity(queryNorm=true): {},
locale=da-DK, timezone=Europe/Vienna
[junit4] 2> NOTE: Linux 4.4.0-53-generic amd64/Oracle Corporation
1.8.0_121 (64-bit)/cpus=12,threads=1,free=281888296,total=509149184
[junit4] 2> NOTE: All tests run in this JVM: [TestExactStatsCache,
BlockDirectoryTest, TestUniqueKeyFieldResource, SpellCheckCollatorTest,
TestXmlQParserPlugin, IndexSchemaTest, TestSolrCoreParser,
TestPerFieldSimilarity, TolerantUpdateProcessorTest, TestLegacyFieldCache,
CustomCollectionTest, TestIntervalFaceting, TestHdfsBackupRestoreCore,
OverseerRolesTest, DistributedIntervalFacetingTest, TestRandomFlRTGCloud,
TestTestInjection, JSONWriterTest, IndexBasedSpellCheckerTest,
TestCursorMarkWithoutUniqueKey, TestFastLRUCache, TestGraphTermsQParserPlugin,
PeerSyncReplicationTest]
[junit4] Completed [122/684 (1!)] on J1 in 208.69s, 1 test, 1 failure <<<
FAILURES!
[...truncated 64255 lines...]
---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]