Build: https://jenkins.thetaphi.de/job/Lucene-Solr-6.x-Solaris/943/
Java: 64bit/jdk1.8.0 -XX:+UseCompressedOops -XX:+UseConcMarkSweepGC

1 tests failed.
FAILED:  org.apache.solr.update.TestInPlaceUpdatesDistrib.test

Error Message:
The replica receiving reordered updates must not have gone down expected:<3> 
but was:<2>

Stack Trace:
java.lang.AssertionError: The replica receiving reordered updates must not have 
gone down expected:<3> but was:<2>
        at 
__randomizedtesting.SeedInfo.seed([74DC3FCD315D8132:FC8800179FA1ECCA]:0)
        at org.junit.Assert.fail(Assert.java:93)
        at org.junit.Assert.failNotEquals(Assert.java:647)
        at org.junit.Assert.assertEquals(Assert.java:128)
        at org.junit.Assert.assertEquals(Assert.java:472)
        at 
org.apache.solr.update.TestInPlaceUpdatesDistrib.delayedReorderingFetchesMissingUpdateFromLeaderTest(TestInPlaceUpdatesDistrib.java:881)
        at 
org.apache.solr.update.TestInPlaceUpdatesDistrib.test(TestInPlaceUpdatesDistrib.java:157)
        at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
        at 
sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)
        at 
sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
        at java.lang.reflect.Method.invoke(Method.java:498)
        at 
com.carrotsearch.randomizedtesting.RandomizedRunner.invoke(RandomizedRunner.java:1713)
        at 
com.carrotsearch.randomizedtesting.RandomizedRunner$8.evaluate(RandomizedRunner.java:907)
        at 
com.carrotsearch.randomizedtesting.RandomizedRunner$9.evaluate(RandomizedRunner.java:943)
        at 
com.carrotsearch.randomizedtesting.RandomizedRunner$10.evaluate(RandomizedRunner.java:957)
        at 
org.apache.solr.BaseDistributedSearchTestCase$ShardsRepeatRule$ShardsFixedStatement.callStatement(BaseDistributedSearchTestCase.java:992)
        at 
org.apache.solr.BaseDistributedSearchTestCase$ShardsRepeatRule$ShardsStatement.evaluate(BaseDistributedSearchTestCase.java:967)
        at 
com.carrotsearch.randomizedtesting.rules.SystemPropertiesRestoreRule$1.evaluate(SystemPropertiesRestoreRule.java:57)
        at 
org.apache.lucene.util.TestRuleSetupTeardownChained$1.evaluate(TestRuleSetupTeardownChained.java:49)
        at 
org.apache.lucene.util.AbstractBeforeAfterRule$1.evaluate(AbstractBeforeAfterRule.java:45)
        at 
org.apache.lucene.util.TestRuleThreadAndTestName$1.evaluate(TestRuleThreadAndTestName.java:48)
        at 
org.apache.lucene.util.TestRuleIgnoreAfterMaxFailures$1.evaluate(TestRuleIgnoreAfterMaxFailures.java:64)
        at 
org.apache.lucene.util.TestRuleMarkFailure$1.evaluate(TestRuleMarkFailure.java:47)
        at 
com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
        at 
com.carrotsearch.randomizedtesting.ThreadLeakControl$StatementRunner.run(ThreadLeakControl.java:368)
        at 
com.carrotsearch.randomizedtesting.ThreadLeakControl.forkTimeoutingTask(ThreadLeakControl.java:817)
        at 
com.carrotsearch.randomizedtesting.ThreadLeakControl$3.evaluate(ThreadLeakControl.java:468)
        at 
com.carrotsearch.randomizedtesting.RandomizedRunner.runSingleTest(RandomizedRunner.java:916)
        at 
com.carrotsearch.randomizedtesting.RandomizedRunner$5.evaluate(RandomizedRunner.java:802)
        at 
com.carrotsearch.randomizedtesting.RandomizedRunner$6.evaluate(RandomizedRunner.java:852)
        at 
com.carrotsearch.randomizedtesting.RandomizedRunner$7.evaluate(RandomizedRunner.java:863)
        at 
com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
        at 
com.carrotsearch.randomizedtesting.rules.SystemPropertiesRestoreRule$1.evaluate(SystemPropertiesRestoreRule.java:57)
        at 
org.apache.lucene.util.AbstractBeforeAfterRule$1.evaluate(AbstractBeforeAfterRule.java:45)
        at 
com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
        at 
org.apache.lucene.util.TestRuleStoreClassName$1.evaluate(TestRuleStoreClassName.java:41)
        at 
com.carrotsearch.randomizedtesting.rules.NoShadowingOrOverridesOnMethodsRule$1.evaluate(NoShadowingOrOverridesOnMethodsRule.java:40)
        at 
com.carrotsearch.randomizedtesting.rules.NoShadowingOrOverridesOnMethodsRule$1.evaluate(NoShadowingOrOverridesOnMethodsRule.java:40)
        at 
com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
        at 
com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
        at 
com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
        at 
org.apache.lucene.util.TestRuleAssertionsRequired$1.evaluate(TestRuleAssertionsRequired.java:53)
        at 
org.apache.lucene.util.TestRuleMarkFailure$1.evaluate(TestRuleMarkFailure.java:47)
        at 
org.apache.lucene.util.TestRuleIgnoreAfterMaxFailures$1.evaluate(TestRuleIgnoreAfterMaxFailures.java:64)
        at 
org.apache.lucene.util.TestRuleIgnoreTestSuites$1.evaluate(TestRuleIgnoreTestSuites.java:54)
        at 
com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
        at 
com.carrotsearch.randomizedtesting.ThreadLeakControl$StatementRunner.run(ThreadLeakControl.java:368)
        at java.lang.Thread.run(Thread.java:748)




Build Log:
[...truncated 10992 lines...]
   [junit4] Suite: org.apache.solr.update.TestInPlaceUpdatesDistrib
   [junit4]   2> Creating dataDir: 
/export/home/jenkins/workspace/Lucene-Solr-6.x-Solaris/solr/build/solr-core/test/J0/temp/solr.update.TestInPlaceUpdatesDistrib_74DC3FCD315D8132-001/init-core-data-001
   [junit4]   2> 97083 WARN  
(SUITE-TestInPlaceUpdatesDistrib-seed#[74DC3FCD315D8132]-worker) [    ] 
o.a.s.SolrTestCaseJ4 startTrackingSearchers: numOpens=5 numCloses=5
   [junit4]   2> 97083 INFO  
(SUITE-TestInPlaceUpdatesDistrib-seed#[74DC3FCD315D8132]-worker) [    ] 
o.a.s.SolrTestCaseJ4 Using PointFields
   [junit4]   2> 97089 INFO  
(SUITE-TestInPlaceUpdatesDistrib-seed#[74DC3FCD315D8132]-worker) [    ] 
o.a.s.SolrTestCaseJ4 Randomized ssl (true) and clientAuth (false) via: 
@org.apache.solr.util.RandomizeSSL(reason=, value=NaN, ssl=NaN, clientAuth=NaN)
   [junit4]   2> 97090 INFO  
(SUITE-TestInPlaceUpdatesDistrib-seed#[74DC3FCD315D8132]-worker) [    ] 
o.a.s.BaseDistributedSearchTestCase Setting hostContext system property: 
/py_f/yw
   [junit4]   2> 97094 INFO  
(SUITE-TestInPlaceUpdatesDistrib-seed#[74DC3FCD315D8132]-worker) [    ] 
o.a.s.SolrTestCaseJ4 ####initCore
   [junit4]   2> 97094 INFO  
(SUITE-TestInPlaceUpdatesDistrib-seed#[74DC3FCD315D8132]-worker) [    ] 
o.a.s.c.SolrResourceLoader [null] Added 2 libs to classloader, from paths: 
[/export/home/jenkins/workspace/Lucene-Solr-6.x-Solaris/solr/core/src/test-files/solr/collection1/lib,
 
/export/home/jenkins/workspace/Lucene-Solr-6.x-Solaris/solr/core/src/test-files/solr/collection1/lib/classes]
   [junit4]   2> 97129 INFO  
(SUITE-TestInPlaceUpdatesDistrib-seed#[74DC3FCD315D8132]-worker) [    ] 
o.a.s.c.SolrConfig Using Lucene MatchVersion: 6.7.0
   [junit4]   2> 97143 INFO  
(SUITE-TestInPlaceUpdatesDistrib-seed#[74DC3FCD315D8132]-worker) [    ] 
o.a.s.s.IndexSchema [null] Schema name=inplace-updates
   [junit4]   2> 97150 INFO  
(SUITE-TestInPlaceUpdatesDistrib-seed#[74DC3FCD315D8132]-worker) [    ] 
o.a.s.s.IndexSchema Loaded schema inplace-updates/1.6 with uniqueid field id
   [junit4]   2> 97154 INFO  
(SUITE-TestInPlaceUpdatesDistrib-seed#[74DC3FCD315D8132]-worker) [    ] 
o.a.s.u.UpdateShardHandler Creating UpdateShardHandler HTTP client with params: 
socketTimeout=30000&connTimeout=30000&retry=true
   [junit4]   2> 97211 INFO  
(SUITE-TestInPlaceUpdatesDistrib-seed#[74DC3FCD315D8132]-worker) [    ] 
o.a.s.m.r.SolrJmxReporter JMX monitoring for 'solr.node' (registry 'solr.node') 
enabled at server: com.sun.jmx.mbeanserver.JmxMBeanServer@4c490d54
   [junit4]   2> 97214 INFO  
(SUITE-TestInPlaceUpdatesDistrib-seed#[74DC3FCD315D8132]-worker) [    ] 
o.a.s.m.r.SolrJmxReporter JMX monitoring for 'solr.jvm' (registry 'solr.jvm') 
enabled at server: com.sun.jmx.mbeanserver.JmxMBeanServer@4c490d54
   [junit4]   2> 97214 INFO  
(SUITE-TestInPlaceUpdatesDistrib-seed#[74DC3FCD315D8132]-worker) [    ] 
o.a.s.m.r.SolrJmxReporter JMX monitoring for 'solr.jetty' (registry 
'solr.jetty') enabled at server: com.sun.jmx.mbeanserver.JmxMBeanServer@4c490d54
   [junit4]   2> 97217 INFO  (coreLoadExecutor-514-thread-1) [    
x:collection1] o.a.s.c.SolrResourceLoader [null] Added 2 libs to classloader, 
from paths: 
[/export/home/jenkins/workspace/Lucene-Solr-6.x-Solaris/solr/core/src/test-files/solr/collection1/lib,
 
/export/home/jenkins/workspace/Lucene-Solr-6.x-Solaris/solr/core/src/test-files/solr/collection1/lib/classes]
   [junit4]   2> 97254 INFO  (coreLoadExecutor-514-thread-1) [    
x:collection1] o.a.s.c.SolrConfig Using Lucene MatchVersion: 6.7.0
   [junit4]   2> 97299 INFO  (coreLoadExecutor-514-thread-1) [    
x:collection1] o.a.s.s.IndexSchema [collection1] Schema name=inplace-updates
   [junit4]   2> 97303 INFO  (coreLoadExecutor-514-thread-1) [    
x:collection1] o.a.s.s.IndexSchema Loaded schema inplace-updates/1.6 with 
uniqueid field id
   [junit4]   2> 97303 INFO  (coreLoadExecutor-514-thread-1) [    
x:collection1] o.a.s.c.CoreContainer Creating SolrCore 'collection1' using 
configuration from instancedir 
/export/home/jenkins/workspace/Lucene-Solr-6.x-Solaris/solr/core/src/test-files/solr/collection1,
 trusted=true
   [junit4]   2> 97303 INFO  (coreLoadExecutor-514-thread-1) [    
x:collection1] o.a.s.m.r.SolrJmxReporter JMX monitoring for 
'solr.core.collection1' (registry 'solr.core.collection1') enabled at server: 
com.sun.jmx.mbeanserver.JmxMBeanServer@4c490d54
   [junit4]   2> 97303 INFO  (coreLoadExecutor-514-thread-1) [    
x:collection1] o.a.s.c.SolrCore solr.RecoveryStrategy.Builder
   [junit4]   2> 97303 INFO  (coreLoadExecutor-514-thread-1) [    
x:collection1] o.a.s.c.SolrCore [[collection1] ] Opening new SolrCore at 
[/export/home/jenkins/workspace/Lucene-Solr-6.x-Solaris/solr/core/src/test-files/solr/collection1],
 
dataDir=[/export/home/jenkins/workspace/Lucene-Solr-6.x-Solaris/solr/build/solr-core/test/J0/temp/solr.update.TestInPlaceUpdatesDistrib_74DC3FCD315D8132-001/init-core-data-001/]
   [junit4]   2> 97304 INFO  (coreLoadExecutor-514-thread-1) [    
x:collection1] o.a.s.c.JmxMonitoredMap JMX monitoring is enabled. Adding Solr 
mbeans to JMX Server: com.sun.jmx.mbeanserver.JmxMBeanServer@4c490d54
   [junit4]   2> 97318 WARN  (coreLoadExecutor-514-thread-1) [    
x:collection1] o.a.s.c.RequestHandlers INVALID paramSet a in requestHandler 
{type = requestHandler,name = /dump,class = DumpRequestHandler,attributes = 
{initParams=a, name=/dump, class=DumpRequestHandler},args = 
{defaults={a=A,b=B}}}
   [junit4]   2> 97412 INFO  (coreLoadExecutor-514-thread-1) [    
x:collection1] o.a.s.u.UpdateHandler Using UpdateLog implementation: 
org.apache.solr.update.UpdateLog
   [junit4]   2> 97412 INFO  (coreLoadExecutor-514-thread-1) [    
x:collection1] o.a.s.u.UpdateLog Initializing UpdateLog: dataDir= 
defaultSyncLevel=FLUSH numRecordsToKeep=100 maxNumLogsToKeep=10 
numVersionBuckets=65536
   [junit4]   2> 97413 INFO  (coreLoadExecutor-514-thread-1) [    
x:collection1] o.a.s.u.CommitTracker Hard AutoCommit: disabled
   [junit4]   2> 97413 INFO  (coreLoadExecutor-514-thread-1) [    
x:collection1] o.a.s.u.CommitTracker Soft AutoCommit: disabled
   [junit4]   2> 97414 INFO  (coreLoadExecutor-514-thread-1) [    
x:collection1] o.a.s.s.SolrIndexSearcher Opening 
[Searcher@3338325b[collection1] main]
   [junit4]   2> 97414 WARN  (coreLoadExecutor-514-thread-1) [    
x:collection1] o.a.s.r.ManagedResourceStorage Cannot write to config directory 
/export/home/jenkins/workspace/Lucene-Solr-6.x-Solaris/solr/core/src/test-files/solr/collection1/conf;
 switching to use InMemory storage instead.
   [junit4]   2> 97414 INFO  (coreLoadExecutor-514-thread-1) [    
x:collection1] o.a.s.h.ReplicationHandler Commits will be reserved for  10000
   [junit4]   2> 97415 INFO  (coreLoadExecutor-514-thread-1) [    
x:collection1] o.a.s.c.TransientSolrCoreCacheDefault Allocating transient cache 
for 2147483647 transient cores
   [junit4]   2> 97415 INFO  (coreLoadExecutor-514-thread-1) [    
x:collection1] o.a.s.u.UpdateLog Could not find max version in index or recent 
updates, using new clock 1571803428081893376
   [junit4]   2> 97421 INFO  
(SUITE-TestInPlaceUpdatesDistrib-seed#[74DC3FCD315D8132]-worker) [    ] 
o.a.s.SolrTestCaseJ4 ####initCore end
   [junit4]   2> 97422 INFO  
(searcherExecutor-515-thread-1-processing-x:collection1) [    x:collection1] 
o.a.s.c.SolrCore [collection1] Registered new searcher 
Searcher@3338325b[collection1] 
main{ExitableDirectoryReader(UninvertingDirectoryReader())}
   [junit4]   2> 97431 INFO  
(TEST-TestInPlaceUpdatesDistrib.test-seed#[74DC3FCD315D8132]) [    ] 
o.a.s.c.ZkTestServer STARTING ZK TEST SERVER
   [junit4]   2> 97431 INFO  (Thread-427) [    ] o.a.s.c.ZkTestServer client 
port:0.0.0.0/0.0.0.0:0
   [junit4]   2> 97431 INFO  (Thread-427) [    ] o.a.s.c.ZkTestServer Starting 
server
   [junit4]   2> 97440 ERROR (Thread-427) [    ] o.a.z.s.ZooKeeperServer 
ZKShutdownHandler is not registered, so ZooKeeper server won't take any action 
on ERROR or SHUTDOWN server state changes
   [junit4]   2> 97532 INFO  
(TEST-TestInPlaceUpdatesDistrib.test-seed#[74DC3FCD315D8132]) [    ] 
o.a.s.c.ZkTestServer start zk server on port:42694
   [junit4]   2> 97568 INFO  
(TEST-TestInPlaceUpdatesDistrib.test-seed#[74DC3FCD315D8132]) [    ] 
o.a.s.c.AbstractZkTestCase put 
/export/home/jenkins/workspace/Lucene-Solr-6.x-Solaris/solr/core/src/test-files/solr/collection1/conf/solrconfig-tlog.xml
 to /configs/conf1/solrconfig.xml
   [junit4]   2> 97577 INFO  
(TEST-TestInPlaceUpdatesDistrib.test-seed#[74DC3FCD315D8132]) [    ] 
o.a.s.c.AbstractZkTestCase put 
/export/home/jenkins/workspace/Lucene-Solr-6.x-Solaris/solr/core/src/test-files/solr/collection1/conf/schema-inplace-updates.xml
 to /configs/conf1/schema.xml
   [junit4]   2> 97589 INFO  
(TEST-TestInPlaceUpdatesDistrib.test-seed#[74DC3FCD315D8132]) [    ] 
o.a.s.c.AbstractZkTestCase put 
/export/home/jenkins/workspace/Lucene-Solr-6.x-Solaris/solr/core/src/test-files/solr/collection1/conf/solrconfig.snippet.randomindexconfig.xml
 to /configs/conf1/solrconfig.snippet.randomindexconfig.xml
   [junit4]   2> 97591 INFO  
(TEST-TestInPlaceUpdatesDistrib.test-seed#[74DC3FCD315D8132]) [    ] 
o.a.s.c.AbstractZkTestCase put 
/export/home/jenkins/workspace/Lucene-Solr-6.x-Solaris/solr/core/src/test-files/solr/collection1/conf/stopwords.txt
 to /configs/conf1/stopwords.txt
   [junit4]   2> 97638 INFO  
(TEST-TestInPlaceUpdatesDistrib.test-seed#[74DC3FCD315D8132]) [    ] 
o.a.s.c.AbstractZkTestCase put 
/export/home/jenkins/workspace/Lucene-Solr-6.x-Solaris/solr/core/src/test-files/solr/collection1/conf/protwords.txt
 to /configs/conf1/protwords.txt
   [junit4]   2> 97650 INFO  
(TEST-TestInPlaceUpdatesDistrib.test-seed#[74DC3FCD315D8132]) [    ] 
o.a.s.c.AbstractZkTestCase put 
/export/home/jenkins/workspace/Lucene-Solr-6.x-Solaris/solr/core/src/test-files/solr/collection1/conf/currency.xml
 to /configs/conf1/currency.xml
   [junit4]   2> 97652 INFO  
(TEST-TestInPlaceUpdatesDistrib.test-seed#[74DC3FCD315D8132]) [    ] 
o.a.s.c.AbstractZkTestCase put 
/export/home/jenkins/workspace/Lucene-Solr-6.x-Solaris/solr/core/src/test-files/solr/collection1/conf/enumsConfig.xml
 to /configs/conf1/enumsConfig.xml
   [junit4]   2> 97653 INFO  
(TEST-TestInPlaceUpdatesDistrib.test-seed#[74DC3FCD315D8132]) [    ] 
o.a.s.c.AbstractZkTestCase put 
/export/home/jenkins/workspace/Lucene-Solr-6.x-Solaris/solr/core/src/test-files/solr/collection1/conf/open-exchange-rates.json
 to /configs/conf1/open-exchange-rates.json
   [junit4]   2> 97655 INFO  
(TEST-TestInPlaceUpdatesDistrib.test-seed#[74DC3FCD315D8132]) [    ] 
o.a.s.c.AbstractZkTestCase put 
/export/home/jenkins/workspace/Lucene-Solr-6.x-Solaris/solr/core/src/test-files/solr/collection1/conf/mapping-ISOLatin1Accent.txt
 to /configs/conf1/mapping-ISOLatin1Accent.txt
   [junit4]   2> 97657 INFO  
(TEST-TestInPlaceUpdatesDistrib.test-seed#[74DC3FCD315D8132]) [    ] 
o.a.s.c.AbstractZkTestCase put 
/export/home/jenkins/workspace/Lucene-Solr-6.x-Solaris/solr/core/src/test-files/solr/collection1/conf/old_synonyms.txt
 to /configs/conf1/old_synonyms.txt
   [junit4]   2> 97659 INFO  
(TEST-TestInPlaceUpdatesDistrib.test-seed#[74DC3FCD315D8132]) [    ] 
o.a.s.c.AbstractZkTestCase put 
/export/home/jenkins/workspace/Lucene-Solr-6.x-Solaris/solr/core/src/test-files/solr/collection1/conf/synonyms.txt
 to /configs/conf1/synonyms.txt
   [junit4]   2> 97858 INFO  
(TEST-TestInPlaceUpdatesDistrib.test-seed#[74DC3FCD315D8132]) [    ] 
o.a.s.SolrTestCaseJ4 Writing core.properties file to 
/export/home/jenkins/workspace/Lucene-Solr-6.x-Solaris/solr/build/solr-core/test/J0/temp/solr.update.TestInPlaceUpdatesDistrib_74DC3FCD315D8132-001/control-001/cores/collection1
   [junit4]   2> 97862 INFO  
(TEST-TestInPlaceUpdatesDistrib.test-seed#[74DC3FCD315D8132]) [    ] 
o.e.j.s.Server jetty-9.3.14.v20161028
   [junit4]   2> 97866 INFO  
(TEST-TestInPlaceUpdatesDistrib.test-seed#[74DC3FCD315D8132]) [    ] 
o.e.j.s.h.ContextHandler Started 
o.e.j.s.ServletContextHandler@5a77f147{/py_f/yw,null,AVAILABLE}
   [junit4]   2> 97866 INFO  
(TEST-TestInPlaceUpdatesDistrib.test-seed#[74DC3FCD315D8132]) [    ] 
o.e.j.s.AbstractConnector Started ServerConnector@6e318506{SSL,[ssl, 
http/1.1]}{127.0.0.1:42914}
   [junit4]   2> 97866 INFO  
(TEST-TestInPlaceUpdatesDistrib.test-seed#[74DC3FCD315D8132]) [    ] 
o.e.j.s.Server Started @100012ms
   [junit4]   2> 97867 INFO  
(TEST-TestInPlaceUpdatesDistrib.test-seed#[74DC3FCD315D8132]) [    ] 
o.a.s.c.s.e.JettySolrRunner Jetty properties: 
{solr.data.dir=/export/home/jenkins/workspace/Lucene-Solr-6.x-Solaris/solr/build/solr-core/test/J0/temp/solr.update.TestInPlaceUpdatesDistrib_74DC3FCD315D8132-001/tempDir-001/control/data,
 hostContext=/py_f/yw, hostPort=42914, 
coreRootDirectory=/export/home/jenkins/workspace/Lucene-Solr-6.x-Solaris/solr/build/solr-core/test/J0/../../../../../../../../../../export/home/jenkins/workspace/Lucene-Solr-6.x-Solaris/solr/build/solr-core/test/J0/temp/solr.update.TestInPlaceUpdatesDistrib_74DC3FCD315D8132-001/control-001/cores}
   [junit4]   2> 97867 ERROR 
(TEST-TestInPlaceUpdatesDistrib.test-seed#[74DC3FCD315D8132]) [    ] 
o.a.s.s.StartupLoggingUtils Missing Java Option solr.log.dir. Logging may be 
missing or incomplete.
   [junit4]   2> 97867 INFO  
(TEST-TestInPlaceUpdatesDistrib.test-seed#[74DC3FCD315D8132]) [    ] 
o.a.s.s.SolrDispatchFilter  ___      _       Welcome to Apache Solr™ version 
6.7.0
   [junit4]   2> 97867 INFO  
(TEST-TestInPlaceUpdatesDistrib.test-seed#[74DC3FCD315D8132]) [    ] 
o.a.s.s.SolrDispatchFilter / __| ___| |_ _   Starting in cloud mode on port null
   [junit4]   2> 97867 INFO  
(TEST-TestInPlaceUpdatesDistrib.test-seed#[74DC3FCD315D8132]) [    ] 
o.a.s.s.SolrDispatchFilter \__ \/ _ \ | '_|  Install dir: null
   [junit4]   2> 97867 INFO  
(TEST-TestInPlaceUpdatesDistrib.test-seed#[74DC3FCD315D8132]) [    ] 
o.a.s.s.SolrDispatchFilter |___/\___/_|_|    Start time: 
2017-07-02T09:42:40.253Z
   [junit4]   2> 97883 INFO  
(TEST-TestInPlaceUpdatesDistrib.test-seed#[74DC3FCD315D8132]) [    ] 
o.a.s.s.SolrDispatchFilter Loading solr.xml from SolrHome (not found in 
ZooKeeper)
   [junit4]   2> 97884 INFO  
(TEST-TestInPlaceUpdatesDistrib.test-seed#[74DC3FCD315D8132]) [    ] 
o.a.s.c.SolrXmlConfig Loading container configuration from 
/export/home/jenkins/workspace/Lucene-Solr-6.x-Solaris/solr/build/solr-core/test/J0/temp/solr.update.TestInPlaceUpdatesDistrib_74DC3FCD315D8132-001/control-001/solr.xml
   [junit4]   2> 97892 INFO  
(TEST-TestInPlaceUpdatesDistrib.test-seed#[74DC3FCD315D8132]) [    ] 
o.a.s.u.UpdateShardHandler Creating UpdateShardHandler HTTP client with params: 
socketTimeout=340000&connTimeout=45000&retry=true
   [junit4]   2> 97893 INFO  
(TEST-TestInPlaceUpdatesDistrib.test-seed#[74DC3FCD315D8132]) [    ] 
o.a.s.c.ZkContainer Zookeeper client=127.0.0.1:42694/solr
   [junit4]   2> 97937 INFO  
(TEST-TestInPlaceUpdatesDistrib.test-seed#[74DC3FCD315D8132]) 
[n:127.0.0.1:42914_py_f%2Fyw    ] o.a.s.c.Overseer Overseer (id=null) closing
   [junit4]   2> 97939 INFO  
(TEST-TestInPlaceUpdatesDistrib.test-seed#[74DC3FCD315D8132]) 
[n:127.0.0.1:42914_py_f%2Fyw    ] o.a.s.c.OverseerElectionContext I am going to 
be the leader 127.0.0.1:42914_py_f%2Fyw
   [junit4]   2> 97941 INFO  
(TEST-TestInPlaceUpdatesDistrib.test-seed#[74DC3FCD315D8132]) 
[n:127.0.0.1:42914_py_f%2Fyw    ] o.a.s.c.Overseer Overseer 
(id=98237714256297989-127.0.0.1:42914_py_f%2Fyw-n_0000000000) starting
   [junit4]   2> 97955 INFO  
(TEST-TestInPlaceUpdatesDistrib.test-seed#[74DC3FCD315D8132]) 
[n:127.0.0.1:42914_py_f%2Fyw    ] o.a.s.c.ZkController Register node as live in 
ZooKeeper:/live_nodes/127.0.0.1:42914_py_f%2Fyw
   [junit4]   2> 97959 INFO  
(zkCallback-190-thread-1-processing-n:127.0.0.1:42914_py_f%2Fyw) 
[n:127.0.0.1:42914_py_f%2Fyw    ] o.a.s.c.c.ZkStateReader Updated live nodes 
from ZooKeeper... (0) -> (1)
   [junit4]   2> 98068 INFO  
(TEST-TestInPlaceUpdatesDistrib.test-seed#[74DC3FCD315D8132]) 
[n:127.0.0.1:42914_py_f%2Fyw    ] o.a.s.c.CorePropertiesLocator Found 1 core 
definitions underneath 
/export/home/jenkins/workspace/Lucene-Solr-6.x-Solaris/solr/build/solr-core/test/J0/../../../../../../../../../../export/home/jenkins/workspace/Lucene-Solr-6.x-Solaris/solr/build/solr-core/test/J0/temp/solr.update.TestInPlaceUpdatesDistrib_74DC3FCD315D8132-001/control-001/cores
   [junit4]   2> 98068 INFO  
(TEST-TestInPlaceUpdatesDistrib.test-seed#[74DC3FCD315D8132]) 
[n:127.0.0.1:42914_py_f%2Fyw    ] o.a.s.c.CorePropertiesLocator Cores are: 
[collection1]
   [junit4]   2> 98070 INFO  
(coreLoadExecutor-527-thread-1-processing-n:127.0.0.1:42914_py_f%2Fyw) 
[n:127.0.0.1:42914_py_f%2Fyw c:control_collection   x:collection1] 
o.a.s.c.TransientSolrCoreCacheDefault Allocating transient cache for 4 
transient cores
   [junit4]   2> 98081 INFO  
(OverseerStateUpdate-98237714256297989-127.0.0.1:42914_py_f%2Fyw-n_0000000000) 
[n:127.0.0.1:42914_py_f%2Fyw    ] o.a.s.c.o.ReplicaMutator Assigning new node 
to shard shard=shard1
   [junit4]   2> 99145 INFO  
(coreLoadExecutor-527-thread-1-processing-n:127.0.0.1:42914_py_f%2Fyw) 
[n:127.0.0.1:42914_py_f%2Fyw c:control_collection   x:collection1] 
o.a.s.c.SolrConfig Using Lucene MatchVersion: 6.7.0
   [junit4]   2> 99169 INFO  
(coreLoadExecutor-527-thread-1-processing-n:127.0.0.1:42914_py_f%2Fyw) 
[n:127.0.0.1:42914_py_f%2Fyw c:control_collection   x:collection1] 
o.a.s.s.IndexSchema [collection1] Schema name=inplace-updates
   [junit4]   2> 99173 INFO  
(coreLoadExecutor-527-thread-1-processing-n:127.0.0.1:42914_py_f%2Fyw) 
[n:127.0.0.1:42914_py_f%2Fyw c:control_collection   x:collection1] 
o.a.s.s.IndexSchema Loaded schema inplace-updates/1.6 with uniqueid field id
   [junit4]   2> 99173 INFO  
(coreLoadExecutor-527-thread-1-processing-n:127.0.0.1:42914_py_f%2Fyw) 
[n:127.0.0.1:42914_py_f%2Fyw c:control_collection   x:collection1] 
o.a.s.c.CoreContainer Creating SolrCore 'collection1' using configuration from 
collection control_collection, trusted=true
   [junit4]   2> 99174 INFO  
(coreLoadExecutor-527-thread-1-processing-n:127.0.0.1:42914_py_f%2Fyw) 
[n:127.0.0.1:42914_py_f%2Fyw c:control_collection   x:collection1] 
o.a.s.c.SolrCore solr.RecoveryStrategy.Builder
   [junit4]   2> 99174 INFO  
(coreLoadExecutor-527-thread-1-processing-n:127.0.0.1:42914_py_f%2Fyw) 
[n:127.0.0.1:42914_py_f%2Fyw c:control_collection   x:collection1] 
o.a.s.c.SolrCore [[collection1] ] Opening new SolrCore at 
[/export/home/jenkins/workspace/Lucene-Solr-6.x-Solaris/solr/build/solr-core/test/J0/temp/solr.update.TestInPlaceUpdatesDistrib_74DC3FCD315D8132-001/control-001/cores/collection1],
 
dataDir=[/export/home/jenkins/workspace/Lucene-Solr-6.x-Solaris/solr/build/solr-core/test/J0/../../../../../../../../../../export/home/jenkins/workspace/Lucene-Solr-6.x-Solaris/solr/build/solr-core/test/J0/temp/solr.update.TestInPlaceUpdatesDistrib_74DC3FCD315D8132-001/control-001/cores/collection1/data/]
   [junit4]   2> 99174 INFO  
(coreLoadExecutor-527-thread-1-processing-n:127.0.0.1:42914_py_f%2Fyw) 
[n:127.0.0.1:42914_py_f%2Fyw c:control_collection   x:collection1] 
o.a.s.c.JmxMonitoredMap JMX monitoring is enabled. Adding Solr mbeans to JMX 
Server: com.sun.jmx.mbeanserver.JmxMBeanServer@4c490d54
   [junit4]   2> 99184 WARN  
(coreLoadExecutor-527-thread-1-processing-n:127.0.0.1:42914_py_f%2Fyw) 
[n:127.0.0.1:42914_py_f%2Fyw c:control_collection   x:collection1] 
o.a.s.c.RequestHandlers INVALID paramSet a in requestHandler {type = 
requestHandler,name = /dump,class = DumpRequestHandler,attributes = 
{initParams=a, name=/dump, class=DumpRequestHandler},args = 
{defaults={a=A,b=B}}}
   [junit4]   2> 99261 INFO  
(coreLoadExecutor-527-thread-1-processing-n:127.0.0.1:42914_py_f%2Fyw) 
[n:127.0.0.1:42914_py_f%2Fyw c:control_collection   x:collection1] 
o.a.s.u.UpdateHandler Using UpdateLog implementation: 
org.apache.solr.update.UpdateLog
   [junit4]   2> 99261 INFO  
(coreLoadExecutor-527-thread-1-processing-n:127.0.0.1:42914_py_f%2Fyw) 
[n:127.0.0.1:42914_py_f%2Fyw c:control_collection   x:collection1] 
o.a.s.u.UpdateLog Initializing UpdateLog: dataDir= defaultSyncLevel=FLUSH 
numRecordsToKeep=100 maxNumLogsToKeep=10 numVersionBuckets=65536
   [junit4]   2> 99262 INFO  
(coreLoadExecutor-527-thread-1-processing-n:127.0.0.1:42914_py_f%2Fyw) 
[n:127.0.0.1:42914_py_f%2Fyw c:control_collection   x:collection1] 
o.a.s.u.CommitTracker Hard AutoCommit: disabled
   [junit4]   2> 99262 INFO  
(coreLoadExecutor-527-thread-1-processing-n:127.0.0.1:42914_py_f%2Fyw) 
[n:127.0.0.1:42914_py_f%2Fyw c:control_collection   x:collection1] 
o.a.s.u.CommitTracker Soft AutoCommit: disabled
   [junit4]   2> 99268 INFO  
(coreLoadExecutor-527-thread-1-processing-n:127.0.0.1:42914_py_f%2Fyw) 
[n:127.0.0.1:42914_py_f%2Fyw c:control_collection   x:collection1] 
o.a.s.s.SolrIndexSearcher Opening [Searcher@bf79c96[collection1] main]
   [junit4]   2> 99269 INFO  
(coreLoadExecutor-527-thread-1-processing-n:127.0.0.1:42914_py_f%2Fyw) 
[n:127.0.0.1:42914_py_f%2Fyw c:control_collection   x:collection1] 
o.a.s.r.ManagedResourceStorage Configured ZooKeeperStorageIO with znodeBase: 
/configs/conf1
   [junit4]   2> 99270 INFO  
(coreLoadExecutor-527-thread-1-processing-n:127.0.0.1:42914_py_f%2Fyw) 
[n:127.0.0.1:42914_py_f%2Fyw c:control_collection   x:collection1] 
o.a.s.r.ManagedResourceStorage Loaded null at path _rest_managed.json using 
ZooKeeperStorageIO:path=/configs/conf1
   [junit4]   2> 99270 INFO  
(coreLoadExecutor-527-thread-1-processing-n:127.0.0.1:42914_py_f%2Fyw) 
[n:127.0.0.1:42914_py_f%2Fyw c:control_collection   x:collection1] 
o.a.s.h.ReplicationHandler Commits will be reserved for  10000
   [junit4]   2> 99270 INFO  
(coreLoadExecutor-527-thread-1-processing-n:127.0.0.1:42914_py_f%2Fyw) 
[n:127.0.0.1:42914_py_f%2Fyw c:control_collection   x:collection1] 
o.a.s.u.UpdateLog Could not find max version in index or recent updates, using 
new clock 1571803430027001856
   [junit4]   2> 99279 INFO  
(searcherExecutor-528-thread-1-processing-n:127.0.0.1:42914_py_f%2Fyw 
x:collection1 c:control_collection) [n:127.0.0.1:42914_py_f%2Fyw 
c:control_collection   x:collection1] o.a.s.c.SolrCore [collection1] Registered 
new searcher Searcher@bf79c96[collection1] 
main{ExitableDirectoryReader(UninvertingDirectoryReader())}
   [junit4]   2> 99323 INFO  
(coreZkRegister-520-thread-1-processing-n:127.0.0.1:42914_py_f%2Fyw 
x:collection1 c:control_collection) [n:127.0.0.1:42914_py_f%2Fyw 
c:control_collection s:shard1 r:core_node1 x:collection1] 
o.a.s.c.ShardLeaderElectionContext Enough replicas found to continue.
   [junit4]   2> 99323 INFO  
(coreZkRegister-520-thread-1-processing-n:127.0.0.1:42914_py_f%2Fyw 
x:collection1 c:control_collection) [n:127.0.0.1:42914_py_f%2Fyw 
c:control_collection s:shard1 r:core_node1 x:collection1] 
o.a.s.c.ShardLeaderElectionContext I may be the new leader - try and sync
   [junit4]   2> 99323 INFO  
(coreZkRegister-520-thread-1-processing-n:127.0.0.1:42914_py_f%2Fyw 
x:collection1 c:control_collection) [n:127.0.0.1:42914_py_f%2Fyw 
c:control_collection s:shard1 r:core_node1 x:collection1] o.a.s.c.SyncStrategy 
Sync replicas to https://127.0.0.1:42914/py_f/yw/collection1/
   [junit4]   2> 99323 INFO  
(coreZkRegister-520-thread-1-processing-n:127.0.0.1:42914_py_f%2Fyw 
x:collection1 c:control_collection) [n:127.0.0.1:42914_py_f%2Fyw 
c:control_collection s:shard1 r:core_node1 x:collection1] o.a.s.c.SyncStrategy 
Sync Success - now sync replicas to me
   [junit4]   2> 99323 INFO  
(coreZkRegister-520-thread-1-processing-n:127.0.0.1:42914_py_f%2Fyw 
x:collection1 c:control_collection) [n:127.0.0.1:42914_py_f%2Fyw 
c:control_collection s:shard1 r:core_node1 x:collection1] o.a.s.c.SyncStrategy 
https://127.0.0.1:42914/py_f/yw/collection1/ has no replicas
   [junit4]   2> 99323 INFO  
(coreZkRegister-520-thread-1-processing-n:127.0.0.1:42914_py_f%2Fyw 
x:collection1 c:control_collection) [n:127.0.0.1:42914_py_f%2Fyw 
c:control_collection s:shard1 r:core_node1 x:collection1] 
o.a.s.c.ShardLeaderElectionContext Found all replicas participating in 
election, clear LIR
   [junit4]   2> 99330 INFO  
(coreZkRegister-520-thread-1-processing-n:127.0.0.1:42914_py_f%2Fyw 
x:collection1 c:control_collection) [n:127.0.0.1:42914_py_f%2Fyw 
c:control_collection s:shard1 r:core_node1 x:collection1] 
o.a.s.c.ShardLeaderElectionContext I am the new leader: 
https://127.0.0.1:42914/py_f/yw/collection1/ shard1
   [junit4]   2> 99444 INFO  
(coreZkRegister-520-thread-1-processing-n:127.0.0.1:42914_py_f%2Fyw 
x:collection1 c:control_collection) [n:127.0.0.1:42914_py_f%2Fyw 
c:control_collection s:shard1 r:core_node1 x:collection1] o.a.s.c.ZkController 
I am the leader, no recovery necessary
   [junit4]   2> 99621 INFO  
(TEST-TestInPlaceUpdatesDistrib.test-seed#[74DC3FCD315D8132]) [    ] 
o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (0) -> (1)
   [junit4]   2> 99622 INFO  
(TEST-TestInPlaceUpdatesDistrib.test-seed#[74DC3FCD315D8132]) [    ] 
o.a.s.c.s.i.ZkClientClusterStateProvider Cluster at 127.0.0.1:42694/solr ready
   [junit4]   2> 99622 INFO  
(TEST-TestInPlaceUpdatesDistrib.test-seed#[74DC3FCD315D8132]) [    ] 
o.a.s.c.ChaosMonkey monkey: init - expire sessions:false cause connection 
loss:false
   [junit4]   2> 99623 INFO  
(TEST-TestInPlaceUpdatesDistrib.test-seed#[74DC3FCD315D8132]) [    ] 
o.a.s.c.AbstractFullDistribZkTestBase Creating collection1 with stateFormat=2
   [junit4]   2> 99797 INFO  
(TEST-TestInPlaceUpdatesDistrib.test-seed#[74DC3FCD315D8132]) [    ] 
o.a.s.SolrTestCaseJ4 Writing core.properties file to 
/export/home/jenkins/workspace/Lucene-Solr-6.x-Solaris/solr/build/solr-core/test/J0/temp/solr.update.TestInPlaceUpdatesDistrib_74DC3FCD315D8132-001/shard-1-001/cores/collection1
   [junit4]   2> 99798 INFO  
(TEST-TestInPlaceUpdatesDistrib.test-seed#[74DC3FCD315D8132]) [    ] 
o.a.s.c.AbstractFullDistribZkTestBase create jetty 1 in directory 
/export/home/jenkins/workspace/Lucene-Solr-6.x-Solaris/solr/build/solr-core/test/J0/temp/solr.update.TestInPlaceUpdatesDistrib_74DC3FCD315D8132-001/shard-1-001
   [junit4]   2> 99800 INFO  
(TEST-TestInPlaceUpdatesDistrib.test-seed#[74DC3FCD315D8132]) [    ] 
o.e.j.s.Server jetty-9.3.14.v20161028
   [junit4]   2> 99804 INFO  
(TEST-TestInPlaceUpdatesDistrib.test-seed#[74DC3FCD315D8132]) [    ] 
o.e.j.s.h.ContextHandler Started 
o.e.j.s.ServletContextHandler@776a291a{/py_f/yw,null,AVAILABLE}
   [junit4]   2> 99806 INFO  
(TEST-TestInPlaceUpdatesDistrib.test-seed#[74DC3FCD315D8132]) [    ] 
o.e.j.s.AbstractConnector Started ServerConnector@2c658331{SSL,[ssl, 
http/1.1]}{127.0.0.1:39625}
   [junit4]   2> 99808 INFO  
(TEST-TestInPlaceUpdatesDistrib.test-seed#[74DC3FCD315D8132]) [    ] 
o.e.j.s.Server Started @101954ms
   [junit4]   2> 99808 INFO  
(TEST-TestInPlaceUpdatesDistrib.test-seed#[74DC3FCD315D8132]) [    ] 
o.a.s.c.s.e.JettySolrRunner Jetty properties: 
{solr.data.dir=/export/home/jenkins/workspace/Lucene-Solr-6.x-Solaris/solr/build/solr-core/test/J0/temp/solr.update.TestInPlaceUpdatesDistrib_74DC3FCD315D8132-001/tempDir-001/jetty1,
 solrconfig=solrconfig.xml, hostContext=/py_f/yw, hostPort=39625, 
coreRootDirectory=/export/home/jenkins/workspace/Lucene-Solr-6.x-Solaris/solr/build/solr-core/test/J0/temp/solr.update.TestInPlaceUpdatesDistrib_74DC3FCD315D8132-001/shard-1-001/cores}
   [junit4]   2> 99808 ERROR 
(TEST-TestInPlaceUpdatesDistrib.test-seed#[74DC3FCD315D8132]) [    ] 
o.a.s.s.StartupLoggingUtils Missing Java Option solr.log.dir. Logging may be 
missing or incomplete.
   [junit4]   2> 99809 INFO  
(TEST-TestInPlaceUpdatesDistrib.test-seed#[74DC3FCD315D8132]) [    ] 
o.a.s.s.SolrDispatchFilter  ___      _       Welcome to Apache Solr™ version 
6.7.0
   [junit4]   2> 99809 INFO  
(TEST-TestInPlaceUpdatesDistrib.test-seed#[74DC3FCD315D8132]) [    ] 
o.a.s.s.SolrDispatchFilter / __| ___| |_ _   Starting in cloud mode on port null
   [junit4]   2> 99809 INFO  
(TEST-TestInPlaceUpdatesDistrib.test-seed#[74DC3FCD315D8132]) [    ] 
o.a.s.s.SolrDispatchFilter \__ \/ _ \ | '_|  Install dir: null
   [junit4]   2> 99810 INFO  
(TEST-TestInPlaceUpdatesDistrib.test-seed#[74DC3FCD315D8132]) [    ] 
o.a.s.s.SolrDispatchFilter |___/\___/_|_|    Start time: 
2017-07-02T09:42:42.196Z
   [junit4]   2> 99819 INFO  
(TEST-TestInPlaceUpdatesDistrib.test-seed#[74DC3FCD315D8132]) [    ] 
o.a.s.s.SolrDispatchFilter Loading solr.xml from SolrHome (not found in 
ZooKeeper)
   [junit4]   2> 99819 INFO  
(TEST-TestInPlaceUpdatesDistrib.test-seed#[74DC3FCD315D8132]) [    ] 
o.a.s.c.SolrXmlConfig Loading container configuration from 
/export/home/jenkins/workspace/Lucene-Solr-6.x-Solaris/solr/build/solr-core/test/J0/temp/solr.update.TestInPlaceUpdatesDistrib_74DC3FCD315D8132-001/shard-1-001/solr.xml
   [junit4]   2> 99827 INFO  
(TEST-TestInPlaceUpdatesDistrib.test-seed#[74DC3FCD315D8132]) [    ] 
o.a.s.u.UpdateShardHandler Creating UpdateShardHandler HTTP client with params: 
socketTimeout=340000&connTimeout=45000&retry=true
   [junit4]   2> 99828 INFO  
(TEST-TestInPlaceUpdatesDistrib.test-seed#[74DC3FCD315D8132]) [    ] 
o.a.s.c.ZkContainer Zookeeper client=127.0.0.1:42694/solr
   [junit4]   2> 99897 INFO  
(TEST-TestInPlaceUpdatesDistrib.test-seed#[74DC3FCD315D8132]) 
[n:127.0.0.1:39625_py_f%2Fyw    ] o.a.s.c.c.ZkStateReader Updated live nodes 
from ZooKeeper... (0) -> (1)
   [junit4]   2> 99905 INFO  
(TEST-TestInPlaceUpdatesDistrib.test-seed#[74DC3FCD315D8132]) 
[n:127.0.0.1:39625_py_f%2Fyw    ] o.a.s.c.Overseer Overseer (id=null) closing
   [junit4]   2> 99916 INFO  
(TEST-TestInPlaceUpdatesDistrib.test-seed#[74DC3FCD315D8132]) 
[n:127.0.0.1:39625_py_f%2Fyw    ] o.a.s.c.ZkController Register node as live in 
ZooKeeper:/live_nodes/127.0.0.1:39625_py_f%2Fyw
   [junit4]   2> 99925 INFO  (zkCallback-194-thread-1) [    ] 
o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (1) -> (2)
   [junit4]   2> 99928 INFO  
(zkCallback-200-thread-1-processing-n:127.0.0.1:39625_py_f%2Fyw) 
[n:127.0.0.1:39625_py_f%2Fyw    ] o.a.s.c.c.ZkStateReader Updated live nodes 
from ZooKeeper... (1) -> (2)
   [junit4]   2> 99929 INFO  
(zkCallback-190-thread-1-processing-n:127.0.0.1:42914_py_f%2Fyw) 
[n:127.0.0.1:42914_py_f%2Fyw    ] o.a.s.c.c.ZkStateReader Updated live nodes 
from ZooKeeper... (1) -> (2)
   [junit4]   2> 100037 INFO  
(TEST-TestInPlaceUpdatesDistrib.test-seed#[74DC3FCD315D8132]) 
[n:127.0.0.1:39625_py_f%2Fyw    ] o.a.s.c.CorePropertiesLocator Found 1 core 
definitions underneath 
/export/home/jenkins/workspace/Lucene-Solr-6.x-Solaris/solr/build/solr-core/test/J0/temp/solr.update.TestInPlaceUpdatesDistrib_74DC3FCD315D8132-001/shard-1-001/cores
   [junit4]   2> 100037 INFO  
(TEST-TestInPlaceUpdatesDistrib.test-seed#[74DC3FCD315D8132]) 
[n:127.0.0.1:39625_py_f%2Fyw    ] o.a.s.c.CorePropertiesLocator Cores are: 
[collection1]
   [junit4]   2> 100039 INFO  
(coreLoadExecutor-538-thread-1-processing-n:127.0.0.1:39625_py_f%2Fyw) 
[n:127.0.0.1:39625_py_f%2Fyw c:collection1   x:collection1] 
o.a.s.c.TransientSolrCoreCacheDefault Allocating transient cache for 4 
transient cores
   [junit4]   2> 100045 INFO  
(OverseerStateUpdate-98237714256297989-127.0.0.1:42914_py_f%2Fyw-n_0000000000) 
[n:127.0.0.1:42914_py_f%2Fyw    ] o.a.s.c.o.ReplicaMutator Assigning new node 
to shard shard=shard1
   [junit4]   2> 100151 INFO  
(zkCallback-200-thread-1-processing-n:127.0.0.1:39625_py_f%2Fyw) 
[n:127.0.0.1:39625_py_f%2Fyw    ] o.a.s.c.c.ZkStateReader A cluster state 
change: [WatchedEvent state:SyncConnected type:NodeDataChanged 
path:/collections/collection1/state.json] for collection [collection1] has 
occurred - updating... (live nodes size: [2])
   [junit4]   2> 101065 INFO  
(coreLoadExecutor-538-thread-1-processing-n:127.0.0.1:39625_py_f%2Fyw) 
[n:127.0.0.1:39625_py_f%2Fyw c:collection1   x:collection1] o.a.s.c.SolrConfig 
Using Lucene MatchVersion: 6.7.0
   [junit4]   2> 101107 INFO  
(coreLoadExecutor-538-thread-1-processing-n:127.0.0.1:39625_py_f%2Fyw) 
[n:127.0.0.1:39625_py_f%2Fyw c:collection1   x:collection1] o.a.s.s.IndexSchema 
[collection1] Schema name=inplace-updates
   [junit4]   2> 101114 INFO  
(coreLoadExecutor-538-thread-1-processing-n:127.0.0.1:39625_py_f%2Fyw) 
[n:127.0.0.1:39625_py_f%2Fyw c:collection1   x:collection1] o.a.s.s.IndexSchema 
Loaded schema inplace-updates/1.6 with uniqueid field id
   [junit4]   2> 101114 INFO  
(coreLoadExecutor-538-thread-1-processing-n:127.0.0.1:39625_py_f%2Fyw) 
[n:127.0.0.1:39625_py_f%2Fyw c:collection1   x:collection1] 
o.a.s.c.CoreContainer Creating SolrCore 'collection1' using configuration from 
collection collection1, trusted=true
   [junit4]   2> 101114 INFO  
(coreLoadExecutor-538-thread-1-processing-n:127.0.0.1:39625_py_f%2Fyw) 
[n:127.0.0.1:39625_py_f%2Fyw c:collection1   x:collection1] o.a.s.c.SolrCore 
solr.RecoveryStrategy.Builder
   [junit4]   2> 101115 INFO  
(coreLoadExecutor-538-thread-1-processing-n:127.0.0.1:39625_py_f%2Fyw) 
[n:127.0.0.1:39625_py_f%2Fyw c:collection1   x:collection1] o.a.s.c.SolrCore 
[[collection1] ] Opening new SolrCore at 
[/export/home/jenkins/workspace/Lucene-Solr-6.x-Solaris/solr/build/solr-core/test/J0/temp/solr.update.TestInPlaceUpdatesDistrib_74DC3FCD315D8132-001/shard-1-001/cores/collection1],
 
dataDir=[/export/home/jenkins/workspace/Lucene-Solr-6.x-Solaris/solr/build/solr-core/test/J0/temp/solr.update.TestInPlaceUpdatesDistrib_74DC3FCD315D8132-001/shard-1-001/cores/collection1/data/]
   [junit4]   2> 101115 INFO  
(coreLoadExecutor-538-thread-1-processing-n:127.0.0.1:39625_py_f%2Fyw) 
[n:127.0.0.1:39625_py_f%2Fyw c:collection1   x:collection1] 
o.a.s.c.JmxMonitoredMap JMX monitoring is enabled. Adding Solr mbeans to JMX 
Server: com.sun.jmx.mbeanserver.JmxMBeanServer@4c490d54
   [junit4]   2> 101129 WARN  
(coreLoadExecutor-538-thread-1-processing-n:127.0.0.1:39625_py_f%2Fyw) 
[n:127.0.0.1:39625_py_f%2Fyw c:collection1   x:collection1] 
o.a.s.c.RequestHandlers INVALID paramSet a in requestHandler {type = 
requestHandler,name = /dump,class = DumpRequestHandler,attributes = 
{initParams=a, name=/dump, class=DumpRequestHandler},args = 
{defaults={a=A,b=B}}}
   [junit4]   2> 101201 INFO  
(coreLoadExecutor-538-thread-1-processing-n:127.0.0.1:39625_py_f%2Fyw) 
[n:127.0.0.1:39625_py_f%2Fyw c:collection1   x:collection1] 
o.a.s.u.UpdateHandler Using UpdateLog implementation: 
org.apache.solr.update.UpdateLog
   [junit4]   2> 101201 INFO  
(coreLoadExecutor-538-thread-1-processing-n:127.0.0.1:39625_py_f%2Fyw) 
[n:127.0.0.1:39625_py_f%2Fyw c:collection1   x:collection1] o.a.s.u.UpdateLog 
Initializing UpdateLog: dataDir= defaultSyncLevel=FLUSH numRecordsToKeep=100 
maxNumLogsToKeep=10 numVersionBuckets=65536
   [junit4]   2> 101201 INFO  
(coreLoadExecutor-538-thread-1-processing-n:127.0.0.1:39625_py_f%2Fyw) 
[n:127.0.0.1:39625_py_f%2Fyw c:collection1   x:collection1] 
o.a.s.u.CommitTracker Hard AutoCommit: disabled
   [junit4]   2> 101201 INFO  
(coreLoadExecutor-538-thread-1-processing-n:127.0.0.1:39625_py_f%2Fyw) 
[n:127.0.0.1:39625_py_f%2Fyw c:collection1   x:collection1] 
o.a.s.u.CommitTracker Soft AutoCommit: disabled
   [junit4]   2> 101209 INFO  
(coreLoadExecutor-538-thread-1-processing-n:127.0.0.1:39625_py_f%2Fyw) 
[n:127.0.0.1:39625_py_f%2Fyw c:collection1   x:collection1] 
o.a.s.s.SolrIndexSearcher Opening [Searcher@249b3561[collection1] main]
   [junit4]   2> 101221 INFO  
(coreLoadExecutor-538-thread-1-processing-n:127.0.0.1:39625_py_f%2Fyw) 
[n:127.0.0.1:39625_py_f%2Fyw c:collection1   x:collection1] 
o.a.s.r.ManagedResourceStorage Configured ZooKeeperStorageIO with znodeBase: 
/configs/conf1
   [junit4]   2> 101221 INFO  
(coreLoadExecutor-538-thread-1-processing-n:127.0.0.1:39625_py_f%2Fyw) 
[n:127.0.0.1:39625_py_f%2Fyw c:collection1   x:collection1] 
o.a.s.r.ManagedResourceStorage Loaded null at path _rest_managed.json using 
ZooKeeperStorageIO:path=/configs/conf1
   [junit4]   2> 101221 INFO  
(coreLoadExecutor-538-thread-1-processing-n:127.0.0.1:39625_py_f%2Fyw) 
[n:127.0.0.1:39625_py_f%2Fyw c:collection1   x:collection1] 
o.a.s.h.ReplicationHandler Commits will be reserved for  10000
   [junit4]   2> 101222 INFO  
(coreLoadExecutor-538-thread-1-processing-n:127.0.0.1:39625_py_f%2Fyw) 
[n:127.0.0.1:39625_py_f%2Fyw c:collection1   x:collection1] o.a.s.u.UpdateLog 
Could not find max version in index or recent updates, using new clock 
1571803432073822208
   [junit4]   2> 101229 INFO  
(searcherExecutor-539-thread-1-processing-n:127.0.0.1:39625_py_f%2Fyw 
x:collection1 c:collection1) [n:127.0.0.1:39625_py_f%2Fyw c:collection1   
x:collection1] o.a.s.c.SolrCore [collection1] Registered new searcher 
Searcher@249b3561[collection1] 
main{ExitableDirectoryReader(UninvertingDirectoryReader())}
   [junit4]   2> 101308 INFO  
(coreZkRegister-533-thread-1-processing-n:127.0.0.1:39625_py_f%2Fyw 
x:collection1 c:collection1) [n:127.0.0.1:39625_py_f%2Fyw c:collection1 
s:shard1 r:core_node1 x:collection1] o.a.s.c.ShardLeaderElectionContext Enough 
replicas found to continue.
   [junit4]   2> 101308 INFO  
(coreZkRegister-533-thread-1-processing-n:127.0.0.1:39625_py_f%2Fyw 
x:collection1 c:collection1) [n:127.0.0.1:39625_py_f%2Fyw c:collection1 
s:shard1 r:core_node1 x:collection1] o.a.s.c.ShardLeaderElectionContext I may 
be the new leader - try and sync
   [junit4]   2> 101308 INFO  
(coreZkRegister-533-thread-1-processing-n:127.0.0.1:39625_py_f%2Fyw 
x:collection1 c:collection1) [n:127.0.0.1:39625_py_f%2Fyw c:collection1 
s:shard1 r:core_node1 x:collection1] o.a.s.c.SyncStrategy Sync replicas to 
https://127.0.0.1:39625/py_f/yw/collection1/
   [junit4]   2> 101309 INFO  
(coreZkRegister-533-thread-1-processing-n:127.0.0.1:39625_py_f%2Fyw 
x:collection1 c:collection1) [n:127.0.0.1:39625_py_f%2Fyw c:collection1 
s:shard1 r:core_node1 x:collection1] o.a.s.c.SyncStrategy Sync Success - now 
sync replicas to me
   [junit4]   2> 101309 INFO  
(coreZkRegister-533-thread-1-processing-n:127.0.0.1:39625_py_f%2Fyw 
x:collection1 c:collection1) [n:127.0.0.1:39625_py_f%2Fyw c:collection1 
s:shard1 r:core_node1 x:collection1] o.a.s.c.SyncStrategy 
https://127.0.0.1:39625/py_f/yw/collection1/ has no replicas
   [junit4]   2> 101309 INFO  
(coreZkRegister-533-thread-1-processing-n:127.0.0.1:39625_py_f%2Fyw 
x:collection1 c:collection1) [n:127.0.0.1:39625_py_f%2Fyw c:collection1 
s:shard1 r:core_node1 x:collection1] o.a.s.c.ShardLeaderElectionContext Found 
all replicas participating in election, clear LIR
   [junit4]   2> 101405 INFO  
(coreZkRegister-533-thread-1-processing-n:127.0.0.1:39625_py_f%2Fyw 
x:collection1 c:collection1) [n:127.0.0.1:39625_py_f%2Fyw c:collection1 
s:shard1 r:core_node1 x:collection1] o.a.s.c.ShardLeaderElectionContext I am 
the new leader: https://127.0.0.1:39625/py_f/yw/collection1/ shard1
   [junit4]   2> 101508 INFO  
(zkCallback-200-thread-1-processing-n:127.0.0.1:39625_py_f%2Fyw) 
[n:127.0.0.1:39625_py_f%2Fyw    ] o.a.s.c.c.ZkStateReader A cluster state 
change: [WatchedEvent state:SyncConnected type:NodeDataChanged 
path:/collections/collection1/state.json] for collection [collection1] has 
occurred - updating... (live nodes size: [2])
   [junit4]   2> 101521 INFO  
(coreZkRegister-533-thread-1-processing-n:127.0.0.1:39625_py_f%2Fyw 
x:collection1 c:collection1) [n:127.0.0.1:39625_py_f%2Fyw c:collection1 
s:shard1 r:core_node1 x:collection1] o.a.s.c.ZkController I am the leader, no 
recovery necessary
   [junit4]   2> 101627 INFO  
(zkCallback-200-thread-1-processing-n:127.0.0.1:39625_py_f%2Fyw) 
[n:127.0.0.1:39625_py_f%2Fyw    ] o.a.s.c.c.ZkStateReader A cluster state 
change: [WatchedEvent state:SyncConnected type:NodeDataChanged 
path:/collections/collection1/state.json] for collection [collection1] has 
occurred - updating... (live nodes size: [2])
   [junit4]   2> 101775 INFO  
(TEST-TestInPlaceUpdatesDistrib.test-seed#[74DC3FCD315D8132]) [    ] 
o.a.s.SolrTestCaseJ4 Writing core.properties file to 
/export/home/jenkins/workspace/Lucene-Solr-6.x-Solaris/solr/build/solr-core/test/J0/temp/solr.update.TestInPlaceUpdatesDistrib_74DC3FCD315D8132-001/shard-2-001/cores/collection1
   [junit4]   2> 101775 INFO  
(TEST-TestInPlaceUpdatesDistrib.test-seed#[74DC3FCD315D8132]) [    ] 
o.a.s.c.AbstractFullDistribZkTestBase create jetty 2 in directory 
/export/home/jenkins/workspace/Lucene-Solr-6.x-Solaris/solr/build/solr-core/test/J0/temp/solr.update.TestInPlaceUpdatesDistrib_74DC3FCD315D8132-001/shard-2-001
   [junit4]   2> 101777 INFO  
(TEST-TestInPlaceUpdatesDistrib.test-seed#[74DC3FCD315D8132]) [    ] 
o.e.j.s.Server jetty-9.3.14.v20161028
   [junit4]   2> 101779 INFO  
(TEST-TestInPlaceUpdatesDistrib.test-seed#[74DC3FCD315D8132]) [    ] 
o.e.j.s.h.ContextHandler Started 
o.e.j.s.ServletContextHandler@56f9b66b{/py_f/yw,null,AVAILABLE}
   [junit4]   2> 101780 INFO  
(TEST-TestInPlaceUpdatesDistrib.test-seed#[74DC3FCD315D8132]) [    ] 
o.e.j.s.AbstractConnector Started ServerConnector@26e02f21{SSL,[ssl, 
http/1.1]}{127.0.0.1:56126}
   [junit4]   2> 101780 INFO  
(TEST-TestInPlaceUpdatesDistrib.test-seed#[74DC3FCD315D8132]) [    ] 
o.e.j.s.Server Started @103926ms
   [junit4]   2> 101780 INFO  
(TEST-TestInPlaceUpdatesDistrib.test-seed#[74DC3FCD315D8132]) [    ] 
o.a.s.c.s.e.JettySolrRunner Jetty properties: 
{solr.data.dir=/export/home/jenkins/workspace/Lucene-Solr-6.x-Solaris/solr/build/solr-core/test/J0/temp/solr.update.TestInPlaceUpdatesDistrib_74DC3FCD315D8132-001/tempDir-001/jetty2,
 solrconfig=solrconfig.xml, hostContext=/py_f/yw, hostPort=56126, 
coreRootDirectory=/export/home/jenkins/workspace/Lucene-Solr-6.x-Solaris/solr/build/solr-core/test/J0/temp/solr.update.TestInPlaceUpdatesDistrib_74DC3FCD315D8132-001/shard-2-001/cores}
   [junit4]   2> 101780 ERROR 
(TEST-TestInPlaceUpdatesDistrib.test-seed#[74DC3FCD315D8132]) [    ] 
o.a.s.s.StartupLoggingUtils Missing Java Option solr.log.dir. Logging may be 
missing or incomplete.
   [junit4]   2> 101786 INFO  
(TEST-TestInPlaceUpdatesDistrib.test-seed#[74DC3FCD315D8132]) [    ] 
o.a.s.s.SolrDispatchFilter  ___      _       Welcome to Apache Solr? version 
6.7.0
   [junit4]   2> 101786 INFO  
(TEST-TestInPlaceUpdatesDistrib.test-seed#[74DC3FCD315D8132]) [    ] 
o.a.s.s.SolrDispatchFilter / __| ___| |_ _   Starting in cloud mode on port null
   [junit4]   2> 101786 INFO  
(TEST-TestInPlaceUpdatesDistrib.test-seed#[74DC3FCD315D8132]) [    ] 
o.a.s.s.SolrDispatchFilter \__ \/ _ \ | '_|  Install dir: null
   [junit4]   2> 101786 INFO  
(TEST-TestInPlaceUpdatesDistrib.test-seed#[74DC3FCD315D8132]) [    ] 
o.a.s.s.SolrDispatchFilter |___/\___/_|_|    Start time: 
2017-07-02T09:42:44.172Z
   [junit4]   2> 101797 INFO  
(TEST-TestInPlaceUpdatesDistrib.test-seed#[74DC3FCD315D8132]) [    ] 
o.a.s.s.SolrDispatchFilter Loading solr.xml from SolrHome (not found in 
ZooKeeper)
   [junit4]   2> 101797 INFO  
(TEST-TestInPlaceUpdatesDistrib.test-seed#[74DC3FCD315D8132]) [    ] 
o.a.s.c.SolrXmlConfig Loading container configuration from 
/export/home/jenkins/workspace/Lucene-Solr-6.x-Solaris/solr/build/solr-core/test/J0/temp/solr.update.TestInPlaceUpdatesDistrib_74DC3FCD315D8132-001/shard-2-001/solr.xml
   [junit4]   2> 101828 INFO  
(TEST-TestInPlaceUpdatesDistrib.test-seed#[74DC3FCD315D8132]) [    ] 
o.a.s.u.UpdateShardHandler Creating UpdateShardHandler HTTP client with params: 
socketTimeout=340000&connTimeout=45000&retry=true
   [junit4]   2> 101829 INFO  
(TEST-TestInPlaceUpdatesDistrib.test-seed#[74DC3FCD315D8132]) [    ] 
o.a.s.c.ZkContainer Zookeeper client=127.0.0.1:42694/solr
   [junit4]   2> 101969 INFO  
(TEST-TestInPlaceUpdatesDistrib.test-seed#[74DC3FCD315D8132]) 
[n:127.0.0.1:56126_py_f%2Fyw    ] o.a.s.c.c.ZkStateReader Updated live nodes 
from ZooKeeper... (0) -> (2)
   [junit4]   2> 101973 INFO  
(TEST-TestInPlaceUpdatesDistrib.test-seed#[74DC3FCD315D8132]) 
[n:127.0.0.1:56126_py_f%2Fyw    ] o.a.s.c.Overseer Overseer (id=null) closing
   [junit4]   2> 101984 INFO  
(TEST-TestInPlaceUpdatesDistrib.test-seed#[74DC3FCD315D8132]) 
[n:127.0.0.1:56126_py_f%2Fyw    ] o.a.s.c.ZkController Register node as live in 
ZooKeeper:/live_nodes/127.0.0.1:56126_py_f%2Fyw
   [junit4]   2> 101988 INFO  
(zkCallback-200-thread-1-processing-n:127.0.0.1:39625_py_f%2Fyw) 
[n:127.0.0.1:39625_py_f%2Fyw    ] o.a.s.c.c.ZkStateReader Updated live nodes 
from ZooKeeper... (2) -> (3)
   [junit4]   2> 101991 INFO  (zkCallback-194-thread-1) [    ] 
o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (2) -> (3)
   [junit4]   2> 101992 INFO  
(zkCallback-190-thread-1-processing-n:127.0.0.1:42914_py_f%2Fyw) 
[n:127.0.0.1:42914_py_f%2Fyw    ] o.a.s.c.c.ZkStateReader Updated live nodes 
from ZooKeeper... (2) -> (3)
   [junit4]   2> 101995 INFO  
(zkCallback-206-thread-1-processing-n:127.0.0.1:56126_py_f%2Fyw) 
[n:127.0.0.1:56126_py_f%2Fyw    ] o.a.s.c.c.ZkStateReader Updated live nodes 
from ZooKeeper... (2) -> (3)
   [junit4]   2> 102083 INFO  
(TEST-TestInPlaceUpdatesDistrib.test-seed#[74DC3FCD315D8132]) 
[n:127.0.0.1:56126_py_f%2Fyw    ] o.a.s.c.CorePropertiesLocator Found 1 core 
definitions underneath 
/export/home/jenkins/workspace/Lucene-Solr-6.x-Solaris/solr/build/solr-core/test/J0/temp/solr.update.TestInPlaceUpdatesDistrib_74DC3FCD315D8132-001/shard-2-001/cores
   [junit4]   2> 102084 INFO  
(TEST-TestInPlaceUpdatesDistrib.test-seed#[74DC3FCD315D8132]) 
[n:127.0.0.1:56126_py_f%2Fyw    ] o.a.s.c.CorePropertiesLocator Cores are: 
[collection1]
   [junit4]   2> 102089 INFO  
(coreLoadExecutor-549-thread-1-processing-n:127.0.0.1:56126_py_f%2Fyw) 
[n:127.0.0.1:56126_py_f%2Fyw c:collection1   x:collection1] 
o.a.s.c.TransientSolrCoreCacheDefault Allocating transient cache for 4 
transient cores
   [junit4]   2> 102091 INFO  
(OverseerStateUpdate-98237714256297989-127.0.0.1:42914_py_f%2Fyw-n_0000000000) 
[n:127.0.0.1:42914_py_f%2Fyw    ] o.a.s.c.o.ReplicaMutator Assigning new node 
to shard shard=shard1
   [junit4]   2> 102198 INFO  
(zkCallback-200-thread-1-processing-n:127.0.0.1:39625_py_f%2Fyw) 
[n:127.0.0.1:39625_py_f%2Fyw    ] o.a.s.c.c.ZkStateReader A cluster state 
change: [WatchedEvent state:SyncConnected type:NodeDataChanged 
path:/collections/collection1/state.json] for collection [collection1] has 
occurred - updating... (live nodes size: [3])
   [junit4]   2> 102198 INFO  
(zkCallback-206-thread-1-processing-n:127.0.0.1:56126_py_f%2Fyw) 
[n:127.0.0.1:56126_py_f%2Fyw    ] o.a.s.c.c.ZkStateReader A cluster state 
change: [WatchedEvent state:SyncConnected type:NodeDataChanged 
path:/collections/collection1/state.json] for collection [collection1] has 
occurred - updating... (live nodes size: [3])
   [junit4]   2> 103234 INFO  
(coreLoadExecutor-549-thread-1-processing-n:127.0.0.1:56126_py_f%2Fyw) 
[n:127.0.0.1:56126_py_f%2Fyw c:collection1   x:collection1] o.a.s.c.SolrConfig 
Using Lucene MatchVersion: 6.7.0
   [junit4]   2> 103311 INFO  
(coreLoadExecutor-549-thread-1-processing-n:127.0.0.1:56126_py_f%2Fyw) 
[n:127.0.0.1:56126_py_f%2Fyw c:collection1   x:collection1] o.a.s.s.IndexSchema 
[collection1] Schema name=inplace-updates
   [junit4]   2> 103314 INFO  
(coreLoadExecutor-549-thread-1-processing-n:127.0.0.1:56126_py_f%2Fyw) 
[n:127.0.0.1:56126_py_f%2Fyw c:collection1   x:collection1] o.a.s.s.IndexSchema 
Loaded schema inplace-updates/1.6 with uniqueid field id
   [junit4]   2> 103314 INFO  
(coreLoadExecutor-549-thread-1-processing-n:127.0.0.1:56126_py_f%2Fyw) 
[n:127.0.0.1:56126_py_f%2Fyw c:collection1   x:collection1] 
o.a.s.c.CoreContainer Creating SolrCore 'collection1' using configuration from 
collection collection1, trusted=true
   [junit4]   2> 103314 INFO  
(coreLoadExecutor-549-thread-1-processing-n:127.0.0.1:56126_py_f%2Fyw) 
[n:127.0.0.1:56126_py_f%2Fyw c:collection1   x:collection1] o.a.s.c.SolrCore 
solr.RecoveryStrategy.Builder
   [junit4]   2> 103315 INFO  
(coreLoadExecutor-549-thread-1-processing-n:127.0.0.1:56126_py_f%2Fyw) 
[n:127.0.0.1:56126_py_f%2Fyw c:collection1   x:collection1] o.a.s.c.SolrCore 
[[collection1] ] Opening new SolrCore at 
[/export/home/jenkins/workspace/Lucene-Solr-6.x-Solaris/solr/build/solr-core/test/J0/temp/solr.update.TestInPlaceUpdatesDistrib_74DC3FCD315D8132-001/shard-2-001/cores/collection1],
 
dataDir=[/export/home/jenkins/workspace/Lucene-Solr-6.x-Solaris/solr/build/solr-core/test/J0/temp/solr.update.TestInPlaceUpdatesDistrib_74DC3FCD315D8132-001/shard-2-001/cores/collection1/data/]
   [junit4]   2> 103315 INFO  
(coreLoadExecutor-549-thread-1-processing-n:127.0.0.1:56126_py_f%2Fyw) 
[n:127.0.0.1:56126_py_f%2Fyw c:collection1   x:collection1] 
o.a.s.c.JmxMonitoredMap JMX monitoring is enabled. Adding Solr mbeans to JMX 
Server: com.sun.jmx.mbeanserver.JmxMBeanServer@4c490d54
   [junit4]   2> 103330 WARN  
(coreLoadExecutor-549-thread-1-processing-n:127.0.0.1:56126_py_f%2Fyw) 
[n:127.0.0.1:56126_py_f%2Fyw c:collection1   x:collection1] 
o.a.s.c.RequestHandlers INVALID paramSet a in requestHandler {type = 
requestHandler,name = /dump,class = DumpRequestHandler,attributes = 
{initParams=a, name=/dump, class=DumpRequestHandler},args = 
{defaults={a=A,b=B}}}
   [junit4]   2> 103410 INFO  
(coreLoadExecutor-549-thread-1-processing-n:127.0.0.1:56126_py_f%2Fyw) 
[n:127.0.0.1:56126_py_f%2Fyw c:collection1   x:collection1] 
o.a.s.u.UpdateHandler Using UpdateLog implementation: 
org.apache.solr.update.UpdateLog
   [junit4]   2> 103410 INFO  
(coreLoadExecutor-549-thread-1-processing-n:127.0.0.1:56126_py_f%2Fyw) 
[n:127.0.0.1:56126_py_f%2Fyw c:collection1   x:collection1] o.a.s.u.UpdateLog 
Initializing UpdateLog: dataDir= defaultSyncLevel=FLUSH numRecordsToKeep=100 
maxNumLogsToKeep=10 numVersionBuckets=65536
   [junit4]   2> 103411 INFO  
(coreLoadExecutor-549-thread-1-processing-n:127.0.0.1:56126_py_f%2Fyw) 
[n:127.0.0.1:56126_py_f%2Fyw c:collection1   x:collection1] 
o.a.s.u.CommitTracker Hard AutoCommit: disabled
   [junit4]   2> 103411 INFO  
(coreLoadExecutor-549-thread-1-processing-n:127.0.0.1:56126_py_f%2Fyw) 
[n:127.0.0.1:56126_py_f%2Fyw c:collection1   x:collection1] 
o.a.s.u.CommitTracker Soft AutoCommit: disabled
   [junit4]   2> 103420 INFO  
(coreLoadExecutor-549-thread-1-processing-n:127.0.0.1:56126_py_f%2Fyw) 
[n:127.0.0.1:56126_py_f%2Fyw c:collection1   x:collection1] 
o.a.s.s.SolrIndexSearcher Opening [Searcher@489ea3a6[collection1] main]
   [junit4]   2> 103427 INFO  
(coreLoadExecutor-549-thread-1-processing-n:127.0.0.1:56126_py_f%2Fyw) 
[n:127.0.0.1:56126_py_f%2Fyw c:collection1   x:collection1] 
o.a.s.r.ManagedResourceStorage Configured ZooKeeperStorageIO with znodeBase: 
/configs/conf1
   [junit4]   2> 103428 INFO  
(coreLoadExecutor-549-thread-1-processing-n:127.0.0.1:56126_py_f%2Fyw) 
[n:127.0.0.1:56126_py_f%2Fyw c:collection1   x:collection1] 
o.a.s.r.ManagedResourceStorage Loaded null at path _rest_managed.json using 
ZooKeeperStorageIO:path=/configs/conf1
   [junit4]   2> 103428 INFO  
(coreLoadExecutor-549-thread-1-processing-n:127.0.0.1:56126_py_f%2Fyw) 
[n:127.0.0.1:56126_py_f%2Fyw c:collection1   x:collection1] 
o.a.s.h.ReplicationHandler Commits will be reserved for  10000
   [junit4]   2> 103430 INFO  
(searcherExecutor-550-thread-1-processing-n:127.0.0.1:56126_py_f%2Fyw 
x:collection1 c:collection1) [n:127.0.0.1:56126_py_f%2Fyw c:collection1   
x:collection1] o.a.s.c.SolrCore [collection1] Registered new searcher 
Searcher@489ea3a6[collection1] 
main{ExitableDirectoryReader(UninvertingDirectoryReader())}
   [junit4]   2> 103430 INFO  
(coreLoadExecutor-549-thread-1-processing-n:127.0.0.1:56126_py_f%2Fyw) 
[n:127.0.0.1:56126_py_f%2Fyw c:collection1   x:collection1] o.a.s.u.UpdateLog 
Could not find max version in index or recent updates, using new clock 
1571803434389078016
   [junit4]   2> 103449 INFO  
(coreZkRegister-544-thread-1-processing-n:127.0.0.1:56126_py_f%2Fyw 
x:collection1 c:collection1) [n:127.0.0.1:56126_py_f%2Fyw c:collection1 
s:shard1 r:core_node2 x:collection1] o.a.s.c.ZkController Core needs to 
recover:collection1
   [junit4]   2> 103450 INFO  
(updateExecutor-203-thread-1-processing-n:127.0.0.1:56126_py_f%2Fyw 
x:collection1 s:shard1 c:collection1 r:core_node2) [n:127.0.0.1:56126_py_f%2Fyw 
c:collection1 s:shard1 r:core_node2 x:collection1] o.a.s.u.DefaultSolrCoreState 
Running recovery
   [junit4]   2> 103451 INFO  
(recoveryExecutor-204-thread-1-processing-n:127.0.0.1:56126_py_f%2Fyw 
x:collection1 s:shard1 c:collection1 r:core_node2) [n:127.0.0.1:56126_py_f%2Fyw 
c:collection1 s:shard1 r:core_node2 x:collection1] o.a.s.c.RecoveryStrategy 
Starting recovery process. recoveringAfterStartup=true
   [junit4]   2> 103452 INFO  
(recoveryExecutor-204-thread-1-processing-n:127.0.0.1:56126_py_f%2Fyw 
x:collection1 s:shard1 c:collection1 r:core_node2) [n:127.0.0.1:56126_py_f%2Fyw 
c:collection1 s:shard1 r:core_node2 x:collection1] o.a.s.c.RecoveryStrategy 
###### startupVersions=[[]]
   [junit4]   2> 103452 INFO  
(recoveryExecutor-204-thread-1-processing-n:127.0.0.1:56126_py_f%2Fyw 
x:collection1 s:shard1 c:collection1 r:core_node2) [n:127.0.0.1:56126_py_f%2Fyw 
c:collection1 s:shard1 r:core_node2 x:collection1] o.a.s.c.RecoveryStrategy 
Begin buffering updates. core=[collection1]
   [junit4]   2> 103452 INFO  
(recoveryExecutor-204-thread-1-processing-n:127.0.0.1:56126_py_f%2Fyw 
x:collection1 s:shard1 c:collection1 r:core_node2) [n:127.0.0.1:56126_py_f%2Fyw 
c:collection1 s:shard1 r:core_node2 x:collection1] o.a.s.u.UpdateLog Starting 
to buffer updates. FSUpdateLog{state=ACTIVE, tlog=null}
   [junit4]   2> 103452 INFO  
(recoveryExecutor-204-thread-1-processing-n:127.0.0.1:56126_py_f%2Fyw 
x:collection1 s:shard1 c:collection1 r:core_node2) [n:127.0.0.1:56126_py_f%2Fyw 
c:collection1 s:shard1 r:core_node2 x:collection1] o.a.s.c.RecoveryStrategy 
Publishing state of core [collection1] as recovering, leader is 
[https://127.0.0.1:39625/py_f/yw/collection1/] and I am 
[https://127.0.0.1:56126/py_f/yw/collection1/]
   [junit4]   2> 103458 INFO  
(recoveryExecutor-204-thread-1-processing-n:127.0.0.1:56126_py_f%2Fyw 
x:collection1 s:shard1 c:collection1 r:core_node2) [n:127.0.0.1:56126_py_f%2Fyw 
c:collection1 s:shard1 r:core_node2 x:collection1] o.a.s.c.RecoveryStrategy 
Sending prep recovery command to [https://127.0.0.1:39625/py_f/yw]; 
[WaitForState: 
action=PREPRECOVERY&core=collection1&nodeName=127.0.0.1:56126_py_f%252Fyw&coreNodeName=core_node2&state=recovering&checkLive=true&onlyIfLeader=true&onlyIfLeaderActive=true]
   [junit4]   2> 103508 INFO  (qtp833328225-1371) [n:127.0.0.1:39625_py_f%2Fyw  
  ] o.a.s.h.a.PrepRecoveryOp Going to wait for coreNodeName: core_node2, state: 
recovering, checkLive: true, onlyIfLeader: true, onlyIfLeaderActive: true
   [junit4]   2> 103509 INFO  (qtp833328225-1371) [n:127.0.0.1:39625_py_f%2Fyw  
  ] o.a.s.h.a.PrepRecoveryOp Will wait a max of 183 seconds to see collection1 
(shard1 of collection1) have state: recovering
   [junit4]   2> 103509 INFO  (qtp833328225-1371) [n:127.0.0.1:39625_py_f%2Fyw  
  ] o.a.s.h.a.PrepRecoveryOp In WaitForState(recovering): 
collection=collection1, shard=shard1, thisCore=collection1, 
leaderDoesNotNeedRecovery=false, isLeader? true, live=true, checkLive=true, 
currentState=down, localState=active, nodeName=127.0.0.1:56126_py_f%2Fyw, 
coreNodeName=core_node2, onlyIfActiveCheckResult=false, nodeProps: 
core_node2:{"core":"collection1","base_url":"https://127.0.0.1:56126/py_f/yw","node_name":"127.0.0.1:56126_py_f%2Fyw","state":"down"}
   [junit4]   2> 103577 INFO  
(zkCallback-200-thread-1-processing-n:127.0.0.1:39625_py_f%2Fyw) 
[n:127.0.0.1:39625_py_f%2Fyw    ] o.a.s.c.c.ZkStateReader A cluster state 
change: [WatchedEvent state:SyncConnected type:NodeDataChanged 
path:/collections/collection1/state.json] for collection [collection1] has 
occurred - updating... (live nodes size: [3])
   [junit4]   2> 103579 INFO  
(zkCallback-206-thread-1-processing-n:127.0.0.1:56126_py_f%2Fyw) 
[n:127.0.0.1:56126_py_f%2Fyw    ] o.a.s.c.c.ZkStateReader A cluster state 
change: [WatchedEvent state:SyncConnected type:NodeDataChanged 
path:/collections/collection1/state.json] for collection [collection1] has 
occurred - updating... (live nodes size: [3])
   [junit4]   2> 103810 INFO  
(TEST-TestInPlaceUpdatesDistrib.test-seed#[74DC3FCD315D8132]) [    ] 
o.a.s.SolrTestCaseJ4 Writing core.properties file to 
/export/home/jenkins/workspace/Lucene-Solr-6.x-Solaris/solr/build/solr-core/test/J0/temp/solr.update.TestInPlaceUpdatesDistrib_74DC3FCD315D8132-001/shard-3-001/cores/collection1
   [junit4]   2> 103810 INFO  
(TEST-TestInPlaceUpdatesDistrib.test-seed#[74DC3FCD315D8132]) [    ] 
o.a.s.c.AbstractFullDistribZkTestBase create jetty 3 in directory 
/export/home/jenkins/workspace/Lucene-Solr-6.x-Solaris/solr/build/solr-core/test/J0/temp/solr.update.TestInPlaceUpdatesDistrib_74DC3FCD315D8132-001/shard-3-001
   [junit4]   2> 103813 INFO  
(TEST-TestInPlaceUpdatesDistrib.test-seed#[74DC3FCD315D8132]) [    ] 
o.e.j.s.Server jetty-9.3.14.v20161028
   [junit4]   2> 103816 INFO  
(TEST-TestInPlaceUpdatesDistrib.test-seed#[74DC3FCD315D8132]) [    ] 
o.e.j.s.h.ContextHandler Started 
o.e.j.s.ServletContextHandler@70cfae0f{/py_f/yw,null,AVAILABLE}
   [junit4]   2> 103817 INFO  
(TEST-TestInPlaceUpdatesDistrib.test-seed#[74DC3FCD315D8132]) [    ] 
o.e.j.s.AbstractConnector Started ServerConnector@6947848e{SSL,[ssl, 
http/1.1]}{127.0.0.1:38227}
   [junit4]   2> 103817 INFO  
(TEST-TestInPlaceUpdatesDistrib.test-seed#[74DC3FCD315D8132]) [    ] 
o.e.j.s.Server Started @105964ms
   [junit4]   2> 103817 INFO  
(TEST-TestInPlaceUpdatesDistrib.test-seed#[74DC3FCD315D8132]) [    ] 
o.a.s.c.s.e.JettySolrRunner Jetty properties: 
{solr.data.dir=/export/home/jenkins/workspace/Lucene-Solr-6.x-Solaris/solr/build/solr-core/test/J0/temp/solr.update.TestInPlaceUpdatesDistrib_74DC3FCD315D8132-001/tempDir-001/jetty3,
 solrconfig=solrconfig.xml, hostContext=/py_f/yw, hostPort=38227, 
coreRootDirectory=/export/home/jenkins/workspace/Lucene-Solr-6.x-Solaris/solr/build/solr-core/test/J0/temp/solr.update.TestInPlaceUpdatesDistrib_74DC3FCD315D8132-001/shard-3-001/cores}
   [junit4]   2> 103817 ERROR 
(TEST-TestInPlaceUpdatesDistrib.test-seed#[74DC3FCD315D8132]) [    ] 
o.a.s.s.StartupLoggingUtils Missing Java Option solr.log.dir. Logging may be 
missing or incomplete.
   [junit4]   2> 103825 INFO  
(TEST-TestInPlaceUpdatesDistrib.test-seed#[74DC3FCD315D8132]) [    ] 
o.a.s.s.SolrDispatchFilter  ___      _       Welcome to Apache Solr? version 
6.7.0
   [junit4]   2> 103825 INFO  
(TEST-TestInPlaceUpdatesDistrib.test-seed#[74DC3FCD315D8132]) [    ] 
o.a.s.s.SolrDispatchFilter / __| ___| |_ _   Starting in cloud mode on port null
   [junit4]   2> 103825 INFO  
(TEST-TestInPlaceUpdatesDistrib.test-seed#[74DC3FCD315D8132]) [    ] 
o.a.s.s.SolrDispatchFilter \__ \/ _ \ | '_|  Install dir: null
   [junit4]   2> 103825 INFO  
(TEST-TestInPlaceUpdatesDistrib.test-seed#[74DC3FCD315D8132]) [    ] 
o.a.s.s.SolrDispatchFilter |___/\___/_|_|    Start time: 
2017-07-02T09:42:46.211Z
   [junit4]   2> 103954 INFO  
(TEST-TestInPlaceUpdatesDistrib.test-seed#[74DC3FCD315D8132]) [    ] 
o.a.s.s.SolrDispatchFilter Loading solr.xml from SolrHome (not found in 
ZooKeeper)
   [junit4]   2> 103954 INFO  
(TEST-TestInPlaceUpdatesDistrib.test-seed#[74DC3FCD315D8132]) [    ] 
o.a.s.c.SolrXmlConfig Loading container configuration from 
/export/home/jenkins/workspace/Lucene-Solr-6.x-Solaris/solr/build/solr-core/test/J0/temp/solr.update.TestInPlaceUpdatesDistrib_74DC3FCD315D8132-001/shard-3-001/solr.xml
   [junit4]   2> 103964 INFO  
(TEST-TestInPlaceUpdatesDistrib.test-seed#[74DC3FCD315D8132]) [    ] 
o.a.s.u.UpdateShardHandler Creating UpdateShardHandler HTTP client with params: 
socketTimeout=340000&connTimeout=45000&retry=true
   [junit4]   2> 103969 INFO  
(TEST-TestInPlaceUpdatesDistrib.test-seed#[74DC3FCD315D8132]) [    ] 
o.a.s.c.ZkContainer Zookeeper client=127.0.0.1:42694/solr
   [junit4]   2> 104005 INFO  
(TEST-TestInPlaceUpdatesDistrib.test-seed#[74DC3FCD315D8132]) 
[n:127.0.0.1:38227_py_f%2Fyw    ] o.a.s.c.c.ZkStateReader Updated live nodes 
from ZooKeeper... (0) -> (3)
   [junit4]   2> 104008 INFO  
(TEST-TestInPlaceUpdatesDistrib.test-seed#[74DC3FCD315D8132]) 
[n:127.0.0.1:38227_py_f%2Fyw    ] o.a.s.c.Overseer Overseer (id=null) closing
   [junit4]   2> 104015 INFO  
(TEST-TestInPlaceUpdatesDistrib.test-seed#[74DC3FCD315D8132]) 
[n:127.0.0.1:38227_py_f%2Fyw    ] o.a.s.c.ZkController Register node as live in 
ZooKeeper:/live_nodes/127.0.0.1:38227_py_f%2Fyw
   [junit4]   2> 104017 INFO  (zkCallback-194-thread-1) [    ] 
o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (3) -> (4)
   [junit4]   2> 104056 INFO  
(zkCallback-206-thread-1-processing-n:127.0.0.1:56126_py_f%2Fyw) 
[n:127.0.0.1:56126_py_f%2Fyw    ] o.a.s.c.c.ZkStateReader Updated live nodes 
from ZooKeeper... (3) -> (4)
   [junit4]   2> 104057 INFO  
(zkCallback-190-thread-1-processing-n:127.0.0.1:42914_py_f%2Fyw) 
[n:127.0.0.1:42914_py_f%2Fyw    ] o.a.s.c.c.ZkStateReader Updated live nodes 
from ZooKeeper... (3) -> (4)
   [junit4]   2> 104063 INFO  
(zkCallback-200-thread-1-processing-n:127.0.0.1:39625_py_f%2Fyw) 
[n:127.0.0.1:39625_py_f%2Fyw    ] o.a.s.c.c.ZkStateReader Updated live nodes 
from ZooKeeper... (3) -> (4)
   [junit4]   2> 104068 INFO  
(zkCallback-213-thread-1-processing-n:127.0.0.1:38227_py_f%2Fyw) 
[n:127.0.0.1:38227_py_f%2Fyw    ] o.a.s.c.c.ZkStateReader Updated live nodes 
from ZooKeeper... (3) -> (4)
   [junit4]   2> 104252 INFO  
(TEST-TestInPlaceUpdatesDistrib.test-seed#[74DC3FCD315D8132]) 
[n:127.0.0.1:38227_py_f%2Fyw    ] o.a.s.c.CorePropertiesLocator Found 1 core 
definitions underneath 
/export/home/jenkins/workspace/Lucene-Solr-6.x-Solaris/solr/build/solr-core/test/J0/temp/solr.update.TestInPlaceUpdatesDistrib_74DC3FCD315D8132-001/shard-3-001/cores
   [junit4]   2> 104252 INFO  
(TEST-TestInPlaceUpdatesDistrib.test-seed#[74DC3FCD315D8132]) 
[n:127.0.0.1:38227_py_f%2Fyw    ] o.a.s.c.CorePropertiesLocator Cores are: 
[collection1]
   [junit4]   2> 104263 INFO  
(coreLoadExecutor-560-thread-1-processing-n:127.0.0.1:38227_py_f%2Fyw) 
[n:127.0.0.1:38227_py_f%2Fyw c:collection1   x:collection1] 
o.a.s.c.TransientSolrCoreCacheDefault Allocating transient cache for 4 
transient cores
   [junit4]   2> 104287 INFO  
(OverseerStateUpdate-98237714256297989-127.0.0.1:42914_py_f%2Fyw-n_0000000000) 
[n:127.0.0.1:42914_py_f%2Fyw    ] o.a.s.c.o.ReplicaMutator Assigning new node 
to shard shard=shard1
   [junit4]   2> 104439 INFO  
(zkCallback-206-thread-1-processing-n:127.0.0.1:56126_py_f%2Fyw) 
[n:127.0.0.1:56126_py_f%2Fyw    ] o.a.s.c.c.ZkStateReader A cluster state 
change: [WatchedEvent state:SyncConnected type:NodeDataChanged 
path:/collections/collection1/state.json] for collection [collection1] has 
occurred - updating... (live nodes size: [4])
   [junit4]   2> 104448 INFO  
(zkCallback-213-thread-1-processing-n:127.0.0.1:38227_py_f%2Fyw) 
[n:127.0.0.1:38227_py_f%2Fyw    ] o.a.s.c.c.ZkStateReader A cluster state 
change: [WatchedEvent state:SyncConnected type:NodeDataChanged 
path:/collections/collection1/state.json] for collection [collection1] has 
occurred - updating... (live nodes size: [4])
   [junit4]   2> 104448 INFO  
(zkCallback-200-thread-1-processing-n:127.0.0.1:39625_py_f%2Fyw) 
[n:127.0.0.1:39625_py_f%2Fyw    ] o.a.s.c.c.ZkStateReader A cluster state 
change: [WatchedEvent state:SyncConnected type:NodeDataChanged 
path:/collections/collection1/state.json] for collection [collection1] has 
occurred - updating... (live nodes size: [4])
   [junit4]   2> 104515 INFO  (qtp833328225-1371) [n:127.0.0.1:39625_py_f%2Fyw  
  ] o.a.s.h.a.PrepRecoveryOp In WaitForState(recovering): 
collection=collection1, shard=shard1, thisCore=collection1, 
leaderDoesNotNeedRecovery=false, isLeader? true, live=true, checkLive=true, 
currentState=recovering, localState=active, nodeName=127.0.0.1:56126_py_f%2Fyw, 
coreNodeName=core_node2, onlyIfActiveCheckResult=false, nodeProps: 
core_node2:{"core":"collection1","base_url":"https://127.0.0.1:56126/py_f/yw","node_name":"127.0.0.1:56126_py_f%2Fyw","state":"recovering"}
   [junit4]   2> 104515 INFO  (qtp833328225-1371) [n:127.0.0.1:39625_py_f%2Fyw  
  ] o.a.s.h.a.PrepRecoveryOp Waited coreNodeName: core_node2, state: 
recovering, checkLive: true, onlyIfLeader: true for: 1 seconds.
   [junit4]   2> 104515 INFO  (qtp833328225-1371) [n:127.0.0.1:39625_py_f%2Fyw  
  ] o.a.s.s.HttpSolrCall [admin] webapp=null path=/admin/cores 
params={nodeName=127.0.0.1:56126_py_f%252Fyw&onlyIfLeaderActive=true&core=collection1&coreNodeName=core_node2&action=PREPRECOVERY&checkLive=true&state=recovering&onlyIfLeader=true&wt=javabin&version=2}
 status=0 QTime=1008
   [junit4]   2> 105021 INFO  
(recoveryExecutor-204-thread-1-processing-n:127.0.0.1:56126_py_f%2Fyw 
x:collection1 s:shard1 c:collection1 r:core_node2) [n:127.0.0.1:56126_py_f%2Fyw 
c:collection1 s:shard1 r:core_node2 x:collection1] o.a.s.c.RecoveryStrategy 
Attempting to PeerSync from [https://127.0.0.1:39625/py_f/yw/collection1/] - 
recoveringAfterStartup=[true]
   [junit4]   2> 105021 INFO  
(recoveryExecutor-204-thread-1-processing-n:127.0.0.1:56126_py_f%2Fyw 
x:collection1 s:shard1 c:collection1 r:core_node2) [n:127.0.0.1:56126_py_f%2Fyw 
c:collection1 s:shard1 r:core_node2 x:collection1] o.a.s.u.PeerSync PeerSync: 
core=collection1 url=https://127.0.0.1:56126/py_f/yw START 
replicas=[https://127.0.0.1:39625/py_f/yw/collection1/] nUpdates=100
   [junit4]   2> 105038 INFO  (qtp833328225-1367) [n:127.0.0.1:39625_py_f%2Fyw 
c:collection1 s:shard1 r:core_node1 x:collection1] o.a.s.u.IndexFingerprint 
IndexFingerprint millis:0.0 result:{maxVersionSpecified=9223372036854775807, 
maxVersionEncountered=0, maxInHash=0, versionsHash=0, numVersions=0, numDocs=0, 
maxDoc=0}
   [junit4]   2> 105038 INFO  (qtp833328225-1367) [n:127.0.0.1:39625_py_f%2Fyw 
c:collection1 s:shard1 r:core_node1 x:collection1] o.a.s.c.S.Request 
[collection1]  webapp=/py_f/yw path=/get 
params={distrib=false&qt=/get&getFingerprint=9223372036854775807&wt=javabin&version=2}
 status=0 QTime=1
   [junit4]   2> 105040 INFO  
(recoveryExecutor-204-thread-1-processing-n:127.0.0.1:56126_py_f%2Fyw 
x:collection1 s:shard1 c:collection1 r:core_node2) [n:127.0.0.1:56126_py_f%2Fyw 
c:collection1 s:shard1 r:core_node2 x:collection1] o.a.s.u.IndexFingerprint 
IndexFingerprint millis:0.0 result:{maxVersionSpecified=9223372036854775807, 
maxVersionEncountered=0, maxInHash=0, versionsHash=0, numVersions=0, numDocs=0, 
maxDoc=0}
   [junit4]   2> 105040 INFO  
(recoveryExecutor-204-thread-1-processing-n:127.0.0.1:56126_py_f%2Fyw 
x:collection1 s:shard1 c:collection1 r:core_node2) [n:127.0.0.1:56126_py_f%2Fyw 
c:collection1 s:shard1 r:core_node2 x:collection1] o.a.s.u.PeerSync We are 
already in sync. No need to do a PeerSync 
   [junit4]   2> 105040 INFO  
(recoveryExecutor-204-thread-1-processing-n:127.0.0.1:56126_py_f%2Fyw 
x:collection1 s:shard1 c:collection1 r:core_node2) [n:127.0.0.1:56126_py_f%2Fyw 
c:collection1 s:shard1 r:core_node2 x:collection1] o.a.s.u.DirectUpdateHandler2 
start 
commit{,optimize=false,openSearcher=true,waitSearcher=true,expungeDeletes=false,softCommit=false,prepareCommit=false}
   [junit4]   2> 105040 INFO  
(recoveryExecutor-204-thread-1-processing-n:127.0.0.1:56126_py_f%2Fyw 
x:collection1 s:shard1 c:collection1 r:core_node2) [n:127.0.0.1:56126_py_f%2Fyw 
c:collection1 s:shard1 r:core_node2 x:collection1] o.a.s.u.DirectUpdateHandler2 
No uncommitted changes. Skipping IW.commit.
   [junit4]   2> 105041 INFO  
(recoveryExecutor-204-thread-1-processing-n:127.0.0.1:56126_py_f%2Fyw 
x:collection1 s:shard1 c:collection1 r:core_node2) [n:127.0.0.1:56126_py_f%2Fyw 
c:collection1 s:shard1 r:core_node2 x:collection1] o.a.s.u.DirectUpdateHandler2 
end_commit_flush
   [junit4]   2> 105041 INFO  
(recoveryExecutor-204-thread-1-processing-n:127.0.0.1:56126_py_f%2Fyw 
x:collection1 s:shard1 c:collection1 r:core_node2) [n:127.0.0.1:56126_py_f%2Fyw 
c:collection1 s:shard1 r:core_node2 x:collection1] o.a.s.c.RecoveryStrategy 
PeerSync stage of recovery was successful.
   [junit4]   2> 105041 INFO  
(recoveryExecutor-204-thread-1-processing-n:127.0.0.1:56126_py_f%2Fyw 
x:collection1 s:shard1 c:collection1 r:core_node2) [n:127.0.0.1:56126_py_f%2Fyw 
c:collection1 s:shard1 r:core_node2 x:collection1] o.a.s.c.RecoveryStrategy 
Replaying updates buffered during PeerSync.
   [junit4]   2> 105041 INFO  
(recoveryExecutor-204-thread-1-processing-n:127.0.0.1:56126_py_f%2Fyw 
x:collection1 s:shard1 c:collection1 r:core_node2) [n:127.0.0.1:56126_py_f%2Fyw 
c:collection1 s:shard1 r:core_node2 x:collection1] o.a.s.c.RecoveryStrategy No 
replay needed.
   [junit4]   2> 105041 INFO  
(recoveryExecutor-204-thread-1-processing-n:127.0.0.1:56126_py_f%2Fyw 
x:collection1 s:shard1 c:collection1 r:core_node2) [n:127.0.0.1:56126_py_f%2Fyw 
c:collection1 s:shard1 r:core_node2 x:collection1] o.a.s.c.RecoveryStrategy 
Registering as Active after recovery.
   [junit4]   2> 105148 INFO  
(zkCallback-200-thread-1-processing-n:127.0.0.1:39625_py_f%2Fyw) 
[n:127.0.0.1:39625_py_f%2Fyw    ] o.a.s.c.c.ZkStateReader A cluster state 
change: [WatchedEvent state:SyncConnected type:NodeDataChanged 
path:/collections/collection1/state.json] for collection [collection1] has 
occurred - updating... (live nodes size: [4])
   [junit4]   2> 105149 INFO  
(zkCallback-206-thread-1-processing-n:127.0.0.1:56126_py_f%2Fyw) 
[n:127.0.0.1:56126_py_f%2Fyw    ] o.a.s.c.c.ZkStateReader A cluster state 
change: [WatchedEvent state:SyncConnected type:NodeDataChanged 
path:/collections/collection1/state.json] for collection [collection1] has 
occurred - updating... (live nodes size: [4])
   [junit4]   2> 105149 INFO  
(zkCallback-213-thread-1-processing-n:127.0.0.1:38227_py_f%2Fyw) 
[n:127.0.0.1:38227_py_f%2Fyw    ] o.a.s.c.c.ZkStateReader A cluster state 
change: [WatchedEvent state:SyncConnected type:NodeDataChanged 
path:/collections/collection1/state.json] for collection [collection1] has 
occurred - updating... (live nodes size: [4])
   [junit4]   2> 105296 INFO  
(coreLoadExecutor-560-thread-1-processing-n:127.0.0.1:38227_py_f%2Fyw) 
[n:127.0.0.1:38227_py_f%2Fyw c:collection1   x:collection1] o.a.s.c.SolrConfig 
Using Lucene MatchVersion: 6.7.0
   [junit4]   2> 105340 INFO  
(coreLoadExecutor-560-thread-1-processing-n:127.0.0.1:38227_py_f%2Fyw) 
[n:127.0.0.1:38227_py_f%2Fyw c:collection1   x:collection1] o.a.s.s.IndexSchema 
[collection1] Schema name=inplace-updates
   [junit4]   2> 105351 INFO  
(coreLoadExecutor-560-thread-1-processing-n:127.0.0.1:38227_py_f%2Fyw) 
[n:127.0.0.1:38227_py_f%2Fyw c:collection1   x:collection1] o.a.s.s.IndexSchema 
Loaded schema inplace-updates/1.6 with uniqueid field id
   [junit4]   2> 105351 INFO  
(coreLoadExecutor-560-thread-1-processing-n:127.0.0.1:38227_py_f%2Fyw) 
[n:127.0.0.1:38227_py_f%2Fyw c:collection1   x:collection1] 
o.a.s.c.CoreContainer Creating SolrCore 'collection1' using configuration from 
collection collection1, trusted=true
   [junit4]   2> 105351 INFO  
(coreLoadExecutor-560-thread-1-processing-n:127.0.0.1:38227_py_f%2Fyw) 
[n:127.0.0.1:38227_py_f%2Fyw c:collection1   x:collection1] o.a.s.c.SolrCore 
solr.RecoveryStrategy.Builder
   [junit4]   2> 105352 INFO  
(coreLoadExecutor-560-thread-1-processing-n:127.0.0.1:38227_py_f%2Fyw) 
[n:127.0.0.1:38227_py_f%2Fyw c:collection1   x:collection1] o.a.s.c.SolrCore 
[[collection1] ] Opening new SolrCore at 
[/export/home/jenkins/workspace/Lucene-Solr-6.x-Solaris/solr/build/solr-core/test/J0/temp/solr.update.TestInPlaceUpdatesDistrib_74DC3FCD315D8132-001/shard-3-001/cores/collection1],
 
dataDir=[/export/home/jenkins/workspace/Lucene-Solr-6.x-Solaris/solr/build/solr-core/test/J0/temp/solr.update.TestInPlaceUpdatesDistrib_74DC3FCD315D8132-001/shard-3-001/cores/collection1/data/]
   [junit4]   2> 105352 INFO  
(coreLoadExecutor-560-thread-1-processing-n:127.0.0.1:38227_py_f%2Fyw) 
[n:127.0.0.1:38227_py_f%2Fyw c:collection1   x:collection1] 
o.a.s.c.JmxMonitoredMap JMX monitoring is enabled. Adding Solr mbeans to JMX 
Server: com.sun.jmx.mbeanserver.JmxMBeanServer@4c490d54
   [junit4]   2> 105368 WARN  
(coreLoadExecutor-560-thread-1-processing-n:127.0.0.1:38227_py_f%2Fyw) 
[n:127.0.0.1:38227_py_f%2Fyw c:collection1   x:collection1] 
o.a.s.c.RequestHandlers INVALID paramSet a in requestHandler {type = 
requestHandler,name = /dump,class = DumpRequestHandler,attributes = 
{initParams=a, name=/dump, class=DumpRequestHandler},args = 
{defaults={a=A,b=B}}}
   [junit4]   2> 105451 INFO  
(coreLoadExecutor-560-thread-1-processing-n:127.0.0.1:38227_py_f%2Fyw) 
[n:127.0.0.1:38227_py_f%2Fyw c:collection1   x:collection1] 
o.a.s.u.UpdateHandler Using UpdateLog implementation: 
org.apache.solr.update.UpdateLog
   [junit4]   2> 105451 INFO  
(coreLoadExecutor-560-thread-1-processing-n:127.0.0.1:38227_py_f%2Fyw) 
[n:127.0.0.1:38227_py_f%2Fyw c:collection1   x:collection1] o.a.s.u.UpdateLog 
Initializing UpdateLog: dataDir= defaultSyncLevel=FLUSH numRecordsToKeep=100 
maxNumLogsToKeep=10 numVersionBuckets=65536
   [junit4]   2> 105452 INFO  
(coreLoadExecutor-560-thread-1-processing-n:127.0.0.1:38227_py_f%2Fyw) 
[n:127.0.0.1:38227_py_f%2Fyw c:collection1   x:collection1] 
o.a.s.u.CommitTracker Hard AutoCommit: disabled
   [junit4]   2> 105452 INFO  
(coreLoadExecutor-560-thread-1-processing-n:127.0.0.1:38227_py_f%2Fyw) 
[n:127.0.0.1:38227_py_f%2Fyw c:collection1   x:collection1] 
o.a.s.u.CommitTracker Soft AutoCommit: disabled
   [junit4]   2> 105453 INFO  
(coreLoadExecutor-560-thread-1-processing-n:127.0.0.1:38227_py_f%2Fyw) 
[n:127.0.0.1:38227_py_f%2Fyw c:collection1   x:collection1] 
o.a.s.s.SolrIndexSearcher Opening [Searcher@509be90c[collection1] main]
   [junit4]   2> 105471 INFO  
(coreLoadExecutor-560-thread-1-processing-n:127.0.0.1:38227_py_f%2Fyw) 
[n:127.0.0.1:38227_py_f%2Fyw c:collection1   x:collection1] 
o.a.s.r.ManagedResourceStorage Configured ZooKeeperStorageIO with znodeBase: 
/configs/conf1
   [junit4]   2> 105475 INFO  
(coreLoadExecutor-560-thread-1-processing-n:127.0.0.1:38227_py_f%2Fyw) 
[n:127.0.0.1:38227_py_f%2Fyw c:collection1   x:collection1] 
o.a.s.r.ManagedResourceStorage Loaded null at path _rest_managed.json using 
ZooKeeperStorageIO:path=/configs/conf1
   [junit4]   2> 105475 INFO  
(coreLoadExecutor-560-thread-1-processing-n:127.0.0.1:38227_py_f%2Fyw) 
[n:127.0.0.1:38227_py_f%2Fyw c:collection1   x:collection1] 
o.a.s.h.ReplicationHandler Commits will be reserved for  10000
   [junit4]   2> 105475 INFO  
(coreLoadExecutor-560-thread-1-processing-n:127.0.0.1:38227_py_f%2Fyw) 
[n:127.0.0.1:38227_py_f%2Fyw c:collection1   x:collection1] o.a.s.u.UpdateLog 
Could not find max version in index or recent updates, using new clock 
1571803436533415936
   [junit4]   2> 105504 INFO  
(searcherExecutor-561-thread-1-processing-n:127.0.0.1:38227_py_f%2Fyw 
x:collection1 c:collection1) [n:127.0.0.1:38227_py_f%2Fyw c:collection1   
x:collection1] o.a.s.c.SolrCore [collection1] Registered new searcher 
Searcher@509be90c[collection1] 
main{ExitableDirectoryReader(UninvertingDirectoryReader())}
   [junit4]   2> 105513 INFO  
(coreZkRegister-555-thread-1-processing-n:127.0.0.1:38227_py_f%2Fyw 
x:collection1 c:collection1) [n:127.0.0.1:38227_py_f%2Fyw c:collection1 
s:shard1 r:core_node3 x:collection1] o.a.s.c.ZkController Core needs to 
recover:collection1
   [junit4]   2> 105514 INFO  
(updateExecutor-210-thread-1-processing-n:127.0.0.1:38227_py_f%2Fyw 
x:collection1 s:shard1 c:collection1 r:core_node3) [n:127.0.0.1:38227_py_f%2Fyw 
c:collection1 s:shard1 r:core_node3 x:collection1] o.a.s.u.DefaultSolrCoreState 
Running recovery
   [junit4]   2> 105519 INFO  
(recoveryExecutor-211-thread-1-processing-n:127.0.0.1:38227_py_f%2Fyw 
x:collection1 s:shard1 c:collection1 r:core_node3) [n:127.0.0.1:38227_py_f%2Fyw 
c:collection1 s:shard1 r:core_node3 x:collection1] o.a.s.c.RecoveryStrategy 
Starting recovery process. recoveringAfterStartup=true
   [junit4]   2> 105523 INFO  
(recoveryExecutor-211-thread-1-processing-n:127.0.0.1:38227_py_f%2Fyw 
x:collection1 s:shard1 c:collection1 r:core_node3) [n:127.0.0.1:38227_py_f%2Fyw 
c:collection1 s:shard1 r:core_node3 x:collection1] o.a.s.c.RecoveryStrategy 
###### startupVersions=[[]]
   [junit4]   2> 105523 INFO  
(recoveryExecutor-211-thread-1-processing-n:127.0.0.1:38227_py_f%2Fyw 
x:collection1 s:shard1 c:collection1 r:core_node3) [n:127.0.0.1:38227_py_f%2Fyw 
c:collection1 s:shard1 r:core_node3 x:collection1] o.a.s.c.RecoveryStrategy 
Begin buffering updates. core=[collection1]
   [junit4]   2> 105523 INFO  
(recoveryExecutor-211-thread-1-processing-n:127.0.0.1:38227_py_f%2Fyw 
x:collection1 s:shard1 c:collection1 r:core_node3) [n:127.0.0.1:38227_py_f%2Fyw 
c:collection1 s:shard1 r:core_node3 x:collection1] o.a.s.u.UpdateLog Starting 
to buffer updates. FSUpdateLog{state=ACTIVE, tlog=null}
   [junit4]   2> 105523 INFO  
(recoveryExecutor-211-thread-1-processing-n:127.0.0.1:38227_py_f%2Fyw 
x:collection1 s:shard1 c:collection1 r:core_node3) [n:127.0.0.1:38227_py_f%2Fyw 
c:collection1 s:shard1 r:core_node3 x:collection1] o.a.s.c.RecoveryStrategy 
Publishing state of core [collection1] as recovering, leader is 
[https://127.0.0.1:39625/py_f/yw/collection1/] and I am 
[https://127.0.0.1:38227/py_f/yw/collection1/]
   [junit4]   2> 105548 INFO  
(recoveryExecutor-211-thread-1-processing-n:127.0.0.1:38227_py_f%2Fyw 
x:collection1 s:shard1 c:collection1 r:core_node3) [n:127.0.0.1:38227_py_f%2Fyw 
c:collection1 s:shard1 r:core_node3 x:collection1] o.a.s.c.RecoveryStrategy 
Sending prep recovery command to [https://127.0.0.1:39625/py_f/yw]; 
[WaitForState: 
action=PREPRECOVERY&core=collection1&nodeName=127.0.0.1:38227_py_f%252Fyw&coreNodeName=core_node3&state=recovering&checkLive=true&onlyIfLeader=true&onlyIfLeaderActive=true]
   [junit4]   2> 105587 INFO  (qtp833328225-1373) [n:127.0.0.1:39625_py_f%2Fyw  
  ] o.a.s.h.a.PrepRecoveryOp Going to wait for coreNodeName: core_node3, state: 
recovering, checkLive: true, onlyIfLeader: true, onlyIfLeaderActive: true
   [junit4]   2> 105591 INFO  (qtp833328225-1373) [n:127.0.0.1:39625_py_f%2Fyw  
  ] o.a.s.h.a.PrepRecoveryOp Will wait a max of 183 seconds to see collection1 
(shard1 of collection1) have state: recovering
   [junit4]   2> 105591 INFO  (qtp833328225-1373) [n:127.0.0.1:39625_py_f%2Fyw  
  ] o.a.s.h.a.PrepRecoveryOp In WaitForState(recovering): 
collection=collection1, shard=shard1, thisCore=collection1, 
leaderDoesNotNeedRecovery=false, isLeader? true, live=true, checkLive=true, 
currentState=down, localState=active, nodeName=127.0.0.1:38227_py_f%2Fyw, 
coreNodeName=core_node3, onlyIfActiveCheckResult=false, nodeProps: 
core_node3:{"core":"collection1","base_url":"https://127.0.0.1:38227/py_f/yw","node_name":"127.0.0.1:38227_py_f%2Fyw","state":"down"}
   [junit4]   2> 105660 INFO  
(zkCallback-200-thread-1-processing-n:127.0.0.1:39625_py_f%2Fyw) 
[n:127.0.0.1:39625_py_f%2Fyw    ] o.a.s.c.c.ZkStateReader A cluster state 
change: [WatchedEvent state:SyncConnected type:NodeDataChanged 
path:/collections/collection1/state.json] for collection [collection1] has 
occurred - updating... (live nodes size: [4])
   [junit4]   2> 105661 INFO  
(zkCallback-206-thread-1-processing-n:127.0.0.1:56126_py_f%2Fyw) 
[n:127.0.0.1:56126_py_f%2Fyw    ] o.a.s.c.c.ZkStateReader A cluster state 
change: [WatchedEvent state:SyncConnected type:NodeDataChanged 
path:/collections/collection1/state.json] for collection [collection1] has 
occurred - updating... (live nodes size: [4])
   [junit4]   2> 105661 INFO  
(zkCallback-213-thread-1-processing-n:127.0.0.1:38227_py_f%2Fyw) 
[n:127.0.0.1:38227_py_f%2Fyw    ] o.a.s.c.c.ZkStateReader A cluster state 
change: [WatchedEvent state:SyncConnected type:NodeDataChanged 
path:/collections/collection1/state.json] for collection [collection1] has 
occurred - updating... (live nodes size: [4])
   [junit4]   2> 105772 INFO  
(TEST-TestInPlaceUpdatesDistrib.test-seed#[74DC3FCD315D8132]) [    ] 
o.a.s.SolrTestCaseJ4 ###Starting test
   [junit4]   2> 105772 INFO  
(TEST-TestInPlaceUpdatesDistrib.test-seed#[74DC3FCD315D8132]) [    ] 
o.a.s.c.AbstractDistribZkTestBase Wait for recoveries to finish - collection: 
collection1 failOnTimeout:true timeout (sec):330
   [junit4]   1> -
   [junit4]   1> replica:core_node1 rstate:active live:true
   [junit4]   1> replica:core_node2 rstate:active live:true
   [junit4]   1> replica:core_node3 rstate:recovering live:true
   [junit4]   2> 106594 INFO  (qtp833328225-1373) [n:127.0.0.1:39625_py_f%2Fyw  
  ] o.a.s.h.a.PrepRecoveryOp In WaitForState(recovering): 
collection=collection1, shard=shard1, thisCore=collection1, 
leaderDoesNotNeedRecovery=false, isLeader? true, live=true, checkLive=true, 
currentState=recovering, localState=active, nodeName=127.0.0.1:38227_py_f%2Fyw, 
coreNodeName=core_node3, onlyIfActiveCheckResult=false, nodeProps: 
core_node3:{"core":"collection1","base_url":"https://127.0.0.1:38227/py_f/yw","node_name":"127.0.0.1:38227_py_f%2Fyw","state":"recovering"}
   [junit4]   2> 106595 INFO  (qtp833328225-1373) [n:127.0.0.1:39625_py_f%2Fyw  
  ] o.a.s.h.a.PrepRecoveryOp Waited coreNodeName: core_node3, state: 
recovering, checkLive: true, onlyIfLeader: true for: 1 seconds.
   [junit4]   2> 106595 INFO  (qtp833328225-1373) [n:127.0.0.1:39625_py_f%2Fyw  
  ] o.a.s.s.HttpSolrCall [admin] webapp=null path=/admin/cores 
params={nodeName=127.0.0.1:38227_py_f%252Fyw&onlyIfLeaderActive=true&core=collection1&coreNodeName=core_node3&action=PREPRECOVERY&checkLive=true&state=recovering&onlyIfLeader=true&wt=javabin&version=2}
 status=0 QTime=1007
   [junit4]   1> -
   [junit4]   1> replica:core_node1 rstate:active live:true
   [junit4]   1> replica:core_node2 rstate:active live:true
   [junit4]   1> replica:core_node3 rstate:recovering live:true
   [junit4]   2> 107098 INFO  
(recoveryExecutor-211-thread-1-processing-n:127.0.0.1:38227_py_f%2Fyw 
x:collection1 s:shard1 c:collection1 r:core_node3) [n:127.0.0.1:38227_py_f%2Fyw 
c:collection1 s:shard1 r:core_node3 x:collection1] o.a.s.c.RecoveryStrategy 
Attempting to PeerSync from [https://127.0.0.1:39625/py_f/yw/collection1/] - 
recoveringAfterStartup=[true]
   [junit4]   2> 107098 INFO  
(recoveryExecutor-211-thread-1-processing-n:127.0.0.1:38227_py_f%2Fyw 
x:collection1 s:shard1 c:collection1 r:core_node3) [n:127.0.0.1:38227_py_f%2Fyw 
c:collection1 s:shard1 r:core_node3 x:collection1] o.a.s.u.PeerSync PeerSync: 
core=collection1 url=https://127.0.0.1:38227/py_f/yw START 
replicas=[https://127.0.0.1:39625/py_f/yw/collection1/] nUpdates=100
   [junit4]   2> 107108 INFO  (qtp833328225-1371) [n:127.0.0.1:39625_py_f%2Fyw 
c:collection1 s:shard1 r:core_node1 x:collection1] o.a.s.u.IndexFingerprint 
IndexFingerprint millis:0.0 result:{maxVersionSpecified=9223372036854775807, 
maxVersionEncountered=0, maxInHash=0, versionsHash=0, numVersions=0, numDocs=0, 
maxDoc=0}
   [junit4]   2> 107108 INFO  (qtp833328225-1371) [n:127.0.0.1:39625_py_f%2Fyw 
c:collection1 s:shard1 r:core_node1 x:collection1] o.a.s.c.S.Request 
[collection1]  webapp=/py_f/yw path=/get 
params={distrib=false&qt=/get&getFingerprint=9223372036854775807&wt=javabin&version=2}
 status=0 QTime=0
   [junit4]   2> 107109 INFO  
(recoveryExecutor-211-thread-1-processing-n:127.0.0.1:38227_py_f%2Fyw 
x:collection1 s:shard1 c:collection1 r:core_node3) [n:127.0.0.1:38227_py_f%2Fyw 
c:collection1 s:shard1 r:core_node3 x:collection1] o.a.s.u.IndexFingerprint 
IndexFingerprint millis:0.0 result:{maxVersionSpecified=9223372036854775807, 
maxVersionEncountered=0, maxInHash=0, versionsHash=0, numVersions=0, numDocs=0, 
maxDoc=0}
   [junit4]   2> 107109 INFO  
(recoveryExecutor-211-thread-1-processing-n:127.0.0.1:38227_py_f%2Fyw 
x:collection1 s:shard1 c:collection1 r:core_node3) [n:127.0.0.1:38227_py_f%2Fyw 
c:collection1 s:shard1 r:core_node3 x:collection1] o.a.s.u.PeerSync We are 
already in sync. No need to do a PeerSync 
   [junit4]   2> 107109 INFO  
(recoveryExecutor-211-thread-1-processing-n:127.0.0.1:38227_py_f%2Fyw 
x:collection1 s:shard1 c:collection1 r:core_node3) [n:127.0.0.1:38227_py_f%2Fyw 
c:collection1 s:shard1 r:core_node3 x:collection1] o.a.s.u.DirectUpdateHandler2 
start 
commit{,optimize=false,openSearcher=true,waitSearcher=true,expungeDeletes=false,softCommit=false,prepareCommit=false}
   [junit4]   2> 107109 INFO  
(recoveryExecutor-211-thread-1-processing-n:127.0.0.1:38227_py_f%2Fyw 
x:collection1 s:shard1 c:collection1 r:core_node3) [n:127.0.0.1:38227_py_f%2Fyw 
c:collection1 s:shard1 r:core_node3 x:collection1] o.a.s.u.DirectUpdateHandler2 
No uncommitted changes. Skipping IW.commit.
   [junit4]   2> 107110 INFO  
(recoveryExecutor-211-thread-1-processing-n:127.0.0.1:38227_py_f%2Fyw 
x:collection1 s:shard1 c:collection1 r:core_node3) [n:127.0.0.1:38227_py_f%2Fyw 
c:collection1 s:shard1 r:core_node3 x:collection1] o.a.s.u.DirectUpdateHandler2 
end_commit_flush
   [junit4]   2> 107110 INFO  
(recoveryExecutor-211-thread-1-processing-n:127.0.0.1:38227_py_f%2Fyw 
x:collection1 s:shard1 c:collection1 r:core_node3) [n:127.0.0.1:38227_py_f%2Fyw 
c:collection1 s:shard1 r:core_node3 x:collection1] o.a.s.c.RecoveryStrategy 
PeerSync stage of recovery was successful.
   [junit4]   2> 107110 INFO  
(recoveryExecutor-211-thread-1-processing-n:127.0.0.1:38227_py_f%2Fyw 
x:collection1 s:shard1 c:collection1 r:core_node3) [n:127.0.0.1:38227_py_f%2Fyw 
c:collection1 s:shard1 r:core_node3 x:collection1] o.a.s.c.RecoveryStrategy 
Replaying updates buffered during PeerSync.
   [junit4]   2> 107110 INFO  
(recoveryExecutor-211-thread-1-processing-n:127.0.0.1:38227_py_f%2Fyw 
x:collection1 s:shard1 c:collection1 r:core_node3) [n:127.0.0.1:38227_py_f%2Fyw 
c:collection1 s:shard1 r:core_node3 x:collection1] o.a.s.c.RecoveryStrategy No 
replay needed.
   [junit4]   2> 107110 INFO  
(recoveryExecutor-211-thread-1-processing-n:127.0.0.1:38227_py_f%2Fyw 
x:collection1 s:shard1 c:collection1 r:core_node3) [n:127.0.0.1:38227_py_f%2Fyw 
c:collection1 s:shard1 r:core_node3 x:collection1] o.a.s.c.RecoveryStrategy 
Registering as Active after recovery.
   [junit4]   2> 107216 INFO  
(zkCallback-200-thread-1-processing-n:127.0.0.1:39625_py_f%2Fyw) [n:12

[...truncated too long message...]

Fyw x:collection1 s:shard1 c:collection1 r:core_node2) 
[n:127.0.0.1:56126_py_f%2Fyw c:collection1 s:shard1 r:core_node2 x:collection1] 
o.a.s.c.RecoveryStrategy Stopping recovery for core=[collection1] 
coreNodeName=[core_node2]
   [junit4]   2> 147841 INFO  
(recoveryExecutor-204-thread-1-processing-n:127.0.0.1:56126_py_f%2Fyw 
x:collection1 s:shard1 c:collection1 r:core_node2) [n:127.0.0.1:56126_py_f%2Fyw 
c:collection1 s:shard1 r:core_node2 x:collection1] o.a.s.m.SolrMetricManager 
Closing metric reporters for: solr.core.collection1
   [junit4]   2> 147851 WARN  
(updateExecutor-203-thread-1-processing-n:127.0.0.1:56126_py_f%2Fyw 
x:collection1 s:shard1 c:collection1 r:core_node2) [n:127.0.0.1:56126_py_f%2Fyw 
c:collection1 s:shard1 r:core_node2 x:collection1] o.a.s.u.DefaultSolrCoreState 
Skipping recovery because Solr is shutdown
   [junit4]   2> 147857 INFO  
(TEST-TestInPlaceUpdatesDistrib.test-seed#[74DC3FCD315D8132]) [    ] 
o.a.s.c.Overseer Overseer 
(id=98237714256297997-127.0.0.1:56126_py_f%2Fyw-n_0000000002) closing
   [junit4]   2> 147858 INFO  
(OverseerStateUpdate-98237714256297997-127.0.0.1:56126_py_f%2Fyw-n_0000000002) 
[n:127.0.0.1:56126_py_f%2Fyw    ] o.a.s.c.Overseer Overseer Loop exiting : 
127.0.0.1:56126_py_f%2Fyw
   [junit4]   2> 147863 INFO  
(zkCallback-213-thread-1-processing-n:127.0.0.1:38227_py_f%2Fyw) 
[n:127.0.0.1:38227_py_f%2Fyw    ] o.a.s.c.OverseerElectionContext I am going to 
be the leader 127.0.0.1:38227_py_f%2Fyw
   [junit4]   2> 147866 INFO  
(zkCallback-213-thread-1-processing-n:127.0.0.1:38227_py_f%2Fyw) 
[n:127.0.0.1:38227_py_f%2Fyw    ] o.a.s.c.Overseer Overseer 
(id=98237714256298000-127.0.0.1:38227_py_f%2Fyw-n_0000000003) starting
   [junit4]   2> 147882 INFO  
(zkCallback-213-thread-2-processing-n:127.0.0.1:38227_py_f%2Fyw) 
[n:127.0.0.1:38227_py_f%2Fyw    ] o.a.s.c.c.ZkStateReader Updated live nodes 
from ZooKeeper... (2) -> (1)
   [junit4]   2> 149363 WARN  
(zkCallback-206-thread-1-processing-n:127.0.0.1:56126_py_f%2Fyw) 
[n:127.0.0.1:56126_py_f%2Fyw    ] o.a.s.c.c.ZkStateReader ZooKeeper watch 
triggered, but Solr cannot talk to ZK: [KeeperErrorCode = Session expired for 
/live_nodes]
   [junit4]   2> 149365 INFO  
(TEST-TestInPlaceUpdatesDistrib.test-seed#[74DC3FCD315D8132]) [    ] 
o.e.j.s.AbstractConnector Stopped ServerConnector@26e02f21{SSL,[ssl, 
http/1.1]}{127.0.0.1:0}
   [junit4]   2> 149365 INFO  
(TEST-TestInPlaceUpdatesDistrib.test-seed#[74DC3FCD315D8132]) [    ] 
o.e.j.s.h.ContextHandler Stopped 
o.e.j.s.ServletContextHandler@56f9b66b{/py_f/yw,null,UNAVAILABLE}
   [junit4]   2> 149367 INFO  
(TEST-TestInPlaceUpdatesDistrib.test-seed#[74DC3FCD315D8132]) [    ] 
o.a.s.c.ChaosMonkey monkey: stop shard! 38227
   [junit4]   2> 149367 INFO  
(TEST-TestInPlaceUpdatesDistrib.test-seed#[74DC3FCD315D8132]) [    ] 
o.a.s.c.CoreContainer Shutting down CoreContainer instance=497015078
   [junit4]   2> 149367 INFO  
(TEST-TestInPlaceUpdatesDistrib.test-seed#[74DC3FCD315D8132]) [    ] 
o.a.s.m.SolrMetricManager Closing metric reporters for: solr.node
   [junit4]   2> 149368 INFO  
(TEST-TestInPlaceUpdatesDistrib.test-seed#[74DC3FCD315D8132]) [    ] 
o.a.s.m.SolrMetricManager Closing metric reporters for: solr.jvm
   [junit4]   2> 149368 INFO  
(TEST-TestInPlaceUpdatesDistrib.test-seed#[74DC3FCD315D8132]) [    ] 
o.a.s.m.SolrMetricManager Closing metric reporters for: solr.jetty
   [junit4]   2> 149387 INFO  (coreCloseExecutor-580-thread-1) 
[n:127.0.0.1:38227_py_f%2Fyw c:collection1 s:shard1 r:core_node3 x:collection1] 
o.a.s.c.SolrCore [collection1]  CLOSING SolrCore 
org.apache.solr.core.SolrCore@45b7729e
   [junit4]   2> 149393 INFO  
(zkCallback-213-thread-1-processing-n:127.0.0.1:38227_py_f%2Fyw) 
[n:127.0.0.1:38227_py_f%2Fyw    ] o.a.s.c.c.ZkStateReader A cluster state 
change: [WatchedEvent state:SyncConnected type:NodeDataChanged 
path:/collections/collection1/state.json] for collection [collection1] has 
occurred - updating... (live nodes size: [1])
   [junit4]   2> 149429 INFO  (coreCloseExecutor-580-thread-1) 
[n:127.0.0.1:38227_py_f%2Fyw c:collection1 s:shard1 r:core_node3 x:collection1] 
o.a.s.m.SolrMetricManager Closing metric reporters for: solr.core.collection1
   [junit4]   2> 149439 INFO  
(TEST-TestInPlaceUpdatesDistrib.test-seed#[74DC3FCD315D8132]) [    ] 
o.a.s.c.Overseer Overseer 
(id=98237714256298000-127.0.0.1:38227_py_f%2Fyw-n_0000000003) closing
   [junit4]   2> 149439 INFO  
(OverseerStateUpdate-98237714256298000-127.0.0.1:38227_py_f%2Fyw-n_0000000003) 
[n:127.0.0.1:38227_py_f%2Fyw    ] o.a.s.c.Overseer Overseer Loop exiting : 
127.0.0.1:38227_py_f%2Fyw
   [junit4]   2> 149453 INFO  
(TEST-TestInPlaceUpdatesDistrib.test-seed#[74DC3FCD315D8132]) [    ] 
o.e.j.s.AbstractConnector Stopped ServerConnector@6947848e{SSL,[ssl, 
http/1.1]}{127.0.0.1:0}
   [junit4]   2> 149453 INFO  
(TEST-TestInPlaceUpdatesDistrib.test-seed#[74DC3FCD315D8132]) [    ] 
o.e.j.s.h.ContextHandler Stopped 
o.e.j.s.ServletContextHandler@70cfae0f{/py_f/yw,null,UNAVAILABLE}
   [junit4]   2> 149459 ERROR 
(TEST-TestInPlaceUpdatesDistrib.test-seed#[74DC3FCD315D8132]) [    ] 
o.a.z.s.ZooKeeperServer ZKShutdownHandler is not registered, so ZooKeeper 
server won't take any action on ERROR or SHUTDOWN server state changes
   [junit4]   2> 149464 INFO  
(TEST-TestInPlaceUpdatesDistrib.test-seed#[74DC3FCD315D8132]) [    ] 
o.a.s.c.ZkTestServer connecting to 127.0.0.1:42694 42694
   [junit4]   2> 149523 INFO  (Thread-427) [    ] o.a.s.c.ZkTestServer 
connecting to 127.0.0.1:42694 42694
   [junit4]   2> 149524 WARN  (Thread-427) [    ] o.a.s.c.ZkTestServer Watch 
limit violations: 
   [junit4]   2> Maximum concurrent create/delete watches above limit:
   [junit4]   2> 
   [junit4]   2>        5       /solr/aliases.json
   [junit4]   2>        4       /solr/security.json
   [junit4]   2>        4       /solr/configs/conf1
   [junit4]   2> 
   [junit4]   2> Maximum concurrent data watches above limit:
   [junit4]   2> 
   [junit4]   2>        5       /solr/clusterstate.json
   [junit4]   2>        5       /solr/clusterprops.json
   [junit4]   2>        3       /solr/collections/collection1/state.json
   [junit4]   2> 
   [junit4]   2> Maximum concurrent children watches above limit:
   [junit4]   2> 
   [junit4]   2>        5       /solr/live_nodes
   [junit4]   2>        5       /solr/collections
   [junit4]   2>        4       /solr/overseer/queue
   [junit4]   2>        4       /solr/overseer/collection-queue-work
   [junit4]   2>        3       /solr/overseer/queue-work
   [junit4]   2> 
   [junit4]   2> NOTE: reproduce with: ant test  
-Dtestcase=TestInPlaceUpdatesDistrib -Dtests.method=test 
-Dtests.seed=74DC3FCD315D8132 -Dtests.slow=true -Dtests.locale=bg-BG 
-Dtests.timezone=Europe/Kirov -Dtests.asserts=true 
-Dtests.file.encoding=US-ASCII
   [junit4] FAILURE 52.1s J0 | TestInPlaceUpdatesDistrib.test <<<
   [junit4]    > Throwable #1: java.lang.AssertionError: The replica receiving 
reordered updates must not have gone down expected:<3> but was:<2>
   [junit4]    >        at 
__randomizedtesting.SeedInfo.seed([74DC3FCD315D8132:FC8800179FA1ECCA]:0)
   [junit4]    >        at 
org.apache.solr.update.TestInPlaceUpdatesDistrib.delayedReorderingFetchesMissingUpdateFromLeaderTest(TestInPlaceUpdatesDistrib.java:881)
   [junit4]    >        at 
org.apache.solr.update.TestInPlaceUpdatesDistrib.test(TestInPlaceUpdatesDistrib.java:157)
   [junit4]    >        at 
org.apache.solr.BaseDistributedSearchTestCase$ShardsRepeatRule$ShardsFixedStatement.callStatement(BaseDistributedSearchTestCase.java:992)
   [junit4]    >        at 
org.apache.solr.BaseDistributedSearchTestCase$ShardsRepeatRule$ShardsStatement.evaluate(BaseDistributedSearchTestCase.java:967)
   [junit4]    >        at java.lang.Thread.run(Thread.java:748)
   [junit4]   2> 149531 INFO  
(SUITE-TestInPlaceUpdatesDistrib-seed#[74DC3FCD315D8132]-worker) [    ] 
o.a.s.SolrTestCaseJ4 ###deleteCore
   [junit4]   2> 149531 INFO  
(SUITE-TestInPlaceUpdatesDistrib-seed#[74DC3FCD315D8132]-worker) [    ] 
o.a.s.c.CoreContainer Shutting down CoreContainer instance=423044038
   [junit4]   2> 149531 INFO  
(SUITE-TestInPlaceUpdatesDistrib-seed#[74DC3FCD315D8132]-worker) [    ] 
o.a.s.m.SolrMetricManager Closing metric reporters for: solr.node
   [junit4]   2> 149532 INFO  
(SUITE-TestInPlaceUpdatesDistrib-seed#[74DC3FCD315D8132]-worker) [    ] 
o.a.s.m.SolrMetricManager Closing metric reporters for: solr.jvm
   [junit4]   2> 149533 INFO  
(SUITE-TestInPlaceUpdatesDistrib-seed#[74DC3FCD315D8132]-worker) [    ] 
o.a.s.m.SolrMetricManager Closing metric reporters for: solr.jetty
   [junit4]   2> 149535 INFO  (coreCloseExecutor-582-thread-1) [    
x:collection1] o.a.s.c.SolrCore [collection1]  CLOSING SolrCore 
org.apache.solr.core.SolrCore@6db197f3
   [junit4]   2> 149552 INFO  (coreCloseExecutor-582-thread-1) [    
x:collection1] o.a.s.m.SolrMetricManager Closing metric reporters for: 
solr.core.collection1
   [junit4]   2> NOTE: leaving temporary files on disk at: 
/export/home/jenkins/workspace/Lucene-Solr-6.x-Solaris/solr/build/solr-core/test/J0/temp/solr.update.TestInPlaceUpdatesDistrib_74DC3FCD315D8132-001
   [junit4]   2> Jul 02, 2017 9:43:31 AM 
com.carrotsearch.randomizedtesting.ThreadLeakControl checkThreadLeaks
   [junit4]   2> WARNING: Will linger awaiting termination of 1 leaked 
thread(s).
   [junit4]   2> NOTE: test params are: codec=CheapBastard, 
sim=RandomSimilarity(queryNorm=false,coord=crazy): {}, locale=bg-BG, 
timezone=Europe/Kirov
   [junit4]   2> NOTE: SunOS 5.11 amd64/Oracle Corporation 1.8.0_131 
(64-bit)/cpus=3,threads=1,free=38279392,total=150622208
   [junit4]   2> NOTE: All tests run in this JVM: [ShardRoutingTest, 
HdfsThreadLeakTest, TestInitQParser, RemoteQueryErrorTest, UpdateParamsTest, 
QueryParsingTest, TestSolrDynamicMBean, ClusterStateTest, 
TestSuggestSpellingConverter, V2ApiIntegrationTest, 
CloneFieldUpdateProcessorFactoryTest, TestQuerySenderNoQuery, 
PreAnalyzedFieldTest, DeleteStatusTest, AnalyticsMergeStrategyTest, 
URLClassifyProcessorTest, DistributedSuggestComponentTest, 
HighlighterMaxOffsetTest, AnalysisAfterCoreReloadTest, 
TestInPlaceUpdatesDistrib]
   [junit4] Completed [24/713 (1!)] on J0 in 52.79s, 1 test, 1 failure <<< 
FAILURES!

[...truncated 45798 lines...]
---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]

Reply via email to