Build: https://jenkins.thetaphi.de/job/Lucene-Solr-6.x-Linux/2355/
Java: 64bit/jdk1.8.0_102 -XX:-UseCompressedOops -XX:+UseConcMarkSweepGC

1 test failed.
FAILED:  org.apache.solr.cloud.HttpPartitionTest.test

Error Message:


Stack Trace:
java.lang.NullPointerException
        at __randomizedtesting.SeedInfo.seed([3A32EC387A633C72:B266D3E2D49F518A]:0)
        at org.apache.solr.client.solrj.impl.CloudSolrClient.requestWithRetryOnStaleState(CloudSolrClient.java:1143)
        at org.apache.solr.client.solrj.impl.CloudSolrClient.request(CloudSolrClient.java:1037)
        at org.apache.solr.client.solrj.SolrClient.request(SolrClient.java:1219)
        at org.apache.solr.cloud.HttpPartitionTest.sendDoc(HttpPartitionTest.java:609)
        at org.apache.solr.cloud.HttpPartitionTest.sendDoc(HttpPartitionTest.java:595)
        at org.apache.solr.cloud.HttpPartitionTest.testRf2(HttpPartitionTest.java:294)
        at org.apache.solr.cloud.HttpPartitionTest.test(HttpPartitionTest.java:125)
        at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
        at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)
        at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
        at java.lang.reflect.Method.invoke(Method.java:498)
        at com.carrotsearch.randomizedtesting.RandomizedRunner.invoke(RandomizedRunner.java:1713)
        at com.carrotsearch.randomizedtesting.RandomizedRunner$8.evaluate(RandomizedRunner.java:907)
        at com.carrotsearch.randomizedtesting.RandomizedRunner$9.evaluate(RandomizedRunner.java:943)
        at com.carrotsearch.randomizedtesting.RandomizedRunner$10.evaluate(RandomizedRunner.java:957)
        at org.apache.solr.BaseDistributedSearchTestCase$ShardsRepeatRule$ShardsFixedStatement.callStatement(BaseDistributedSearchTestCase.java:992)
        at org.apache.solr.BaseDistributedSearchTestCase$ShardsRepeatRule$ShardsStatement.evaluate(BaseDistributedSearchTestCase.java:967)
        at com.carrotsearch.randomizedtesting.rules.SystemPropertiesRestoreRule$1.evaluate(SystemPropertiesRestoreRule.java:57)
        at org.apache.lucene.util.TestRuleSetupTeardownChained$1.evaluate(TestRuleSetupTeardownChained.java:49)
        at org.apache.lucene.util.AbstractBeforeAfterRule$1.evaluate(AbstractBeforeAfterRule.java:45)
        at org.apache.lucene.util.TestRuleThreadAndTestName$1.evaluate(TestRuleThreadAndTestName.java:48)
        at org.apache.lucene.util.TestRuleIgnoreAfterMaxFailures$1.evaluate(TestRuleIgnoreAfterMaxFailures.java:64)
        at org.apache.lucene.util.TestRuleMarkFailure$1.evaluate(TestRuleMarkFailure.java:47)
        at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
        at com.carrotsearch.randomizedtesting.ThreadLeakControl$StatementRunner.run(ThreadLeakControl.java:367)
        at com.carrotsearch.randomizedtesting.ThreadLeakControl.forkTimeoutingTask(ThreadLeakControl.java:811)
        at com.carrotsearch.randomizedtesting.ThreadLeakControl$3.evaluate(ThreadLeakControl.java:462)
        at com.carrotsearch.randomizedtesting.RandomizedRunner.runSingleTest(RandomizedRunner.java:916)
        at com.carrotsearch.randomizedtesting.RandomizedRunner$5.evaluate(RandomizedRunner.java:802)
        at com.carrotsearch.randomizedtesting.RandomizedRunner$6.evaluate(RandomizedRunner.java:852)
        at com.carrotsearch.randomizedtesting.RandomizedRunner$7.evaluate(RandomizedRunner.java:863)
        at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
        at com.carrotsearch.randomizedtesting.rules.SystemPropertiesRestoreRule$1.evaluate(SystemPropertiesRestoreRule.java:57)
        at org.apache.lucene.util.AbstractBeforeAfterRule$1.evaluate(AbstractBeforeAfterRule.java:45)
        at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
        at org.apache.lucene.util.TestRuleStoreClassName$1.evaluate(TestRuleStoreClassName.java:41)
        at com.carrotsearch.randomizedtesting.rules.NoShadowingOrOverridesOnMethodsRule$1.evaluate(NoShadowingOrOverridesOnMethodsRule.java:40)
        at com.carrotsearch.randomizedtesting.rules.NoShadowingOrOverridesOnMethodsRule$1.evaluate(NoShadowingOrOverridesOnMethodsRule.java:40)
        at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
        at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
        at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
        at org.apache.lucene.util.TestRuleAssertionsRequired$1.evaluate(TestRuleAssertionsRequired.java:53)
        at org.apache.lucene.util.TestRuleMarkFailure$1.evaluate(TestRuleMarkFailure.java:47)
        at org.apache.lucene.util.TestRuleIgnoreAfterMaxFailures$1.evaluate(TestRuleIgnoreAfterMaxFailures.java:64)
        at org.apache.lucene.util.TestRuleIgnoreTestSuites$1.evaluate(TestRuleIgnoreTestSuites.java:54)
        at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
        at com.carrotsearch.randomizedtesting.ThreadLeakControl$StatementRunner.run(ThreadLeakControl.java:367)
        at java.lang.Thread.run(Thread.java:745)
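
For orientation: the NPE is thrown inside CloudSolrClient.requestWithRetryOnStaleState while HttpPartitionTest.sendDoc is indexing a document through SolrJ. A minimal sketch of that kind of call path in SolrJ 6.x follows; it is hypothetical (class name, ZK address, collection name, and document id are illustrative) and is not the test's actual sendDoc() code.

    // Hypothetical sketch only -- not HttpPartitionTest's actual code.
    import org.apache.solr.client.solrj.impl.CloudSolrClient;
    import org.apache.solr.common.SolrInputDocument;

    public class SendDocSketch {
      public static void main(String[] args) throws Exception {
        // ZK address is illustrative; the test starts its own ZkTestServer.
        try (CloudSolrClient client = new CloudSolrClient.Builder()
            .withZkHost("127.0.0.1:40706/solr").build()) {
          client.setDefaultCollection("collection1");
          SolrInputDocument doc = new SolrInputDocument();
          doc.addField("id", "1");
          client.add(doc);    // routed through CloudSolrClient.request(...)
          client.commit();
        }
      }
    }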




Build Log:
[...truncated 11118 lines...]
   [junit4] Suite: org.apache.solr.cloud.HttpPartitionTest
   [junit4]   2> Creating dataDir: 
/home/jenkins/workspace/Lucene-Solr-6.x-Linux/solr/build/solr-core/test/J0/temp/solr.cloud.HttpPartitionTest_3A32EC387A633C72-001/init-core-data-001
   [junit4]   2> 316561 INFO  
(SUITE-HttpPartitionTest-seed#[3A32EC387A633C72]-worker) [    ] 
o.a.s.SolrTestCaseJ4 Randomized ssl (false) and clientAuth (false) via: 
@org.apache.solr.SolrTestCaseJ4$SuppressSSL(bugUrl=https://issues.apache.org/jira/browse/SOLR-5776)
   [junit4]   2> 316561 INFO  
(SUITE-HttpPartitionTest-seed#[3A32EC387A633C72]-worker) [    ] 
o.a.s.BaseDistributedSearchTestCase Setting hostContext system property: /
   [junit4]   2> 316562 INFO  
(TEST-HttpPartitionTest.test-seed#[3A32EC387A633C72]) [    ] 
o.a.s.c.ZkTestServer STARTING ZK TEST SERVER
   [junit4]   2> 316562 INFO  (Thread-435) [    ] o.a.s.c.ZkTestServer client 
port:0.0.0.0/0.0.0.0:0
   [junit4]   2> 316562 INFO  (Thread-435) [    ] o.a.s.c.ZkTestServer Starting 
server
   [junit4]   2> 316662 INFO  
(TEST-HttpPartitionTest.test-seed#[3A32EC387A633C72]) [    ] 
o.a.s.c.ZkTestServer start zk server on port:40706
   [junit4]   2> 316668 INFO  
(TEST-HttpPartitionTest.test-seed#[3A32EC387A633C72]) [    ] 
o.a.s.c.AbstractZkTestCase put 
/home/jenkins/workspace/Lucene-Solr-6.x-Linux/solr/core/src/test-files/solr/collection1/conf/solrconfig-tlog.xml
 to /configs/conf1/solrconfig.xml
   [junit4]   2> 316669 INFO  
(TEST-HttpPartitionTest.test-seed#[3A32EC387A633C72]) [    ] 
o.a.s.c.AbstractZkTestCase put 
/home/jenkins/workspace/Lucene-Solr-6.x-Linux/solr/core/src/test-files/solr/collection1/conf/schema.xml
 to /configs/conf1/schema.xml
   [junit4]   2> 316669 INFO  
(TEST-HttpPartitionTest.test-seed#[3A32EC387A633C72]) [    ] 
o.a.s.c.AbstractZkTestCase put 
/home/jenkins/workspace/Lucene-Solr-6.x-Linux/solr/core/src/test-files/solr/collection1/conf/solrconfig.snippet.randomindexconfig.xml
 to /configs/conf1/solrconfig.snippet.randomindexconfig.xml
   [junit4]   2> 316670 INFO  
(TEST-HttpPartitionTest.test-seed#[3A32EC387A633C72]) [    ] 
o.a.s.c.AbstractZkTestCase put 
/home/jenkins/workspace/Lucene-Solr-6.x-Linux/solr/core/src/test-files/solr/collection1/conf/stopwords.txt
 to /configs/conf1/stopwords.txt
   [junit4]   2> 316670 INFO  
(TEST-HttpPartitionTest.test-seed#[3A32EC387A633C72]) [    ] 
o.a.s.c.AbstractZkTestCase put 
/home/jenkins/workspace/Lucene-Solr-6.x-Linux/solr/core/src/test-files/solr/collection1/conf/protwords.txt
 to /configs/conf1/protwords.txt
   [junit4]   2> 316671 INFO  
(TEST-HttpPartitionTest.test-seed#[3A32EC387A633C72]) [    ] 
o.a.s.c.AbstractZkTestCase put 
/home/jenkins/workspace/Lucene-Solr-6.x-Linux/solr/core/src/test-files/solr/collection1/conf/currency.xml
 to /configs/conf1/currency.xml
   [junit4]   2> 316672 INFO  
(TEST-HttpPartitionTest.test-seed#[3A32EC387A633C72]) [    ] 
o.a.s.c.AbstractZkTestCase put 
/home/jenkins/workspace/Lucene-Solr-6.x-Linux/solr/core/src/test-files/solr/collection1/conf/enumsConfig.xml
 to /configs/conf1/enumsConfig.xml
   [junit4]   2> 316672 INFO  
(TEST-HttpPartitionTest.test-seed#[3A32EC387A633C72]) [    ] 
o.a.s.c.AbstractZkTestCase put 
/home/jenkins/workspace/Lucene-Solr-6.x-Linux/solr/core/src/test-files/solr/collection1/conf/open-exchange-rates.json
 to /configs/conf1/open-exchange-rates.json
   [junit4]   2> 316673 INFO  
(TEST-HttpPartitionTest.test-seed#[3A32EC387A633C72]) [    ] 
o.a.s.c.AbstractZkTestCase put 
/home/jenkins/workspace/Lucene-Solr-6.x-Linux/solr/core/src/test-files/solr/collection1/conf/mapping-ISOLatin1Accent.txt
 to /configs/conf1/mapping-ISOLatin1Accent.txt
   [junit4]   2> 316673 INFO  
(TEST-HttpPartitionTest.test-seed#[3A32EC387A633C72]) [    ] 
o.a.s.c.AbstractZkTestCase put 
/home/jenkins/workspace/Lucene-Solr-6.x-Linux/solr/core/src/test-files/solr/collection1/conf/old_synonyms.txt
 to /configs/conf1/old_synonyms.txt
   [junit4]   2> 316674 INFO  
(TEST-HttpPartitionTest.test-seed#[3A32EC387A633C72]) [    ] 
o.a.s.c.AbstractZkTestCase put 
/home/jenkins/workspace/Lucene-Solr-6.x-Linux/solr/core/src/test-files/solr/collection1/conf/synonyms.txt
 to /configs/conf1/synonyms.txt
   [junit4]   2> 316733 INFO  
(TEST-HttpPartitionTest.test-seed#[3A32EC387A633C72]) [    ] 
o.a.s.SolrTestCaseJ4 Writing core.properties file to 
/home/jenkins/workspace/Lucene-Solr-6.x-Linux/solr/build/solr-core/test/J0/temp/solr.cloud.HttpPartitionTest_3A32EC387A633C72-001/control-001/cores/collection1
   [junit4]   2> 316735 INFO  
(TEST-HttpPartitionTest.test-seed#[3A32EC387A633C72]) [    ] o.e.j.s.Server 
jetty-9.3.14.v20161028
   [junit4]   2> 316735 INFO  
(TEST-HttpPartitionTest.test-seed#[3A32EC387A633C72]) [    ] 
o.e.j.s.h.ContextHandler Started 
o.e.j.s.ServletContextHandler@1b8f4d82{/,null,AVAILABLE}
   [junit4]   2> 316736 INFO  
(TEST-HttpPartitionTest.test-seed#[3A32EC387A633C72]) [    ] 
o.e.j.s.AbstractConnector Started 
ServerConnector@4be411db{HTTP/1.1,[http/1.1]}{127.0.0.1:34392}
   [junit4]   2> 316736 INFO  
(TEST-HttpPartitionTest.test-seed#[3A32EC387A633C72]) [    ] o.e.j.s.Server 
Started @318433ms
   [junit4]   2> 316736 INFO  
(TEST-HttpPartitionTest.test-seed#[3A32EC387A633C72]) [    ] 
o.a.s.c.s.e.JettySolrRunner Jetty properties: 
{solr.data.dir=/home/jenkins/workspace/Lucene-Solr-6.x-Linux/solr/build/solr-core/test/J0/temp/solr.cloud.HttpPartitionTest_3A32EC387A633C72-001/tempDir-001/control/data,
 hostContext=/, hostPort=35327, 
coreRootDirectory=/home/jenkins/workspace/Lucene-Solr-6.x-Linux/solr/build/solr-core/test/J0/temp/solr.cloud.HttpPartitionTest_3A32EC387A633C72-001/control-001/cores}
   [junit4]   2> 316737 ERROR 
(TEST-HttpPartitionTest.test-seed#[3A32EC387A633C72]) [    ] 
o.a.s.s.StartupLoggingUtils Missing Java Option solr.log.dir. Logging may be 
missing or incomplete.
   [junit4]   2> 316737 INFO  
(TEST-HttpPartitionTest.test-seed#[3A32EC387A633C72]) [    ] 
o.a.s.s.SolrDispatchFilter  ___      _       Welcome to Apache Solr™ version 
6.4.0
   [junit4]   2> 316737 INFO  
(TEST-HttpPartitionTest.test-seed#[3A32EC387A633C72]) [    ] 
o.a.s.s.SolrDispatchFilter / __| ___| |_ _   Starting in cloud mode on port null
   [junit4]   2> 316737 INFO  
(TEST-HttpPartitionTest.test-seed#[3A32EC387A633C72]) [    ] 
o.a.s.s.SolrDispatchFilter \__ \/ _ \ | '_|  Install dir: null
   [junit4]   2> 316737 INFO  
(TEST-HttpPartitionTest.test-seed#[3A32EC387A633C72]) [    ] 
o.a.s.s.SolrDispatchFilter |___/\___/_|_|    Start time: 
2016-12-06T16:18:15.354Z
   [junit4]   2> 316740 INFO  
(TEST-HttpPartitionTest.test-seed#[3A32EC387A633C72]) [    ] 
o.a.s.s.SolrDispatchFilter Loading solr.xml from SolrHome (not found in 
ZooKeeper)
   [junit4]   2> 316740 WARN  (NIOServerCxn.Factory:0.0.0.0/0.0.0.0:0) [    ] 
o.a.z.s.NIOServerCnxn caught end of stream exception
   [junit4]   2> EndOfStreamException: Unable to read additional data from 
client sessionid 0x158d4edaa0c0002, likely client has closed socket
   [junit4]   2>        at org.apache.zookeeper.server.NIOServerCnxn.doIO(NIOServerCnxn.java:228)
   [junit4]   2>        at org.apache.zookeeper.server.NIOServerCnxnFactory.run(NIOServerCnxnFactory.java:208)
   [junit4]   2>        at java.lang.Thread.run(Thread.java:745)
   [junit4]   2> 316740 INFO  
(TEST-HttpPartitionTest.test-seed#[3A32EC387A633C72]) [    ] 
o.a.s.c.SolrXmlConfig Loading container configuration from 
/home/jenkins/workspace/Lucene-Solr-6.x-Linux/solr/build/solr-core/test/J0/temp/solr.cloud.HttpPartitionTest_3A32EC387A633C72-001/control-001/solr.xml
   [junit4]   2> 316759 INFO  
(TEST-HttpPartitionTest.test-seed#[3A32EC387A633C72]) [    ] 
o.a.s.u.UpdateShardHandler Creating UpdateShardHandler HTTP client with params: 
socketTimeout=340000&connTimeout=45000&retry=true
   [junit4]   2> 316760 INFO  
(TEST-HttpPartitionTest.test-seed#[3A32EC387A633C72]) [    ] 
o.a.s.c.ZkContainer Zookeeper client=127.0.0.1:40706/solr
   [junit4]   2> 316780 INFO  
(TEST-HttpPartitionTest.test-seed#[3A32EC387A633C72]) [n:127.0.0.1:35327_    ] 
o.a.s.c.OverseerElectionContext I am going to be the leader 127.0.0.1:35327_
   [junit4]   2> 316780 INFO  
(TEST-HttpPartitionTest.test-seed#[3A32EC387A633C72]) [n:127.0.0.1:35327_    ] 
o.a.s.c.Overseer Overseer (id=97061509213716484-127.0.0.1:35327_-n_0000000000) 
starting
   [junit4]   2> 316786 INFO  
(TEST-HttpPartitionTest.test-seed#[3A32EC387A633C72]) [n:127.0.0.1:35327_    ] 
o.a.s.c.ZkController Register node as live in 
ZooKeeper:/live_nodes/127.0.0.1:35327_
   [junit4]   2> 316788 INFO  
(OverseerStateUpdate-97061509213716484-127.0.0.1:35327_-n_0000000000) 
[n:127.0.0.1:35327_    ] o.a.s.c.c.ZkStateReader Updated live nodes from 
ZooKeeper... (0) -> (1)
   [junit4]   2> 316833 INFO  
(TEST-HttpPartitionTest.test-seed#[3A32EC387A633C72]) [n:127.0.0.1:35327_    ] 
o.a.s.c.CorePropertiesLocator Found 1 core definitions underneath 
/home/jenkins/workspace/Lucene-Solr-6.x-Linux/solr/build/solr-core/test/J0/temp/solr.cloud.HttpPartitionTest_3A32EC387A633C72-001/control-001/cores
   [junit4]   2> 316833 INFO  
(TEST-HttpPartitionTest.test-seed#[3A32EC387A633C72]) [n:127.0.0.1:35327_    ] 
o.a.s.c.CorePropertiesLocator Cores are: [collection1]
   [junit4]   2> 316836 INFO  
(OverseerStateUpdate-97061509213716484-127.0.0.1:35327_-n_0000000000) 
[n:127.0.0.1:35327_    ] o.a.s.c.o.ReplicaMutator Assigning new node to shard 
shard=shard1
   [junit4]   2> 317846 INFO  
(coreLoadExecutor-810-thread-1-processing-n:127.0.0.1:35327_) 
[n:127.0.0.1:35327_ c:control_collection   x:collection1] o.a.s.c.SolrConfig 
Using Lucene MatchVersion: 6.4.0
   [junit4]   2> 317858 INFO  
(coreLoadExecutor-810-thread-1-processing-n:127.0.0.1:35327_) 
[n:127.0.0.1:35327_ c:control_collection   x:collection1] o.a.s.s.IndexSchema 
[collection1] Schema name=test
   [junit4]   2> 317924 WARN  
(coreLoadExecutor-810-thread-1-processing-n:127.0.0.1:35327_) 
[n:127.0.0.1:35327_ c:control_collection   x:collection1] o.a.s.s.IndexSchema 
[collection1] default search field in schema is text. WARNING: Deprecated, 
please use 'df' on request instead.
   [junit4]   2> 317926 INFO  
(coreLoadExecutor-810-thread-1-processing-n:127.0.0.1:35327_) 
[n:127.0.0.1:35327_ c:control_collection   x:collection1] o.a.s.s.IndexSchema 
Loaded schema test/1.0 with uniqueid field id
   [junit4]   2> 317933 INFO  
(coreLoadExecutor-810-thread-1-processing-n:127.0.0.1:35327_) 
[n:127.0.0.1:35327_ c:control_collection   x:collection1] o.a.s.c.CoreContainer 
Creating SolrCore 'collection1' using configuration from collection 
control_collection
   [junit4]   2> 317933 INFO  
(coreLoadExecutor-810-thread-1-processing-n:127.0.0.1:35327_) 
[n:127.0.0.1:35327_ c:control_collection s:shard1 r:core_node1 x:collection1] 
o.a.s.c.SolrCore [[collection1] ] Opening new SolrCore at 
[/home/jenkins/workspace/Lucene-Solr-6.x-Linux/solr/build/solr-core/test/J0/temp/solr.cloud.HttpPartitionTest_3A32EC387A633C72-001/control-001/cores/collection1],
 
dataDir=[/home/jenkins/workspace/Lucene-Solr-6.x-Linux/solr/build/solr-core/test/J0/temp/solr.cloud.HttpPartitionTest_3A32EC387A633C72-001/control-001/cores/collection1/data/]
   [junit4]   2> 317933 INFO  
(coreLoadExecutor-810-thread-1-processing-n:127.0.0.1:35327_) 
[n:127.0.0.1:35327_ c:control_collection s:shard1 r:core_node1 x:collection1] 
o.a.s.c.JmxMonitoredMap JMX monitoring is enabled. Adding Solr mbeans to JMX 
Server: com.sun.jmx.mbeanserver.JmxMBeanServer@420a9b7b
   [junit4]   2> 317934 INFO  
(coreLoadExecutor-810-thread-1-processing-n:127.0.0.1:35327_) 
[n:127.0.0.1:35327_ c:control_collection s:shard1 r:core_node1 x:collection1] 
o.a.s.u.RandomMergePolicy RandomMergePolicy wrapping class 
org.apache.lucene.index.LogDocMergePolicy: [LogDocMergePolicy: 
minMergeSize=1000, mergeFactor=36, maxMergeSize=9223372036854775807, 
maxMergeSizeForForcedMerge=9223372036854775807, calibrateSizeByDeletes=false, 
maxMergeDocs=2147483647, maxCFSSegmentSizeMB=8.796093022207999E12, 
noCFSRatio=0.21791737715316925]
   [junit4]   2> 317937 WARN  
(coreLoadExecutor-810-thread-1-processing-n:127.0.0.1:35327_) 
[n:127.0.0.1:35327_ c:control_collection s:shard1 r:core_node1 x:collection1] 
o.a.s.c.RequestHandlers INVALID paramSet a in requestHandler {type = 
requestHandler,name = /dump,class = DumpRequestHandler,args = 
{defaults={a=A,b=B}}}
   [junit4]   2> 317943 INFO  
(coreLoadExecutor-810-thread-1-processing-n:127.0.0.1:35327_) 
[n:127.0.0.1:35327_ c:control_collection s:shard1 r:core_node1 x:collection1] 
o.a.s.u.UpdateHandler Using UpdateLog implementation: 
org.apache.solr.update.UpdateLog
   [junit4]   2> 317943 INFO  
(coreLoadExecutor-810-thread-1-processing-n:127.0.0.1:35327_) 
[n:127.0.0.1:35327_ c:control_collection s:shard1 r:core_node1 x:collection1] 
o.a.s.u.UpdateLog Initializing UpdateLog: dataDir= defaultSyncLevel=FLUSH 
numRecordsToKeep=100 maxNumLogsToKeep=10 numVersionBuckets=65536
   [junit4]   2> 317948 INFO  
(coreLoadExecutor-810-thread-1-processing-n:127.0.0.1:35327_) 
[n:127.0.0.1:35327_ c:control_collection s:shard1 r:core_node1 x:collection1] 
o.a.s.u.CommitTracker Hard AutoCommit: disabled
   [junit4]   2> 317948 INFO  
(coreLoadExecutor-810-thread-1-processing-n:127.0.0.1:35327_) 
[n:127.0.0.1:35327_ c:control_collection s:shard1 r:core_node1 x:collection1] 
o.a.s.u.CommitTracker Soft AutoCommit: disabled
   [junit4]   2> 317948 INFO  
(coreLoadExecutor-810-thread-1-processing-n:127.0.0.1:35327_) 
[n:127.0.0.1:35327_ c:control_collection s:shard1 r:core_node1 x:collection1] 
o.a.s.u.RandomMergePolicy RandomMergePolicy wrapping class 
org.apache.lucene.index.TieredMergePolicy: [TieredMergePolicy: 
maxMergeAtOnce=16, maxMergeAtOnceExplicit=39, maxMergedSegmentMB=6.248046875, 
floorSegmentMB=0.6923828125, forceMergeDeletesPctAllowed=8.94610845497283, 
segmentsPerTier=20.0, maxCFSSegmentSizeMB=8.796093022207999E12, 
noCFSRatio=0.1168316969385793
   [junit4]   2> 317949 INFO  
(coreLoadExecutor-810-thread-1-processing-n:127.0.0.1:35327_) 
[n:127.0.0.1:35327_ c:control_collection s:shard1 r:core_node1 x:collection1] 
o.a.s.s.SolrIndexSearcher Opening [Searcher@4550bec4[collection1] main]
   [junit4]   2> 317949 INFO  
(coreLoadExecutor-810-thread-1-processing-n:127.0.0.1:35327_) 
[n:127.0.0.1:35327_ c:control_collection s:shard1 r:core_node1 x:collection1] 
o.a.s.r.ManagedResourceStorage Configured ZooKeeperStorageIO with znodeBase: 
/configs/conf1
   [junit4]   2> 317950 INFO  
(coreLoadExecutor-810-thread-1-processing-n:127.0.0.1:35327_) 
[n:127.0.0.1:35327_ c:control_collection s:shard1 r:core_node1 x:collection1] 
o.a.s.r.ManagedResourceStorage Loaded null at path _rest_managed.json using 
ZooKeeperStorageIO:path=/configs/conf1
   [junit4]   2> 317950 INFO  
(coreLoadExecutor-810-thread-1-processing-n:127.0.0.1:35327_) 
[n:127.0.0.1:35327_ c:control_collection s:shard1 r:core_node1 x:collection1] 
o.a.s.h.ReplicationHandler Commits will be reserved for  10000
   [junit4]   2> 317950 INFO  
(searcherExecutor-811-thread-1-processing-n:127.0.0.1:35327_ x:collection1 
s:shard1 c:control_collection r:core_node1) [n:127.0.0.1:35327_ 
c:control_collection s:shard1 r:core_node1 x:collection1] o.a.s.c.SolrCore 
[collection1] Registered new searcher Searcher@4550bec4[collection1] 
main{ExitableDirectoryReader(UninvertingDirectoryReader())}
   [junit4]   2> 317950 INFO  
(coreLoadExecutor-810-thread-1-processing-n:127.0.0.1:35327_) 
[n:127.0.0.1:35327_ c:control_collection s:shard1 r:core_node1 x:collection1] 
o.a.s.u.UpdateLog Could not find max version in index or recent updates, using 
new clock 1552984148873838592
   [junit4]   2> 317955 INFO  
(coreZkRegister-803-thread-1-processing-n:127.0.0.1:35327_ x:collection1 
s:shard1 c:control_collection r:core_node1) [n:127.0.0.1:35327_ 
c:control_collection s:shard1 r:core_node1 x:collection1] 
o.a.s.c.ShardLeaderElectionContext Enough replicas found to continue.
   [junit4]   2> 317955 INFO  
(coreZkRegister-803-thread-1-processing-n:127.0.0.1:35327_ x:collection1 
s:shard1 c:control_collection r:core_node1) [n:127.0.0.1:35327_ 
c:control_collection s:shard1 r:core_node1 x:collection1] 
o.a.s.c.ShardLeaderElectionContext I may be the new leader - try and sync
   [junit4]   2> 317955 INFO  
(coreZkRegister-803-thread-1-processing-n:127.0.0.1:35327_ x:collection1 
s:shard1 c:control_collection r:core_node1) [n:127.0.0.1:35327_ 
c:control_collection s:shard1 r:core_node1 x:collection1] o.a.s.c.SyncStrategy 
Sync replicas to http://127.0.0.1:35327/collection1/
   [junit4]   2> 317955 INFO  
(coreZkRegister-803-thread-1-processing-n:127.0.0.1:35327_ x:collection1 
s:shard1 c:control_collection r:core_node1) [n:127.0.0.1:35327_ 
c:control_collection s:shard1 r:core_node1 x:collection1] o.a.s.c.SyncStrategy 
Sync Success - now sync replicas to me
   [junit4]   2> 317955 INFO  
(coreZkRegister-803-thread-1-processing-n:127.0.0.1:35327_ x:collection1 
s:shard1 c:control_collection r:core_node1) [n:127.0.0.1:35327_ 
c:control_collection s:shard1 r:core_node1 x:collection1] o.a.s.c.SyncStrategy 
http://127.0.0.1:35327/collection1/ has no replicas
   [junit4]   2> 317958 INFO  
(coreZkRegister-803-thread-1-processing-n:127.0.0.1:35327_ x:collection1 
s:shard1 c:control_collection r:core_node1) [n:127.0.0.1:35327_ 
c:control_collection s:shard1 r:core_node1 x:collection1] 
o.a.s.c.ShardLeaderElectionContext I am the new leader: 
http://127.0.0.1:35327/collection1/ shard1
   [junit4]   2> 318108 INFO  
(coreZkRegister-803-thread-1-processing-n:127.0.0.1:35327_ x:collection1 
s:shard1 c:control_collection r:core_node1) [n:127.0.0.1:35327_ 
c:control_collection s:shard1 r:core_node1 x:collection1] o.a.s.c.ZkController 
I am the leader, no recovery necessary
   [junit4]   2> 318340 INFO  
(TEST-HttpPartitionTest.test-seed#[3A32EC387A633C72]) [    ] 
o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (0) -> (1)
   [junit4]   2> 318341 INFO  
(TEST-HttpPartitionTest.test-seed#[3A32EC387A633C72]) [    ] 
o.a.s.c.s.i.ZkClientClusterStateProvider Cluster at 127.0.0.1:40706/solr ready
   [junit4]   2> 318341 INFO  
(TEST-HttpPartitionTest.test-seed#[3A32EC387A633C72]) [    ] 
o.a.s.c.ChaosMonkey monkey: init - expire sessions:false cause connection 
loss:false
   [junit4]   2> 318406 INFO  
(TEST-HttpPartitionTest.test-seed#[3A32EC387A633C72]) [    ] 
o.a.s.SolrTestCaseJ4 Writing core.properties file to 
/home/jenkins/workspace/Lucene-Solr-6.x-Linux/solr/build/solr-core/test/J0/temp/solr.cloud.HttpPartitionTest_3A32EC387A633C72-001/shard-1-001/cores/collection1
   [junit4]   2> 318406 INFO  
(TEST-HttpPartitionTest.test-seed#[3A32EC387A633C72]) [    ] 
o.a.s.c.AbstractFullDistribZkTestBase create jetty 1 in directory 
/home/jenkins/workspace/Lucene-Solr-6.x-Linux/solr/build/solr-core/test/J0/temp/solr.cloud.HttpPartitionTest_3A32EC387A633C72-001/shard-1-001
   [junit4]   2> 318407 INFO  
(TEST-HttpPartitionTest.test-seed#[3A32EC387A633C72]) [    ] o.e.j.s.Server 
jetty-9.3.14.v20161028
   [junit4]   2> 318409 INFO  
(TEST-HttpPartitionTest.test-seed#[3A32EC387A633C72]) [    ] 
o.e.j.s.h.ContextHandler Started 
o.e.j.s.ServletContextHandler@4e6da314{/,null,AVAILABLE}
   [junit4]   2> 318409 INFO  
(TEST-HttpPartitionTest.test-seed#[3A32EC387A633C72]) [    ] 
o.e.j.s.AbstractConnector Started 
ServerConnector@374ea950{HTTP/1.1,[http/1.1]}{127.0.0.1:40130}
   [junit4]   2> 318409 INFO  
(TEST-HttpPartitionTest.test-seed#[3A32EC387A633C72]) [    ] o.e.j.s.Server 
Started @320106ms
   [junit4]   2> 318409 INFO  
(TEST-HttpPartitionTest.test-seed#[3A32EC387A633C72]) [    ] 
o.a.s.c.s.e.JettySolrRunner Jetty properties: 
{solr.data.dir=/home/jenkins/workspace/Lucene-Solr-6.x-Linux/solr/build/solr-core/test/J0/temp/solr.cloud.HttpPartitionTest_3A32EC387A633C72-001/tempDir-001/jetty1,
 solrconfig=solrconfig.xml, hostContext=/, hostPort=34481, 
coreRootDirectory=/home/jenkins/workspace/Lucene-Solr-6.x-Linux/solr/build/solr-core/test/J0/temp/solr.cloud.HttpPartitionTest_3A32EC387A633C72-001/shard-1-001/cores}
   [junit4]   2> 318410 ERROR 
(TEST-HttpPartitionTest.test-seed#[3A32EC387A633C72]) [    ] 
o.a.s.s.StartupLoggingUtils Missing Java Option solr.log.dir. Logging may be 
missing or incomplete.
   [junit4]   2> 318410 INFO  
(TEST-HttpPartitionTest.test-seed#[3A32EC387A633C72]) [    ] 
o.a.s.s.SolrDispatchFilter  ___      _       Welcome to Apache Solr™ version 
6.4.0
   [junit4]   2> 318410 INFO  
(TEST-HttpPartitionTest.test-seed#[3A32EC387A633C72]) [    ] 
o.a.s.s.SolrDispatchFilter / __| ___| |_ _   Starting in cloud mode on port null
   [junit4]   2> 318410 INFO  
(TEST-HttpPartitionTest.test-seed#[3A32EC387A633C72]) [    ] 
o.a.s.s.SolrDispatchFilter \__ \/ _ \ | '_|  Install dir: null
   [junit4]   2> 318410 INFO  
(TEST-HttpPartitionTest.test-seed#[3A32EC387A633C72]) [    ] 
o.a.s.s.SolrDispatchFilter |___/\___/_|_|    Start time: 
2016-12-06T16:18:17.027Z
   [junit4]   2> 318413 INFO  
(TEST-HttpPartitionTest.test-seed#[3A32EC387A633C72]) [    ] 
o.a.s.s.SolrDispatchFilter Loading solr.xml from SolrHome (not found in 
ZooKeeper)
   [junit4]   2> 318413 INFO  
(TEST-HttpPartitionTest.test-seed#[3A32EC387A633C72]) [    ] 
o.a.s.c.SolrXmlConfig Loading container configuration from 
/home/jenkins/workspace/Lucene-Solr-6.x-Linux/solr/build/solr-core/test/J0/temp/solr.cloud.HttpPartitionTest_3A32EC387A633C72-001/shard-1-001/solr.xml
   [junit4]   2> 318420 INFO  
(TEST-HttpPartitionTest.test-seed#[3A32EC387A633C72]) [    ] 
o.a.s.u.UpdateShardHandler Creating UpdateShardHandler HTTP client with params: 
socketTimeout=340000&connTimeout=45000&retry=true
   [junit4]   2> 318420 INFO  
(TEST-HttpPartitionTest.test-seed#[3A32EC387A633C72]) [    ] 
o.a.s.c.ZkContainer Zookeeper client=127.0.0.1:40706/solr
   [junit4]   2> 318425 INFO  
(TEST-HttpPartitionTest.test-seed#[3A32EC387A633C72]) [n:127.0.0.1:34481_    ] 
o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (0) -> (1)
   [junit4]   2> 318427 INFO  
(TEST-HttpPartitionTest.test-seed#[3A32EC387A633C72]) [n:127.0.0.1:34481_    ] 
o.a.s.c.ZkController Register node as live in 
ZooKeeper:/live_nodes/127.0.0.1:34481_
   [junit4]   2> 318429 INFO  
(zkCallback-380-thread-1-processing-n:127.0.0.1:34481_) [n:127.0.0.1:34481_    
] o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (1) -> (2)
   [junit4]   2> 318429 INFO  (zkCallback-375-thread-1) [    ] 
o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (1) -> (2)
   [junit4]   2> 318429 INFO  
(zkCallback-371-thread-1-processing-n:127.0.0.1:35327_) [n:127.0.0.1:35327_    
] o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (1) -> (2)
   [junit4]   2> 318463 INFO  
(TEST-HttpPartitionTest.test-seed#[3A32EC387A633C72]) [n:127.0.0.1:34481_    ] 
o.a.s.c.CorePropertiesLocator Found 1 core definitions underneath 
/home/jenkins/workspace/Lucene-Solr-6.x-Linux/solr/build/solr-core/test/J0/temp/solr.cloud.HttpPartitionTest_3A32EC387A633C72-001/shard-1-001/cores
   [junit4]   2> 318463 INFO  
(TEST-HttpPartitionTest.test-seed#[3A32EC387A633C72]) [n:127.0.0.1:34481_    ] 
o.a.s.c.CorePropertiesLocator Cores are: [collection1]
   [junit4]   2> 318465 INFO  
(OverseerStateUpdate-97061509213716484-127.0.0.1:35327_-n_0000000000) 
[n:127.0.0.1:35327_    ] o.a.s.c.o.ReplicaMutator Assigning new node to shard 
shard=shard2
   [junit4]   2> 319474 INFO  
(coreLoadExecutor-821-thread-1-processing-n:127.0.0.1:34481_) 
[n:127.0.0.1:34481_ c:collection1   x:collection1] o.a.s.c.SolrConfig Using 
Lucene MatchVersion: 6.4.0
   [junit4]   2> 319485 INFO  
(coreLoadExecutor-821-thread-1-processing-n:127.0.0.1:34481_) 
[n:127.0.0.1:34481_ c:collection1   x:collection1] o.a.s.s.IndexSchema 
[collection1] Schema name=test
   [junit4]   2> 319557 WARN  
(coreLoadExecutor-821-thread-1-processing-n:127.0.0.1:34481_) 
[n:127.0.0.1:34481_ c:collection1   x:collection1] o.a.s.s.IndexSchema 
[collection1] default search field in schema is text. WARNING: Deprecated, 
please use 'df' on request instead.
   [junit4]   2> 319559 INFO  
(coreLoadExecutor-821-thread-1-processing-n:127.0.0.1:34481_) 
[n:127.0.0.1:34481_ c:collection1   x:collection1] o.a.s.s.IndexSchema Loaded 
schema test/1.0 with uniqueid field id
   [junit4]   2> 319567 INFO  
(coreLoadExecutor-821-thread-1-processing-n:127.0.0.1:34481_) 
[n:127.0.0.1:34481_ c:collection1   x:collection1] o.a.s.c.CoreContainer 
Creating SolrCore 'collection1' using configuration from collection collection1
   [junit4]   2> 319567 INFO  
(coreLoadExecutor-821-thread-1-processing-n:127.0.0.1:34481_) 
[n:127.0.0.1:34481_ c:collection1 s:shard2 r:core_node1 x:collection1] 
o.a.s.c.SolrCore [[collection1] ] Opening new SolrCore at 
[/home/jenkins/workspace/Lucene-Solr-6.x-Linux/solr/build/solr-core/test/J0/temp/solr.cloud.HttpPartitionTest_3A32EC387A633C72-001/shard-1-001/cores/collection1],
 
dataDir=[/home/jenkins/workspace/Lucene-Solr-6.x-Linux/solr/build/solr-core/test/J0/temp/solr.cloud.HttpPartitionTest_3A32EC387A633C72-001/shard-1-001/cores/collection1/data/]
   [junit4]   2> 319567 INFO  
(coreLoadExecutor-821-thread-1-processing-n:127.0.0.1:34481_) 
[n:127.0.0.1:34481_ c:collection1 s:shard2 r:core_node1 x:collection1] 
o.a.s.c.JmxMonitoredMap JMX monitoring is enabled. Adding Solr mbeans to JMX 
Server: com.sun.jmx.mbeanserver.JmxMBeanServer@420a9b7b
   [junit4]   2> 319569 INFO  
(coreLoadExecutor-821-thread-1-processing-n:127.0.0.1:34481_) 
[n:127.0.0.1:34481_ c:collection1 s:shard2 r:core_node1 x:collection1] 
o.a.s.u.RandomMergePolicy RandomMergePolicy wrapping class 
org.apache.lucene.index.LogDocMergePolicy: [LogDocMergePolicy: 
minMergeSize=1000, mergeFactor=36, maxMergeSize=9223372036854775807, 
maxMergeSizeForForcedMerge=9223372036854775807, calibrateSizeByDeletes=false, 
maxMergeDocs=2147483647, maxCFSSegmentSizeMB=8.796093022207999E12, 
noCFSRatio=0.21791737715316925]
   [junit4]   2> 319572 WARN  
(coreLoadExecutor-821-thread-1-processing-n:127.0.0.1:34481_) 
[n:127.0.0.1:34481_ c:collection1 s:shard2 r:core_node1 x:collection1] 
o.a.s.c.RequestHandlers INVALID paramSet a in requestHandler {type = 
requestHandler,name = /dump,class = DumpRequestHandler,args = 
{defaults={a=A,b=B}}}
   [junit4]   2> 319578 INFO  
(coreLoadExecutor-821-thread-1-processing-n:127.0.0.1:34481_) 
[n:127.0.0.1:34481_ c:collection1 s:shard2 r:core_node1 x:collection1] 
o.a.s.u.UpdateHandler Using UpdateLog implementation: 
org.apache.solr.update.UpdateLog
   [junit4]   2> 319578 INFO  
(coreLoadExecutor-821-thread-1-processing-n:127.0.0.1:34481_) 
[n:127.0.0.1:34481_ c:collection1 s:shard2 r:core_node1 x:collection1] 
o.a.s.u.UpdateLog Initializing UpdateLog: dataDir= defaultSyncLevel=FLUSH 
numRecordsToKeep=100 maxNumLogsToKeep=10 numVersionBuckets=65536
   [junit4]   2> 319579 INFO  
(coreLoadExecutor-821-thread-1-processing-n:127.0.0.1:34481_) 
[n:127.0.0.1:34481_ c:collection1 s:shard2 r:core_node1 x:collection1] 
o.a.s.u.CommitTracker Hard AutoCommit: disabled
   [junit4]   2> 319579 INFO  
(coreLoadExecutor-821-thread-1-processing-n:127.0.0.1:34481_) 
[n:127.0.0.1:34481_ c:collection1 s:shard2 r:core_node1 x:collection1] 
o.a.s.u.CommitTracker Soft AutoCommit: disabled
   [junit4]   2> 319579 INFO  
(coreLoadExecutor-821-thread-1-processing-n:127.0.0.1:34481_) 
[n:127.0.0.1:34481_ c:collection1 s:shard2 r:core_node1 x:collection1] 
o.a.s.u.RandomMergePolicy RandomMergePolicy wrapping class 
org.apache.lucene.index.TieredMergePolicy: [TieredMergePolicy: 
maxMergeAtOnce=16, maxMergeAtOnceExplicit=39, maxMergedSegmentMB=6.248046875, 
floorSegmentMB=0.6923828125, forceMergeDeletesPctAllowed=8.94610845497283, 
segmentsPerTier=20.0, maxCFSSegmentSizeMB=8.796093022207999E12, 
noCFSRatio=0.1168316969385793
   [junit4]   2> 319580 INFO  
(coreLoadExecutor-821-thread-1-processing-n:127.0.0.1:34481_) 
[n:127.0.0.1:34481_ c:collection1 s:shard2 r:core_node1 x:collection1] 
o.a.s.s.SolrIndexSearcher Opening [Searcher@b83e1fa[collection1] main]
   [junit4]   2> 319581 INFO  
(coreLoadExecutor-821-thread-1-processing-n:127.0.0.1:34481_) 
[n:127.0.0.1:34481_ c:collection1 s:shard2 r:core_node1 x:collection1] 
o.a.s.r.ManagedResourceStorage Configured ZooKeeperStorageIO with znodeBase: 
/configs/conf1
   [junit4]   2> 319581 INFO  
(coreLoadExecutor-821-thread-1-processing-n:127.0.0.1:34481_) 
[n:127.0.0.1:34481_ c:collection1 s:shard2 r:core_node1 x:collection1] 
o.a.s.r.ManagedResourceStorage Loaded null at path _rest_managed.json using 
ZooKeeperStorageIO:path=/configs/conf1
   [junit4]   2> 319581 INFO  
(coreLoadExecutor-821-thread-1-processing-n:127.0.0.1:34481_) 
[n:127.0.0.1:34481_ c:collection1 s:shard2 r:core_node1 x:collection1] 
o.a.s.h.ReplicationHandler Commits will be reserved for  10000
   [junit4]   2> 319582 INFO  
(searcherExecutor-822-thread-1-processing-n:127.0.0.1:34481_ x:collection1 
s:shard2 c:collection1 r:core_node1) [n:127.0.0.1:34481_ c:collection1 s:shard2 
r:core_node1 x:collection1] o.a.s.c.SolrCore [collection1] Registered new 
searcher Searcher@b83e1fa[collection1] 
main{ExitableDirectoryReader(UninvertingDirectoryReader())}
   [junit4]   2> 319582 INFO  
(coreLoadExecutor-821-thread-1-processing-n:127.0.0.1:34481_) 
[n:127.0.0.1:34481_ c:collection1 s:shard2 r:core_node1 x:collection1] 
o.a.s.u.UpdateLog Could not find max version in index or recent updates, using 
new clock 1552984150585114624
   [junit4]   2> 319586 INFO  
(coreZkRegister-816-thread-1-processing-n:127.0.0.1:34481_ x:collection1 
s:shard2 c:collection1 r:core_node1) [n:127.0.0.1:34481_ c:collection1 s:shard2 
r:core_node1 x:collection1] o.a.s.c.ShardLeaderElectionContext Enough replicas 
found to continue.
   [junit4]   2> 319586 INFO  
(coreZkRegister-816-thread-1-processing-n:127.0.0.1:34481_ x:collection1 
s:shard2 c:collection1 r:core_node1) [n:127.0.0.1:34481_ c:collection1 s:shard2 
r:core_node1 x:collection1] o.a.s.c.ShardLeaderElectionContext I may be the new 
leader - try and sync
   [junit4]   2> 319586 INFO  
(coreZkRegister-816-thread-1-processing-n:127.0.0.1:34481_ x:collection1 
s:shard2 c:collection1 r:core_node1) [n:127.0.0.1:34481_ c:collection1 s:shard2 
r:core_node1 x:collection1] o.a.s.c.SyncStrategy Sync replicas to 
http://127.0.0.1:34481/collection1/
   [junit4]   2> 319586 INFO  
(coreZkRegister-816-thread-1-processing-n:127.0.0.1:34481_ x:collection1 
s:shard2 c:collection1 r:core_node1) [n:127.0.0.1:34481_ c:collection1 s:shard2 
r:core_node1 x:collection1] o.a.s.c.SyncStrategy Sync Success - now sync 
replicas to me
   [junit4]   2> 319587 INFO  
(coreZkRegister-816-thread-1-processing-n:127.0.0.1:34481_ x:collection1 
s:shard2 c:collection1 r:core_node1) [n:127.0.0.1:34481_ c:collection1 s:shard2 
r:core_node1 x:collection1] o.a.s.c.SyncStrategy 
http://127.0.0.1:34481/collection1/ has no replicas
   [junit4]   2> 319589 INFO  
(coreZkRegister-816-thread-1-processing-n:127.0.0.1:34481_ x:collection1 
s:shard2 c:collection1 r:core_node1) [n:127.0.0.1:34481_ c:collection1 s:shard2 
r:core_node1 x:collection1] o.a.s.c.ShardLeaderElectionContext I am the new 
leader: http://127.0.0.1:34481/collection1/ shard2
   [junit4]   2> 319739 INFO  
(coreZkRegister-816-thread-1-processing-n:127.0.0.1:34481_ x:collection1 
s:shard2 c:collection1 r:core_node1) [n:127.0.0.1:34481_ c:collection1 s:shard2 
r:core_node1 x:collection1] o.a.s.c.ZkController I am the leader, no recovery 
necessary
   [junit4]   2> 320110 INFO  
(TEST-HttpPartitionTest.test-seed#[3A32EC387A633C72]) [    ] 
o.a.s.SolrTestCaseJ4 Writing core.properties file to 
/home/jenkins/workspace/Lucene-Solr-6.x-Linux/solr/build/solr-core/test/J0/temp/solr.cloud.HttpPartitionTest_3A32EC387A633C72-001/shard-2-001/cores/collection1
   [junit4]   2> 320111 INFO  
(TEST-HttpPartitionTest.test-seed#[3A32EC387A633C72]) [    ] 
o.a.s.c.AbstractFullDistribZkTestBase create jetty 2 in directory 
/home/jenkins/workspace/Lucene-Solr-6.x-Linux/solr/build/solr-core/test/J0/temp/solr.cloud.HttpPartitionTest_3A32EC387A633C72-001/shard-2-001
   [junit4]   2> 320111 INFO  
(TEST-HttpPartitionTest.test-seed#[3A32EC387A633C72]) [    ] o.e.j.s.Server 
jetty-9.3.14.v20161028
   [junit4]   2> 320112 INFO  
(TEST-HttpPartitionTest.test-seed#[3A32EC387A633C72]) [    ] 
o.e.j.s.h.ContextHandler Started 
o.e.j.s.ServletContextHandler@334a00b1{/,null,AVAILABLE}
   [junit4]   2> 320113 INFO  
(TEST-HttpPartitionTest.test-seed#[3A32EC387A633C72]) [    ] 
o.e.j.s.AbstractConnector Started 
ServerConnector@39f7c2f5{HTTP/1.1,[http/1.1]}{127.0.0.1:34763}
   [junit4]   2> 320113 INFO  
(TEST-HttpPartitionTest.test-seed#[3A32EC387A633C72]) [    ] o.e.j.s.Server 
Started @321810ms
   [junit4]   2> 320113 INFO  
(TEST-HttpPartitionTest.test-seed#[3A32EC387A633C72]) [    ] 
o.a.s.c.s.e.JettySolrRunner Jetty properties: 
{solr.data.dir=/home/jenkins/workspace/Lucene-Solr-6.x-Linux/solr/build/solr-core/test/J0/temp/solr.cloud.HttpPartitionTest_3A32EC387A633C72-001/tempDir-001/jetty2,
 solrconfig=solrconfig.xml, hostContext=/, hostPort=40718, 
coreRootDirectory=/home/jenkins/workspace/Lucene-Solr-6.x-Linux/solr/build/solr-core/test/J0/temp/solr.cloud.HttpPartitionTest_3A32EC387A633C72-001/shard-2-001/cores}
   [junit4]   2> 320113 ERROR 
(TEST-HttpPartitionTest.test-seed#[3A32EC387A633C72]) [    ] 
o.a.s.s.StartupLoggingUtils Missing Java Option solr.log.dir. Logging may be 
missing or incomplete.
   [junit4]   2> 320113 INFO  
(TEST-HttpPartitionTest.test-seed#[3A32EC387A633C72]) [    ] 
o.a.s.s.SolrDispatchFilter  ___      _       Welcome to Apache Solr™ version 
6.4.0
   [junit4]   2> 320113 INFO  
(TEST-HttpPartitionTest.test-seed#[3A32EC387A633C72]) [    ] 
o.a.s.s.SolrDispatchFilter / __| ___| |_ _   Starting in cloud mode on port null
   [junit4]   2> 320113 INFO  
(TEST-HttpPartitionTest.test-seed#[3A32EC387A633C72]) [    ] 
o.a.s.s.SolrDispatchFilter \__ \/ _ \ | '_|  Install dir: null
   [junit4]   2> 320113 INFO  
(TEST-HttpPartitionTest.test-seed#[3A32EC387A633C72]) [    ] 
o.a.s.s.SolrDispatchFilter |___/\___/_|_|    Start time: 
2016-12-06T16:18:18.730Z
   [junit4]   2> 320115 INFO  
(TEST-HttpPartitionTest.test-seed#[3A32EC387A633C72]) [    ] 
o.a.s.s.SolrDispatchFilter Loading solr.xml from SolrHome (not found in 
ZooKeeper)
   [junit4]   2> 320115 INFO  
(TEST-HttpPartitionTest.test-seed#[3A32EC387A633C72]) [    ] 
o.a.s.c.SolrXmlConfig Loading container configuration from 
/home/jenkins/workspace/Lucene-Solr-6.x-Linux/solr/build/solr-core/test/J0/temp/solr.cloud.HttpPartitionTest_3A32EC387A633C72-001/shard-2-001/solr.xml
   [junit4]   2> 320120 INFO  
(TEST-HttpPartitionTest.test-seed#[3A32EC387A633C72]) [    ] 
o.a.s.u.UpdateShardHandler Creating UpdateShardHandler HTTP client with params: 
socketTimeout=340000&connTimeout=45000&retry=true
   [junit4]   2> 320121 INFO  
(TEST-HttpPartitionTest.test-seed#[3A32EC387A633C72]) [    ] 
o.a.s.c.ZkContainer Zookeeper client=127.0.0.1:40706/solr
   [junit4]   2> 320125 INFO  
(TEST-HttpPartitionTest.test-seed#[3A32EC387A633C72]) [n:127.0.0.1:40718_    ] 
o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (0) -> (2)
   [junit4]   2> 320127 INFO  
(TEST-HttpPartitionTest.test-seed#[3A32EC387A633C72]) [n:127.0.0.1:40718_    ] 
o.a.s.c.ZkController Register node as live in 
ZooKeeper:/live_nodes/127.0.0.1:40718_
   [junit4]   2> 320127 INFO  (zkCallback-375-thread-1) [    ] 
o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (2) -> (3)
   [junit4]   2> 320127 INFO  
(zkCallback-380-thread-1-processing-n:127.0.0.1:34481_) [n:127.0.0.1:34481_    
] o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (2) -> (3)
   [junit4]   2> 320128 INFO  
(zkCallback-371-thread-2-processing-n:127.0.0.1:35327_) [n:127.0.0.1:35327_    
] o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (2) -> (3)
   [junit4]   2> 320128 INFO  
(zkCallback-386-thread-1-processing-n:127.0.0.1:40718_) [n:127.0.0.1:40718_    
] o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (2) -> (3)
   [junit4]   2> 320195 INFO  
(TEST-HttpPartitionTest.test-seed#[3A32EC387A633C72]) [n:127.0.0.1:40718_    ] 
o.a.s.c.CorePropertiesLocator Found 1 core definitions underneath 
/home/jenkins/workspace/Lucene-Solr-6.x-Linux/solr/build/solr-core/test/J0/temp/solr.cloud.HttpPartitionTest_3A32EC387A633C72-001/shard-2-001/cores
   [junit4]   2> 320196 INFO  
(TEST-HttpPartitionTest.test-seed#[3A32EC387A633C72]) [n:127.0.0.1:40718_    ] 
o.a.s.c.CorePropertiesLocator Cores are: [collection1]
   [junit4]   2> 320197 INFO  
(OverseerStateUpdate-97061509213716484-127.0.0.1:35327_-n_0000000000) 
[n:127.0.0.1:35327_    ] o.a.s.c.o.ReplicaMutator Assigning new node to shard 
shard=shard1
   [junit4]   2> 321211 INFO  
(coreLoadExecutor-832-thread-1-processing-n:127.0.0.1:40718_) 
[n:127.0.0.1:40718_ c:collection1   x:collection1] o.a.s.c.SolrConfig Using 
Lucene MatchVersion: 6.4.0
   [junit4]   2> 321226 INFO  
(coreLoadExecutor-832-thread-1-processing-n:127.0.0.1:40718_) 
[n:127.0.0.1:40718_ c:collection1   x:collection1] o.a.s.s.IndexSchema 
[collection1] Schema name=test
   [junit4]   2> 321337 WARN  
(coreLoadExecutor-832-thread-1-processing-n:127.0.0.1:40718_) 
[n:127.0.0.1:40718_ c:collection1   x:collection1] o.a.s.s.IndexSchema 
[collection1] default search field in schema is text. WARNING: Deprecated, 
please use 'df' on request instead.
   [junit4]   2> 321340 INFO  
(coreLoadExecutor-832-thread-1-processing-n:127.0.0.1:40718_) 
[n:127.0.0.1:40718_ c:collection1   x:collection1] o.a.s.s.IndexSchema Loaded 
schema test/1.0 with uniqueid field id
   [junit4]   2> 321347 INFO  
(coreLoadExecutor-832-thread-1-processing-n:127.0.0.1:40718_) 
[n:127.0.0.1:40718_ c:collection1   x:collection1] o.a.s.c.CoreContainer 
Creating SolrCore 'collection1' using configuration from collection collection1
   [junit4]   2> 321348 INFO  
(coreLoadExecutor-832-thread-1-processing-n:127.0.0.1:40718_) 
[n:127.0.0.1:40718_ c:collection1 s:shard1 r:core_node2 x:collection1] 
o.a.s.c.SolrCore [[collection1] ] Opening new SolrCore at 
[/home/jenkins/workspace/Lucene-Solr-6.x-Linux/solr/build/solr-core/test/J0/temp/solr.cloud.HttpPartitionTest_3A32EC387A633C72-001/shard-2-001/cores/collection1],
 
dataDir=[/home/jenkins/workspace/Lucene-Solr-6.x-Linux/solr/build/solr-core/test/J0/temp/solr.cloud.HttpPartitionTest_3A32EC387A633C72-001/shard-2-001/cores/collection1/data/]
   [junit4]   2> 321348 INFO  
(coreLoadExecutor-832-thread-1-processing-n:127.0.0.1:40718_) 
[n:127.0.0.1:40718_ c:collection1 s:shard1 r:core_node2 x:collection1] 
o.a.s.c.JmxMonitoredMap JMX monitoring is enabled. Adding Solr mbeans to JMX 
Server: com.sun.jmx.mbeanserver.JmxMBeanServer@420a9b7b
   [junit4]   2> 321349 INFO  
(coreLoadExecutor-832-thread-1-processing-n:127.0.0.1:40718_) 
[n:127.0.0.1:40718_ c:collection1 s:shard1 r:core_node2 x:collection1] 
o.a.s.u.RandomMergePolicy RandomMergePolicy wrapping class 
org.apache.lucene.index.LogDocMergePolicy: [LogDocMergePolicy: 
minMergeSize=1000, mergeFactor=36, maxMergeSize=9223372036854775807, 
maxMergeSizeForForcedMerge=9223372036854775807, calibrateSizeByDeletes=false, 
maxMergeDocs=2147483647, maxCFSSegmentSizeMB=8.796093022207999E12, 
noCFSRatio=0.21791737715316925]
   [junit4]   2> 321353 WARN  
(coreLoadExecutor-832-thread-1-processing-n:127.0.0.1:40718_) 
[n:127.0.0.1:40718_ c:collection1 s:shard1 r:core_node2 x:collection1] 
o.a.s.c.RequestHandlers INVALID paramSet a in requestHandler {type = 
requestHandler,name = /dump,class = DumpRequestHandler,args = 
{defaults={a=A,b=B}}}
   [junit4]   2> 321363 INFO  
(coreLoadExecutor-832-thread-1-processing-n:127.0.0.1:40718_) 
[n:127.0.0.1:40718_ c:collection1 s:shard1 r:core_node2 x:collection1] 
o.a.s.u.UpdateHandler Using UpdateLog implementation: 
org.apache.solr.update.UpdateLog
   [junit4]   2> 321363 INFO  
(coreLoadExecutor-832-thread-1-processing-n:127.0.0.1:40718_) 
[n:127.0.0.1:40718_ c:collection1 s:shard1 r:core_node2 x:collection1] 
o.a.s.u.UpdateLog Initializing UpdateLog: dataDir= defaultSyncLevel=FLUSH 
numRecordsToKeep=100 maxNumLogsToKeep=10 numVersionBuckets=65536
   [junit4]   2> 321364 INFO  
(coreLoadExecutor-832-thread-1-processing-n:127.0.0.1:40718_) 
[n:127.0.0.1:40718_ c:collection1 s:shard1 r:core_node2 x:collection1] 
o.a.s.u.CommitTracker Hard AutoCommit: disabled
   [junit4]   2> 321364 INFO  
(coreLoadExecutor-832-thread-1-processing-n:127.0.0.1:40718_) 
[n:127.0.0.1:40718_ c:collection1 s:shard1 r:core_node2 x:collection1] 
o.a.s.u.CommitTracker Soft AutoCommit: disabled
   [junit4]   2> 321364 INFO  
(coreLoadExecutor-832-thread-1-processing-n:127.0.0.1:40718_) 
[n:127.0.0.1:40718_ c:collection1 s:shard1 r:core_node2 x:collection1] 
o.a.s.u.RandomMergePolicy RandomMergePolicy wrapping class 
org.apache.lucene.index.TieredMergePolicy: [TieredMergePolicy: 
maxMergeAtOnce=16, maxMergeAtOnceExplicit=39, maxMergedSegmentMB=6.248046875, 
floorSegmentMB=0.6923828125, forceMergeDeletesPctAllowed=8.94610845497283, 
segmentsPerTier=20.0, maxCFSSegmentSizeMB=8.796093022207999E12, 
noCFSRatio=0.1168316969385793
   [junit4]   2> 321365 INFO  
(coreLoadExecutor-832-thread-1-processing-n:127.0.0.1:40718_) 
[n:127.0.0.1:40718_ c:collection1 s:shard1 r:core_node2 x:collection1] 
o.a.s.s.SolrIndexSearcher Opening [Searcher@26657313[collection1] main]
   [junit4]   2> 321365 INFO  
(coreLoadExecutor-832-thread-1-processing-n:127.0.0.1:40718_) 
[n:127.0.0.1:40718_ c:collection1 s:shard1 r:core_node2 x:collection1] 
o.a.s.r.ManagedResourceStorage Configured ZooKeeperStorageIO with znodeBase: 
/configs/conf1
   [junit4]   2> 321366 INFO  
(coreLoadExecutor-832-thread-1-processing-n:127.0.0.1:40718_) 
[n:127.0.0.1:40718_ c:collection1 s:shard1 r:core_node2 x:collection1] 
o.a.s.r.ManagedResourceStorage Loaded null at path _rest_managed.json using 
ZooKeeperStorageIO:path=/configs/conf1
   [junit4]   2> 321366 INFO  
(coreLoadExecutor-832-thread-1-processing-n:127.0.0.1:40718_) 
[n:127.0.0.1:40718_ c:collection1 s:shard1 r:core_node2 x:collection1] 
o.a.s.h.ReplicationHandler Commits will be reserved for  10000
   [junit4]   2> 321367 INFO  
(searcherExecutor-833-thread-1-processing-n:127.0.0.1:40718_ x:collection1 
s:shard1 c:collection1 r:core_node2) [n:127.0.0.1:40718_ c:collection1 s:shard1 
r:core_node2 x:collection1] o.a.s.c.SolrCore [collection1] Registered new 
searcher Searcher@26657313[collection1] 
main{ExitableDirectoryReader(UninvertingDirectoryReader())}
   [junit4]   2> 321367 INFO  
(coreLoadExecutor-832-thread-1-processing-n:127.0.0.1:40718_) 
[n:127.0.0.1:40718_ c:collection1 s:shard1 r:core_node2 x:collection1] 
o.a.s.u.UpdateLog Could not find max version in index or recent updates, using 
new clock 1552984152456822784
   [junit4]   2> 321372 INFO  
(coreZkRegister-827-thread-1-processing-n:127.0.0.1:40718_ x:collection1 
s:shard1 c:collection1 r:core_node2) [n:127.0.0.1:40718_ c:collection1 s:shard1 
r:core_node2 x:collection1] o.a.s.c.ShardLeaderElectionContext Enough replicas 
found to continue.
   [junit4]   2> 321372 INFO  
(coreZkRegister-827-thread-1-processing-n:127.0.0.1:40718_ x:collection1 
s:shard1 c:collection1 r:core_node2) [n:127.0.0.1:40718_ c:collection1 s:shard1 
r:core_node2 x:collection1] o.a.s.c.ShardLeaderElectionContext I may be the new 
leader - try and sync
   [junit4]   2> 321372 INFO  
(coreZkRegister-827-thread-1-processing-n:127.0.0.1:40718_ x:collection1 
s:shard1 c:collection1 r:core_node2) [n:127.0.0.1:40718_ c:collection1 s:shard1 
r:core_node2 x:collection1] o.a.s.c.SyncStrategy Sync replicas to 
http://127.0.0.1:40718/collection1/
   [junit4]   2> 321372 INFO  
(coreZkRegister-827-thread-1-processing-n:127.0.0.1:40718_ x:collection1 
s:shard1 c:collection1 r:core_node2) [n:127.0.0.1:40718_ c:collection1 s:shard1 
r:core_node2 x:collection1] o.a.s.c.SyncStrategy Sync Success - now sync 
replicas to me
   [junit4]   2> 321372 INFO  
(coreZkRegister-827-thread-1-processing-n:127.0.0.1:40718_ x:collection1 
s:shard1 c:collection1 r:core_node2) [n:127.0.0.1:40718_ c:collection1 s:shard1 
r:core_node2 x:collection1] o.a.s.c.SyncStrategy 
http://127.0.0.1:40718/collection1/ has no replicas
   [junit4]   2> 321374 INFO  
(coreZkRegister-827-thread-1-processing-n:127.0.0.1:40718_ x:collection1 
s:shard1 c:collection1 r:core_node2) [n:127.0.0.1:40718_ c:collection1 s:shard1 
r:core_node2 x:collection1] o.a.s.c.ShardLeaderElectionContext I am the new 
leader: http://127.0.0.1:40718/collection1/ shard1
   [junit4]   2> 321524 INFO  
(coreZkRegister-827-thread-1-processing-n:127.0.0.1:40718_ x:collection1 
s:shard1 c:collection1 r:core_node2) [n:127.0.0.1:40718_ c:collection1 s:shard1 
r:core_node2 x:collection1] o.a.s.c.ZkController I am the leader, no recovery 
necessary
   [junit4]   2> 321809 INFO  
(TEST-HttpPartitionTest.test-seed#[3A32EC387A633C72]) [    ] 
o.a.s.SolrTestCaseJ4 Writing core.properties file to 
/home/jenkins/workspace/Lucene-Solr-6.x-Linux/solr/build/solr-core/test/J0/temp/solr.cloud.HttpPartitionTest_3A32EC387A633C72-001/shard-3-001/cores/collection1
   [junit4]   2> 321809 INFO  
(TEST-HttpPartitionTest.test-seed#[3A32EC387A633C72]) [    ] 
o.a.s.c.AbstractFullDistribZkTestBase create jetty 3 in directory 
/home/jenkins/workspace/Lucene-Solr-6.x-Linux/solr/build/solr-core/test/J0/temp/solr.cloud.HttpPartitionTest_3A32EC387A633C72-001/shard-3-001
   [junit4]   2> 321810 INFO  
(TEST-HttpPartitionTest.test-seed#[3A32EC387A633C72]) [    ] o.e.j.s.Server 
jetty-9.3.14.v20161028
   [junit4]   2> 321810 INFO  
(TEST-HttpPartitionTest.test-seed#[3A32EC387A633C72]) [    ] 
o.e.j.s.h.ContextHandler Started 
o.e.j.s.ServletContextHandler@4c5cea0d{/,null,AVAILABLE}
   [junit4]   2> 321811 INFO  
(TEST-HttpPartitionTest.test-seed#[3A32EC387A633C72]) [    ] 
o.e.j.s.AbstractConnector Started 
ServerConnector@250a6c16{HTTP/1.1,[http/1.1]}{127.0.0.1:43451}
   [junit4]   2> 321811 INFO  
(TEST-HttpPartitionTest.test-seed#[3A32EC387A633C72]) [    ] o.e.j.s.Server 
Started @323508ms
   [junit4]   2> 321811 INFO  
(TEST-HttpPartitionTest.test-seed#[3A32EC387A633C72]) [    ] 
o.a.s.c.s.e.JettySolrRunner Jetty properties: 
{solr.data.dir=/home/jenkins/workspace/Lucene-Solr-6.x-Linux/solr/build/solr-core/test/J0/temp/solr.cloud.HttpPartitionTest_3A32EC387A633C72-001/tempDir-001/jetty3,
 solrconfig=solrconfig.xml, hostContext=/, hostPort=39404, 
coreRootDirectory=/home/jenkins/workspace/Lucene-Solr-6.x-Linux/solr/build/solr-core/test/J0/temp/solr.cloud.HttpPartitionTest_3A32EC387A633C72-001/shard-3-001/cores}
   [junit4]   2> 321812 ERROR 
(TEST-HttpPartitionTest.test-seed#[3A32EC387A633C72]) [    ] 
o.a.s.s.StartupLoggingUtils Missing Java Option solr.log.dir. Logging may be 
missing or incomplete.
   [junit4]   2> 321813 INFO  
(TEST-HttpPartitionTest.test-seed#[3A32EC387A633C72]) [    ] 
o.a.s.s.SolrDispatchFilter  ___      _       Welcome to Apache Solr™ version 
6.4.0
   [junit4]   2> 321813 INFO  
(TEST-HttpPartitionTest.test-seed#[3A32EC387A633C72]) [    ] 
o.a.s.s.SolrDispatchFilter / __| ___| |_ _   Starting in cloud mode on port null
   [junit4]   2> 321813 INFO  
(TEST-HttpPartitionTest.test-seed#[3A32EC387A633C72]) [    ] 
o.a.s.s.SolrDispatchFilter \__ \/ _ \ | '_|  Install dir: null
   [junit4]   2> 321813 INFO  
(TEST-HttpPartitionTest.test-seed#[3A32EC387A633C72]) [    ] 
o.a.s.s.SolrDispatchFilter |___/\___/_|_|    Start time: 
2016-12-06T16:18:20.430Z
   [junit4]   2> 321819 INFO  
(TEST-HttpPartitionTest.test-seed#[3A32EC387A633C72]) [    ] 
o.a.s.s.SolrDispatchFilter Loading solr.xml from SolrHome (not found in 
ZooKeeper)
   [junit4]   2> 321819 INFO  
(TEST-HttpPartitionTest.test-seed#[3A32EC387A633C72]) [    ] 
o.a.s.c.SolrXmlConfig Loading container configuration from 
/home/jenkins/workspace/Lucene-Solr-6.x-Linux/solr/build/solr-core/test/J0/temp/solr.cloud.HttpPartitionTest_3A32EC387A633C72-001/shard-3-001/solr.xml
   [junit4]   2> 321824 INFO  
(TEST-HttpPartitionTest.test-seed#[3A32EC387A633C72]) [    ] 
o.a.s.u.UpdateShardHandler Creating UpdateShardHandler HTTP client with params: 
socketTimeout=340000&connTimeout=45000&retry=true
   [junit4]   2> 321825 INFO  
(TEST-HttpPartitionTest.test-seed#[3A32EC387A633C72]) [    ] 
o.a.s.c.ZkContainer Zookeeper client=127.0.0.1:40706/solr
   [junit4]   2> 321844 INFO  
(TEST-HttpPartitionTest.test-seed#[3A32EC387A633C72]) [n:127.0.0.1:39404_    ] 
o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (0) -> (3)
   [junit4]   2> 321851 INFO  
(TEST-HttpPartitionTest.test-seed#[3A32EC387A633C72]) [n:127.0.0.1:39404_    ] 
o.a.s.c.ZkController Register node as live in 
ZooKeeper:/live_nodes/127.0.0.1:39404_
   [junit4]   2> 321852 INFO  
(zkCallback-386-thread-1-processing-n:127.0.0.1:40718_) [n:127.0.0.1:40718_    
] o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (3) -> (4)
   [junit4]   2> 321852 INFO  (zkCallback-375-thread-1) [    ] 
o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (3) -> (4)
   [junit4]   2> 321853 INFO  
(zkCallback-392-thread-1-processing-n:127.0.0.1:39404_) [n:127.0.0.1:39404_    
] o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (3) -> (4)
   [junit4]   2> 321853 INFO  
(zkCallback-380-thread-1-processing-n:127.0.0.1:34481_) [n:127.0.0.1:34481_    
] o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (3) -> (4)
   [junit4]   2> 321853 INFO  
(zkCallback-371-thread-2-processing-n:127.0.0.1:35327_) [n:127.0.0.1:35327_    
] o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (3) -> (4)
   [junit4]   2> 321935 INFO  
(TEST-HttpPartitionTest.test-seed#[3A32EC387A633C72]) [n:127.0.0.1:39404_    ] 
o.a.s.c.CorePropertiesLocator Found 1 core definitions underneath 
/home/jenkins/workspace/Lucene-Solr-6.x-Linux/solr/build/solr-core/test/J0/temp/solr.cloud.HttpPartitionTest_3A32EC387A633C72-001/shard-3-001/cores
   [junit4]   2> 321936 INFO  
(TEST-HttpPartitionTest.test-seed#[3A32EC387A633C72]) [n:127.0.0.1:39404_    ] 
o.a.s.c.CorePropertiesLocator Cores are: [collection1]
   [junit4]   2> 321937 INFO  
(OverseerStateUpdate-97061509213716484-127.0.0.1:35327_-n_0000000000) 
[n:127.0.0.1:35327_    ] o.a.s.c.o.ReplicaMutator Assigning new node to shard 
shard=shard2
   [junit4]   2> 322945 INFO  
(coreLoadExecutor-843-thread-1-processing-n:127.0.0.1:39404_) 
[n:127.0.0.1:39404_ c:collection1   x:collection1] o.a.s.c.SolrConfig Using 
Lucene MatchVersion: 6.4.0
   [junit4]   2> 322957 INFO  
(coreLoadExecutor-843-thread-1-processing-n:127.0.0.1:39404_) 
[n:127.0.0.1:39404_ c:collection1   x:collection1] o.a.s.s.IndexSchema 
[collection1] Schema name=test
   [junit4]   2> 323103 WARN  
(coreLoadExecutor-843-thread-1-processing-n:127.0.0.1:39404_) 
[n:127.0.0.1:39404_ c:collection1   x:collection1] o.a.s.s.IndexSchema 
[collection1] default search field in schema is text. WARNING: Deprecated, 
please use 'df' on request instead.
   [junit4]   2> 323105 INFO  
(coreLoadExecutor-843-thread-1-processing-n:127.0.0.1:39404_) 
[n:127.0.0.1:39404_ c:collection1   x:collection1] o.a.s.s.IndexSchema Loaded 
schema test/1.0 with uniqueid field id
   [junit4]   2> 323113 INFO  
(coreLoadExecutor-843-thread-1-processing-n:127.0.0.1:39404_) 
[n:127.0.0.1:39404_ c:collection1   x:collection1] o.a.s.c.CoreContainer 
Creating SolrCore 'collection1' using configuration from collection collection1
   [junit4]   2> 323114 INFO  
(coreLoadExecutor-843-thread-1-processing-n:127.0.0.1:39404_) 
[n:127.0.0.1:39404_ c:collection1 s:shard2 r:core_node3 x:collection1] 
o.a.s.c.SolrCore [[collection1] ] Opening new SolrCore at 
[/home/jenkins/workspace/Lucene-Solr-6.x-Linux/solr/build/solr-core/test/J0/temp/solr.cloud.HttpPartitionTest_3A32EC387A633C72-001/shard-3-001/cores/collection1],
 
dataDir=[/home/jenkins/workspace/Lucene-Solr-6.x-Linux/solr/build/solr-core/test/J0/temp/solr.cloud.HttpPartitionTest_3A32EC387A633C72-001/shard-3-001/cores/collection1/data/]
   [junit4]   2> 323114 INFO  
(coreLoadExecutor-843-thread-1-processing-n:127.0.0.1:39404_) 
[n:127.0.0.1:39404_ c:collection1 s:shard2 r:core_node3 x:collection1] 
o.a.s.c.JmxMonitoredMap JMX monitoring is enabled. Adding Solr mbeans to JMX 
Server: com.sun.jmx.mbeanserver.JmxMBeanServer@420a9b7b
   [junit4]   2> 323115 INFO  
(coreLoadExecutor-843-thread-1-processing-n:127.0.0.1:39404_) 
[n:127.0.0.1:39404_ c:collection1 s:shard2 r:core_node3 x:collection1] 
o.a.s.u.RandomMergePolicy RandomMergePolicy wrapping class 
org.apache.lucene.index.LogDocMergePolicy: [LogDocMergePolicy: 
minMergeSize=1000, mergeFactor=36, maxMergeSize=9223372036854775807, 
maxMergeSizeForForcedMerge=9223372036854775807, calibrateSizeByDeletes=false, 
maxMergeDocs=2147483647, maxCFSSegmentSizeMB=8.796093022207999E12, 
noCFSRatio=0.21791737715316925]
   [junit4]   2> 323118 WARN  
(coreLoadExecutor-843-thread-1-processing-n:127.0.0.1:39404_) 
[n:127.0.0.1:39404_ c:collection1 s:shard2 r:core_node3 x:collection1] 
o.a.s.c.RequestHandlers INVALID paramSet a in requestHandler {type = 
requestHandler,name = /dump,class = DumpRequestHandler,args = 
{defaults={a=A,b=B}}}
   [junit4]   2> 323126 INFO  
(coreLoadExecutor-843-thread-1-processing-n:127.0.0.1:39404_) 
[n:127.0.0.1:39404_ c:collection1 s:shard2 r:core_node3 x:collection1] 
o.a.s.u.UpdateHandler Using UpdateLog implementation: 
org.apache.solr.update.UpdateLog
   [junit4]   2> 323126 INFO  
(coreLoadExecutor-843-thread-1-processing-n:127.0.0.1:39404_) 
[n:127.0.0.1:39404_ c:collection1 s:shard2 r:core_node3 x:collection1] 
o.a.s.u.UpdateLog Initializing UpdateLog: dataDir= defaultSyncLevel=FLUSH 
numRecordsToKeep=100 maxNumLogsToKeep=10 numVersionBuckets=65536
   [junit4]   2> 323126 INFO  
(coreLoadExecutor-843-thread-1-processing-n:127.0.0.1:39404_) 
[n:127.0.0.1:39404_ c:collection1 s:shard2 r:core_node3 x:collection1] 
o.a.s.u.CommitTracker Hard AutoCommit: disabled
   [junit4]   2> 323127 INFO  
(coreLoadExecutor-843-thread-1-processing-n:127.0.0.1:39404_) 
[n:127.0.0.1:39404_ c:collection1 s:shard2 r:core_node3 x:collection1] 
o.a.s.u.CommitTracker Soft AutoCommit: disabled
   [junit4]   2> 323127 INFO  
(coreLoadExecutor-843-thread-1-processing-n:127.0.0.1:39404_) 
[n:127.0.0.1:39404_ c:collection1 s:shard2 r:core_node3 x:collection1] 
o.a.s.u.RandomMergePolicy RandomMergePolicy wrapping class 
org.apache.lucene.index.TieredMergePolicy: [TieredMergePolicy: 
maxMergeAtOnce=16, maxMergeAtOnceExplicit=39, maxMergedSegmentMB=6.248046875, 
floorSegmentMB=0.6923828125, forceMergeDeletesPctAllowed=8.94610845497283, 
segmentsPerTier=20.0, maxCFSSegmentSizeMB=8.796093022207999E12, 
noCFSRatio=0.1168316969385793
   [junit4]   2> 323128 INFO  
(coreLoadExecutor-843-thread-1-processing-n:127.0.0.1:39404_) 
[n:127.0.0.1:39404_ c:collection1 s:shard2 r:core_node3 x:collection1] 
o.a.s.s.SolrIndexSearcher Opening [Searcher@3e4602dc[collection1] main]
   [junit4]   2> 323130 INFO  
(coreLoadExecutor-843-thread-1-processing-n:127.0.0.1:39404_) 
[n:127.0.0.1:39404_ c:collection1 s:shard2 r:core_node3 x:collection1] 
o.a.s.r.ManagedResourceStorage Configured ZooKeeperStorageIO with znodeBase: 
/configs/conf1
   [junit4]   2> 323130 INFO  
(coreLoadExecutor-843-thread-1-processing-n:127.0.0.1:39404_) 
[n:127.0.0.1:39404_ c:collection1 s:shard2 r:core_node3 x:collection1] 
o.a.s.r.ManagedResourceStorage Loaded null at path _rest_managed.json using 
ZooKeeperStorageIO:path=/configs/conf1
   [junit4]   2> 323130 INFO  
(coreLoadExecutor-843-thread-1-processing-n:127.0.0.1:39404_) 
[n:127.0.0.1:39404_ c:collection1 s:shard2 r:core_node3 x:collection1] 
o.a.s.h.ReplicationHandler Commits will be reserved for  10000
   [junit4]   2> 323131 INFO  
(searcherExecutor-844-thread-1-processing-n:127.0.0.1:39404_ x:collection1 
s:shard2 c:collection1 r:core_node3) [n:127.0.0.1:39404_ c:collection1 s:shard2 
r:core_node3 x:collection1] o.a.s.c.SolrCore [collection1] Registered new 
searcher Searcher@3e4602dc[collection1] 
main{ExitableDirectoryReader(UninvertingDirectoryReader())}
   [junit4]   2> 323131 INFO  
(coreLoadExecutor-843-thread-1-processing-n:127.0.0.1:39404_) 
[n:127.0.0.1:39404_ c:collection1 s:shard2 r:core_node3 x:collection1] 
o.a.s.u.UpdateLog Could not find max version in index or recent updates, using 
new clock 1552984154306510848
   [junit4]   2> 323135 INFO  
(coreZkRegister-838-thread-1-processing-n:127.0.0.1:39404_ x:collection1 
s:shard2 c:collection1 r:core_node3) [n:127.0.0.1:39404_ c:collection1 s:shard2 
r:core_node3 x:collection1] o.a.s.c.ZkController Core needs to 
recover:collection1
   [junit4]   2> 323136 INFO  
(updateExecutor-389-thread-1-processing-n:127.0.0.1:39404_ x:collection1 
s:shard2 c:collection1 r:core_node3) [n:127.0.0.1:39404_ c:collection1 s:shard2 
r:core_node3 x:collection1] o.a.s.u.DefaultSolrCoreState Running recovery
   [junit4]   2> 323136 INFO  
(recoveryExecutor-390-thread-1-processing-n:127.0.0.1:39404_ x:collection1 
s:shard2 c:collection1 r:core_node3) [n:127.0.0.1:39404_ c:collection1 s:shard2 
r:core_node3 x:collection1] o.a.s.c.RecoveryStrategy Starting recovery process. 
recoveringAfterStartup=true
   [junit4]   2> 323136 INFO  
(recoveryExecutor-390-thread-1-processing-n:127.0.0.1:39404_ x:collection1 
s:shard2 c:collection1 r:core_node3) [n:127.0.0.1:39404_ c:collection1 s:shard2 
r:core_node3 x:collection1] o.a.s.c.RecoveryStrategy ###### startupVersions=[[]]
   [junit4]   2> 323136 INFO  
(recoveryExecutor-390-thread-1-processing-n:127.0.0.1:39404_ x:collection1 
s:shard2 c:collection1 r:core_node3) [n:127.0.0.1:39404_ c:collection1 s:shard2 
r:core_node3 x:collection1] o.a.s.c.RecoveryStrategy Begin buffering updates. 
core=[collection1]
   [junit4]   2> 323136 INFO  
(recoveryExecutor-390-thread-1-processing-n:127.0.0.1:39404_ x:collection1 
s:shard2 c:collection1 r:core_node3) [n:127.0.0.1:39404_ c:collection1 s:shard2 
r:core_node3 x:collection1] o.a.s.u.UpdateLog Starting to buffer updates. 
FSUpdateLog{state=ACTIVE, tlog=null}
   [junit4]   2> 323136 INFO  
(recoveryExecutor-390-thread-1-processing-n:127.0.0.1:39404_ x:collection1 
s:shard2 c:collection1 r:core_node3) [n:127.0.0.1:39404_ c:collection1 s:shard2 
r:core_node3 x:collection1] o.a.s.c.RecoveryStrategy Publishing state of core 
[collection1] as recovering, leader is [http://127.0.0.1:34481/collection1/] 
and I am [http://127.0.0.1:39404/collection1/]
   [junit4]   2> 323138 INFO  
(recoveryExecutor-390-thread-1-processing-n:127.0.0.1:39404_ x:collection1 
s:shard2 c:collection1 r:core_node3) [n:127.0.0.1:39404_ c:collection1 s:shard2 
r:core_node3 x:collection1] o.a.s.c.RecoveryStrategy Sending prep recovery 
command to [http://127.0.0.1:34481]; [WaitForState: 
action=PREPRECOVERY&core=collection1&nodeName=127.0.0.1:39404_&coreNodeName=core_node3&state=recovering&checkLive=true&onlyIfLeader=true&onlyIfLeaderActive=true]
   [junit4]   2> 323139 INFO  (SocketProxy-Acceptor-34481) [    ] 
o.a.s.c.SocketProxy accepted 
Socket[addr=/127.0.0.1,port=48408,localport=34481], receiveBufferSize:531000
   [junit4]   2> 323142 INFO  (SocketProxy-Acceptor-34481) [    ] 
o.a.s.c.SocketProxy proxy connection 
Socket[addr=/127.0.0.1,port=40130,localport=56708], receiveBufferSize=530904
   [junit4]   2> 323147 INFO  (qtp1697198326-1996) [n:127.0.0.1:34481_    ] 
o.a.s.h.a.PrepRecoveryOp Going to wait for coreNodeName: core_node3, state: 
recovering, checkLive: true, onlyIfLeader: true, onlyIfLeaderActive: true
   [junit4]   2> 323147 INFO  (qtp1697198326-1996) [n:127.0.0.1:34481_    ] 
o.a.s.h.a.PrepRecoveryOp Will wait a max of 183 seconds to see collection1 
(shard2 of collection1) have state: recovering
   [junit4]   2> 323147 INFO  (qtp1697198326-1996) [n:127.0.0.1:34481_    ] 
o.a.s.h.a.PrepRecoveryOp In WaitForState(recovering): collection=collection1, 
shard=shard2, thisCore=collection1, leaderDoesNotNeedRecovery=false, isLeader? 
true, live=true, checkLive=true, currentState=down, localState=active, 
nodeName=127.0.0.1:39404_, coreNodeName=core_node3, 
onlyIfActiveCheckResult=false, nodeProps: 
core_node3:{"core":"collection1","base_url":"http://127.0.0.1:39404","node_name":"127.0.0.1:39404_","state":"down"}
   [junit4]   2> 323438 INFO  
(TEST-HttpPartitionTest.test-seed#[3A32EC387A633C72]) [    ] 
o.a.s.SolrTestCaseJ4 ###Starting test
   [junit4]   2> 323438 INFO  
(TEST-HttpPartitionTest.test-seed#[3A32EC387A633C72]) [    ] 
o.a.s.c.AbstractFullDistribZkTestBase Wait for recoveries to finish - wait 
30000 for each attempt
   [junit4]   2> 323438 INFO  
(TEST-HttpPartitionTest.test-seed#[3A32EC387A633C72]) [    ] 
o.a.s.c.AbstractDistribZkTestBase Wait for recoveries to finish - collection: 
collection1 failOnTimeout:true timeout (sec):30000
   [junit4]   2> 324148 INFO  (qtp1697198326-1996) [n:127.0.0.1:34481_    ] 
o.a.s.h.a.PrepRecoveryOp In WaitForState(recovering): collection=collection1, 
shard=shard2, thisCore=collection1, leaderDoesNotNeedRecovery=false, isLeader? 
true, live=true, checkLive=true, currentState=recovering, localState=active, 
nodeName=127.0.0.1:39404_, coreNodeName=core_node3, 
onlyIfActiveCheckResult=false, nodeProps: 
core_node3:{"core":"collection1","base_url":"http://127.0.0.1:39404","node_name":"127.0.0.1:39404_","state":"recovering"}
   [junit4]   2> 324148 INFO  (qtp1697198326-1996) [n:127.0.0.1:34481_    ] 
o.a.s.h.a.PrepRecoveryOp Waited coreNodeName: core_node3, state: recovering, 
checkLive: true, onlyIfLeader: true for: 1 seconds.
   [junit4]   2> 324148 INFO  (qtp1697198326-1996) [n:127.0.0.1:34481_    ] 
o.a.s.s.HttpSolrCall [admin] webapp=null path=/admin/cores 
params={nodeName=127.0.0.1:39404_&onlyIfLeaderActive=true&core=collection1&coreNodeName=core_node3&action=PREPRECOVERY&checkLive=true&state=recovering&onlyIfLeader=true&wt=javabin&version=2}
 status=0 QTime=1001
   [junit4]   2> 331148 INFO  
(recoveryExecutor-390-thread-1-processing-n:127.0.0.1:39404_ x:collection1 
s:shard2 c:collection1 r:core_node3) [n:127.0.0.1:39404_ c:collection1 s:shard2 
r:core_node3 x:collection1] o.a.s.c.RecoveryStrategy Attempting to PeerSync 
from [http://127.0.0.1:34481/collection1/] - recoveringAfterStartup=[true]
   [junit4]   2> 331148 INFO  
(recoveryExecutor-390-thread-1-processing-n:127.0.0.1:39404_ x:collection1 
s:shard2 c:collection1 r:core_node3) [n:127.0.0.1:39404_ c:collection1 s:shard2 
r:core_node3 x:collection1] o.a.s.u.PeerSync PeerSync: core=collection1 
url=http://127.0.0.1:39404 START replicas=[http://127.0.0.1:34481/collection1/] 
nUpdates=100
   [junit4]   2> 331150 INFO  (SocketProxy-Acceptor-34481) [    ] 
o.a.s.c.SocketProxy accepted 
Socket[addr=/127.0.0.1,port=48446,localport=34481], receiveBufferSize:531000
   [junit4]   2> 331150 INFO  (SocketProxy-Acceptor-34481) [    ] 
o.a.s.c.SocketProxy proxy connection 
Socket[addr=/127.0.0.1,port=40130,localport=56746], receiveBufferSize=530904
   [junit4]   2> 331153 INFO  (qtp1697198326-1996) [n:127.0.0.1:34481_ 
c:collection1 s:shard2 r:core_node1 x:collection1] o.a.s.u.IndexFingerprint 
IndexFingerprint millis:2.0 result:{maxVersionSpecified=9223372036854775807, 
maxVersionEncountered=0, maxInHash=0, versionsHash=0, numVersions=0, numDocs=0, 
maxDoc=0}
   [junit4]   2> 331153 INFO  (qtp1697198326-1996) [n:127.0.0.1:34481_ 
c:collection1 s:shard2 r:core_node1 x:collection1] o.a.s.c.S.Request 
[collection1]  webapp= path=/get 
params={distrib=false&qt=/get&getFingerprint=9223372036854775807&wt=javabin&version=2}
 status=0 QTime=2
   [junit4]   2> 331154 INFO  
(recoveryExecutor-390-thread-1-processing-n:127.0.0.1:39404_ x:collection1 
s:shard2 c:collection1 r:core_node3) [n:127.0.0.1:39404_ c:collection1 s:shard2 
r:core_node3 x:collection1] o.a.s.u.IndexFingerprint IndexFingerprint 
millis:0.0 result:{maxVersionSpecified=9223372036854775807, 
maxVersionEncountered=0, maxInHash=0, versionsHash=0, numVersions=0, numDocs=0, 
maxDoc=0}
   [junit4]   2> 331154 INFO  
(recoveryExecutor-390-thread-1-processing-n:127.0.0.1:39404_ x:collection1 
s:shard2 c:collection1 r:core_node3) [n:127.0.0.1:39404_ c:collection1 s:shard2 
r:core_node3 x:collection1] o.a.s.u.PeerSync We are already in sync. No need to 
do a PeerSync 
   [junit4]   2> 331154 INFO  
(recoveryExecutor-390-thread-1-processing-n:127.0.0.1:39404_ x:collection1 
s:shard2 c:collection1 r:core_node3) [n:127.0.0.1:39404_ c:collection1 s:shard2 
r:core_node3 x:collection1] o.a.s.u.DirectUpdateHandler2 start 
commit{,optimize=false,openSearcher=true,waitSearcher=true,expungeDeletes=false,softCommit=false,prepareCommit=false}
   [junit4]   2> 331154 INFO  
(recoveryExecutor-390-thread-1-processing-n:127.0.0.1:39404_ x:collection1 
s:shard2 c:collection1 r:core_node3) [n:127.0.0.1:39404_ c:collection1 s:shard2 
r:core_node3 x:collection1] o.a.s.u.DirectUpdateHandler2 No uncommitted 
changes. Skipping IW.commit.
   [junit4]   2> 331154 INFO  
(recoveryExecutor-390-thread-1-processing-n:127.0.0.1:39404_ x:collection1 
s:shard2 c:collection1 r:core_node3) [n:127.0.0.1:39404_ c:collection1 s:shard2 
r:core_node3 x:collection1] o.a.s.u.DirectUpdateHandler2 end_commit_flush
   [junit4]   2> 331154 INFO  
(recoveryExecutor-390-thread-1-processing-n:127.0.0.1:39404_ x:collection1 
s:shard2 c:collection1 r:core_node3) [n:127.0.0.1:39404_ c:collection1 s:shard2 
r:core_node3 x:collection1] o.a.s.c.RecoveryStrategy PeerSync stage of recovery 
was successful.
   [junit4]   2> 331154 INFO  
(recoveryExecutor-390-thread-1-processing-n:127.0.0.1:39404_ x:collection1 
s:shard2 c:collection1 r:core_node3) [n:127.0.0.1:39404_ c:collection1 s:shard2 
r:core_node3 x:collection1] o.a.s.c.RecoveryStrategy Replaying updates buffered 
during PeerSync.
   [junit4]   2> 331154 INFO  
(recoveryExecutor-390-thread-1-processing-n:127.0.0.1:39404_ x:collection1 
s:shard2 c:collection1 r:core_node3) [n:127.0.0.1:39404_ c:collection1 s:shard2 
r:core_node3 x:collection1] o.a.s.c.RecoveryStrategy No replay needed.
   [junit4]   2> 331154 INFO  
(recoveryExecutor-390-thread-1-processing-n:127.0.0.1:39404_ x:collection1 
s:shard2 c:collection1 r:core_node3) [n:127.0.0.1:39404_ c:collection1 s:shard2 
r:core_node3 x:collection1] o.a.s.c.RecoveryStrategy Registering as Active 
after recovery.
   [junit4]   2> 331439 INFO  
(TEST-HttpPartitionTest.test-seed#[3A32EC387A633C72]) [    ] 
o.a.s.c.AbstractDistribZkTestBase Recoveries finished - collection: collection1
   [junit4]   2> 331442 INFO  (SocketProxy-Acceptor-35327) [    ] 
o.a.s.c.SocketProxy accepted 
Socket[addr=/127.0.0.1,port=44302,localport=35327], receiveBufferSize:531000
   [junit4]   2> 331453 INFO  (SocketProxy-Acceptor-35327) [    ] 
o.a.s.c.SocketProxy proxy connection 
Socket[addr=/127.0.0.1,port=34392,localport=33128], receiveBufferSize=530904
   [junit4]   2> 331456 INFO  (qtp848240162-1961) [n:127.0.0.1:35327_ 
c:control_collection s:shard1 r:core_node1 x:collection1] 
o.a.s.u.DirectUpdateHandler2 start 
commit{,optimize=false,openSearcher=true,waitSearcher=true,expungeDeletes=false,softCommit=false,prepareCommit=false}
   [junit4]   2> 331460 INFO  (qtp848240162-1961) [n:127.0.0.1:35327_ 
c:control_collection s:shard1 r:core_node1 x:collection1] 
o.a.s.u.DirectUpdateHandler2 No uncommitted changes. Skipping IW.commit.
   [junit4]   2> 331461 INFO  (qtp848240162-1961) [n:127.0.0.1:35327_ 
c:control_collection s:shard1 r:core_node1 x:collection1] 
o.a.s.u.DirectUpdateHandler2 end_commit_flush
   [junit4]   2> 331461 INFO  (qtp848240162-1961) [n:127.0.0.1:35327_ 
c:control_collection s:shard1 r:core_node1 x:collection1] 
o.a.s.u.p.LogUpdateProcessorFactory [collection1]  webapp= path=/update 
params={waitSearcher=true&commit=true&softCommit=false&wt=javabin&version=2}{commit=}
 0 5
   [junit4]   2> 331462 INFO  (SocketProxy-Acceptor-34481) [    ] 
o.a.s.c.SocketProxy accepted 
Socket[addr=/127.0.0.1,port=48468,localport=34481], receiveBufferSize:531000
   [junit4]   2> 331466 INFO  (SocketProxy-Acceptor-34481) [    ] 
o.a.s.c.SocketProxy proxy connection 
Socket[addr=/127.0.0.1,port=40130,localport=56768], receiveBufferSize=530904
   [junit4]   2> 331508 INFO  (SocketProxy-Acceptor-40718) [    ] 
o.a.s.c.SocketProxy accepted 
Socket[addr=/127.0.0.1,port=58374,localport=40718], receiveBufferSize:531000
   [junit4]   2> 331508 INFO  (SocketProxy-Acceptor-34481) [    ] 
o.a.s.c.SocketProxy accepted 
Socket[addr=/127.0.0.1,port=48476,localport=34481], receiveBufferSize:531000
   [junit4]   2> 331509 INFO  (SocketProxy-Acceptor-40718) [    ] 
o.a.s.c.SocketProxy proxy connection 
Socket[addr=/127.0.0.1,port=34763,localport=44192], receiveBufferSize=530904
   [junit4]   2> 331510 INFO  (SocketProxy-Acceptor-34481) [    ] 
o.a.s.c.SocketProxy proxy connection 
Socket[addr=/127.0.0.1,port=40130,localport=56778], receiveBufferSize=530904
   [junit4]   2> 331534 INFO  (qtp1697198326-1994) [n:127.0.0.1:34481_ 
c:collection1 s:shard2 r:core_node1 x:collection1] o.a.s.u.DirectUpdateHandler2 
start 
commit{,optimize=false,openSearcher=true,waitSearcher=true,expungeDeletes=false,softCommit=false,prepareCommit=false}
   [junit4]   2> 331534 INFO  (qtp1697198326-1994) [n:127.0.0.1:34481_ 
c:collection1 s:shard2 r:core_node1 x:collection1] o.a.s.u.DirectUpdateHandler2 
No uncommitted changes. Skipping IW.commit.
   [junit4]   2> 331536 INFO  (qtp1691932946-2025) [n:127.0.0.1:40718_ 
c:collection1 s:shard1 r:core_node2 x:collection1] o.a.s.u.DirectUpdateHandler2 
start 
commit{,optimize=false,openSearcher=true,waitSearcher=true,expungeDeletes=false,softCommit=false,prepareCommit=false}
   [junit4]   2> 331545 INFO  (SocketProxy-Acceptor-39404) [    ] 
o.a.s.c.SocketProxy accepted 
Socket[addr=/127.0.0.1,port=40496,localport=39404], receiveBufferSize:531000
   [junit4]   2> 331557 INFO  (SocketProxy-Acceptor-39404) [    ] 
o.a.s.c.SocketProxy proxy connection 
Socket[addr=/127.0.0.1,port=43451,localport=33464], receiveBufferSize=530904
   [junit4]   2> 331557 INFO  (qtp1691932946-2025) [n:127.0.0.1:40718_ 
c:collection1 s:shard1 r:core_node2 x:collection1] o.a.s.u.DirectUpdateHandler2 
No uncommitted changes. Skipping IW.commit.
   [junit4]   2> 331559 INFO  (qtp1697198326-1994) [n:127.0.0.1:34481_ 
c:collection1 s:shard2 r:core_node1 x:collection1] o.a.s.u.DirectUpdateHandler2 
end_commit_flush
   [junit4]   2> 331559 INFO  (qtp1691932946-2025) [n:127.0.0.1:40718_ 
c:collection1 s:shard1 r:core_node2 x:collection1] o.a.s.u.DirectUpdateHandler2 
end_commit_flush
   [junit4]   2> 331559 INFO  (qtp1697198326-1994) [n:127.0.0.1:34481_ 
c:collection1 s:shard2 r:core_node1 x:collection1] 
o.a.s.u.p.LogUpdateProcessorFactory [collection1]  webapp= path=/update 
params={update.distrib=FROMLEADER&waitSearcher=true&openSearcher=true&commit=true&softCommit=false&distrib.from=http://127.0.0.1:34481/collection1/&commit_end_point=true&wt=javabin&version=2&expungeDeletes=false}{commit=}
 0 25
   [junit4]   2> 331559 INFO  (qtp1691932946-2025) [n:127.0.0.1:40718_ 
c:collection1 s:shard1 r:core_node2 x:collection1] 
o.a.s.u.p.LogUpdateProcessorFactory [collection1]  webapp= path=/update 
params={update.distrib=FROMLEADER&waitSearcher=true&openSearcher=true&commit=true&softCommit=false&distrib.from=http://127.0.0.1:34481/collection1/&commit_end_point=true&wt=javabin&version=2&expungeDeletes=false}{commit=}
 0 23
   [junit4]   2> 331560 INFO  (qtp809880539-2054) [n:127.0.0.1:39404_ 
c:collection1 s:shard2 r:core_node3 x:collection1] o.a.s.u.DirectUpdateHandler2 
start 
commit{,optimize=false,openSearcher=true,waitSearcher=true,expungeDeletes=false,softCommit=false,prepareCommit=false}
   [junit4]   2> 331560 INFO  (qtp809880539-2054) [n:127.0.0.1:39404_ 
c:collection1 s:shard2 r:core_node3 x:collection1] o.a.s.u.DirectUpdateHandler2 
No uncommitted changes. Skipping IW.commit.
   [junit4]   2> 331560 INFO  (qtp809880539-2054) [n:127.0.0.1:39404_ 
c:collection1 s:shard2 r:core_node3 x:collection1] o.a.s.u.DirectUpdateHandler2 
end_commit_flush
   [junit4]   2> 331560 INFO  (qtp809880539-2054) [n:127.0.0.1:39404_ 
c:collection1 s:shard2 r:core_node3 x:collection1] 
o.a.s.u.p.LogUpdateProcessorFactory [collection1]  webapp= path=/update 
params={update.distrib=FROMLEADER&waitSearcher=true&openSearcher=true&commit=true&softCommit=false&distrib.from=http://127.0.0.1:34481/collection1/&commit_end_point=true&wt=javabin&version=2&expungeDeletes=false}{commit=}
 0 0
   [junit4]   2> 331560 INFO  (qtp1697198326-1993) [n:127.0.0.1:34481_ 
c:collection1 s:shard2 r:core_node1 x:collection1] 
o.a.s.u.p.LogUpdateProcessorFactory [collection1]  webapp= path=/update 
params={waitSearcher=true&commit=true&softCommit=false&wt=javabin&version=2}{commit=}
 0 91
   [junit4]   2> 331562 INFO  (SocketProxy-Acceptor-34481) [    ] 
o.a.s.c.SocketProxy accepted 
Socket[addr=/127.0.0.1,port=48486,localport=34481], receiveBufferSize:531000
   [junit4]   2> 331562 INFO  (SocketProxy-Acceptor-34481) [    ] 
o.a.s.c.SocketProxy proxy connection 
Socket[addr=/127.0.0.1,port=40130,localport=56786], receiveBufferSize=530904
   [junit4]   2> 331566 INFO  (qtp1697198326-1991) [n:127.0.0.1:34481_ 
c:collection1 s:shard2 r:core_node1 x:collection1] o.a.s.c.S.Request 
[collection1]  webapp= path=/select 
params={q=*:*&distrib=false&tests=checkShardConsistency&rows=0&wt=javabin&version=2}
 hits=0 status=0 QTime=0
   [junit4]   2> 331569 INFO  (SocketProxy-Acceptor-39404) [    ] 
o.a.s.c.SocketProxy accepted 
Socket[addr=/127.0.0.1,port=40504,localport=39404], receiveBufferSize:531000
   [junit4]   2> 331572 INFO  (SocketProxy-Acceptor-39404) [    ] 
o.a.s.c.SocketProxy proxy connection 
Socket[addr=/127.0.0.1,port=43451,localport=33472], receiveBufferSize=530904
   [junit4]   2> 331573 INFO  (qtp809880539-2050) [n:127.0.0.1:39404_ 
c:collection1 s:shard2 r:core_node3 x:collection1] o.a.s.c.S.Request 
[collection1]  webapp= path=/select 
params={q=*:*&distrib=false&tests=checkShardConsistency&rows=0&wt=javabin&version=2}
 hits=0 status=0 QTime=0
   [junit4]   2> 331575 INFO  (SocketProxy-Acceptor-40718) [    ] 
o.a.s.c.SocketProxy accepted 
Socket[addr=/127.0.0.1,port=58394,localport=40718], receiveBufferSize:531000
   [junit4]   2> 331575 INFO  (SocketProxy-Acceptor-40718) [    ] 
o.a.s.c.SocketProxy proxy connection 
Socket[addr=/127.0.0.1,port=34763,localport=44210], receiveBufferSize=530904
   [junit4]   2> 331578 INFO  (qtp1691932946-2021) [n:127.0.0.1:40718_ 
c:collection1 s:shard1 r:core_node2 x:collection1] o.a.s.c.S.Request 
[collection1]  webapp= path=/select 
params={q=*:*&distrib=false&tests=checkShardConsistency&rows=0&wt=javabin&version=2}
 hits=0 status=0 QTime=0
   [junit4]   2> 333586 INFO  
(TEST-HttpPartitionTest.test-seed#[3A32EC387A633C72]) [    ] 
o.a.s.c.AbstractFullDistribZkTestBase Creating collection with stateFormat=1: 
c8n_crud_1x2
   [junit4]   2> 333587 INFO  (SocketProxy-Acceptor-40718) [    ] 
o.a.s.c.SocketProxy accepted 
Socket[addr=/127.0.0.1,port=58418,localport=40718], receiveBufferSize:531000
   [junit4]   2> 333588 INFO  (SocketProxy-Acceptor-40718) [    ] 
o.a.s.c.SocketProxy proxy connection 
Socket[addr=/127.0.0.1,port=34763,localport=44234], receiveBufferSize=530904
   [junit4]   2> 333590 INFO  (qtp1691932946-2022) [n:127.0.0.1:40718_    ] 
o.a.s.h.a.CollectionsHandler Invoked Collection Action :create with params 
replicationFactor=2&maxShardsPerNode=1&name=c8n_crud_1x2&action=CREATE&numShards=1&stateFormat=1&wt=javabin&version=2
 and sendToOCPQueue=true
   [junit4]   2> 333591 INFO  
(OverseerThreadFactory-808-thread-1-processing-n:127.0.0.1:35327_) 
[n:127.0.0.1:35327_    ] o.a.s.c.CreateCollectionCmd Create collection 
c8n_crud_1x2
   [junit4]   2> 333591 INFO  
(OverseerThreadFactory-808-thread-1-processing-n:127.0.0.1:35327_) 
[n:127.0.0.1:35327_    ] o.a.s.c.CreateCollectionCmd Only one config set found 
in zk - using it:conf1
   [junit4]   2> 333695 INFO  (SocketProxy-Acceptor-40718) [    ] 
o.a.s.c.SocketProxy accepted 
Socket[addr=/127.0.0.1,port=58422,localport=40718], receiveBufferSize:531000
   [junit4]   2> 333695 INFO  (SocketProxy-Acceptor-35327) [    ] 
o.a.s.c.SocketProxy accepted 
Socket[addr=/127.0.0.1,port=44368,localport=35327], receiveBufferSize:531000
   [junit4]   2> 333696 INFO  (SocketProxy-Acceptor-40718) [    ] 
o.a.s.c.SocketProxy proxy connection 
Socket[addr=/127.0.0.1,port=34763,localport=44242], receiveBufferSize=530904
   [junit4]   2> 333696 INFO  (SocketProxy-Acceptor-35327) [    ] 
o.a.s.c.SocketProxy proxy connection 
Socket[addr=/127.0.0.1,port=34392,localport=33194], receiveBufferSize=530904
   [junit4]   2> 333697 INFO  (qtp1691932946-2021) [n:127.0.0.1:40718_    ] 
o.a.s.h.a.CoreAdminOperation core create command 
qt=/admin/cores&collection.configName=conf1&newCollection=true&name=c8n_crud_1x2_shard1_replica2&action=CREATE&numShards=1&collection=c8n_crud_1x2&shard=shard1&wt=javabin&version=2
   [junit4]   2> 333697 INFO  (qtp848240162-1955) [n:127.0.0.1:35327_    ] 
o.a.s.h.a.CoreAdminOperation core create command 
qt=/admin/cores&collection.configName=conf1&newCollection=true&name=c8n_crud_1x2_shard1_replica1&action=CREATE&numShards=1&collection=c8n_crud_1x2&shard=shard1&wt=javabin&version=2
   [junit4]   2> 334713 INFO  (qtp1691932946-2021) [n:127.0.0.1:40718_ 
c:c8n_crud_1x2 s:shard1  x:c8n_crud_1x2_shard1_replica2] o.a.s.c.SolrConfig 
Using Lucene MatchVersion: 6.4.0
   [junit4]   2> 334713 INFO  (qtp848240162-1955) [n:127.0.0.1:35327_ 
c:c8n_crud_1x2 s:shard1  x:c8n_crud_1x2_shard1_replica1] o.a.s.c.SolrConfig 
Using Lucene MatchVersion: 6.4.0
   [junit4]   2> 334734 INFO  (qtp848240162-1955) [n:127.0.0.1:35327_ 
c:c8n_crud_1x2 s:shard1  x:c8n_crud_1x2_shard1_replica1] o.a.s.s.IndexSchema 
[c8n_crud_1x2_shard1_replica1] Schema name=test
   [junit4]   2> 334734 INFO  (qtp1691932946-2021) [n:127.0.0.1:40718_ 
c:c8n_crud_1x2 s:shard1  x:c8n_crud_1x2_shard1_replica2] o.a.s.s.IndexSchema 
[c8n_crud_1x2_shard1_replica2] Schema name=test
   [junit4]   2> 334831 WARN  (qtp1691932946-2021) [n:127.0.0.1:40718_ 
c:c8n_crud_1x2 s:shard1  x:c8n_crud_1x2_shard1_replica2] o.a.s.s.IndexSchema 
[c8n_crud_1x2_shard1_replica2] default search field in schema is text. WARNING: 
Deprecated, please use 'df' on request instead.
   [junit4]   2> 334833 WARN  (qtp848240162-1955) [n:127.0.0.1:35327_ 
c:c8n_crud_1x2 s:shard1  x:c8n_crud_1x2_shard1_replica1] o.a.s.s.IndexSchema 
[c8n_crud_1x2_shard1_replica1] default search field in schema is text. WARNING: 
Deprecated, please use 'df' on request instead.
   [junit4]   2> 334834 INFO  (qtp1691932946-2021) [n:127.0.0.1:40718_ 
c:c8n_crud_1x2 s:shard1  x:c8n_crud_1x2_shard1_replica2] o.a.s.s.IndexSchema 
Loaded schema test/1.0 with uniqueid field id
   [junit4]   2> 334834 INFO  (qtp848240162-1955) [n:127.0.0.1:35327_ 
c:c8n_crud_1x2 s:shard1  x:c8n_crud_1x2_shard1_replica1] o.a.s.s.IndexSchema 
Loaded schema test/1.0 with uniqueid field id
   [junit4]   2> 334842 INFO  (qtp848240162-1955) [n:127.0.0.1:35327_ 
c:c8n_crud_1x2 s:shard1  x:c8n_crud_1x2_shard1_replica1] o.a.s.c.CoreContainer 
Creating SolrCore 'c8n_crud_1x2_shard1_replica1' using configuration from 
collection c8n_crud_1x2
   [junit4]   2> 334842 INFO  (qtp848240162-1955) [n:127.0.0.1:35327_ 
c:c8n_crud_1x2 s:shard1 r:core_node1 x:c8n_crud_1x2_shard1_replica1] 
o.a.s.c.SolrCore [[c8n_crud_1x2_shard1_replica1] ] Opening new SolrCore at 
[/home/jenkins/workspace/Lucene-Solr-6.x-Linux/solr/build/solr-core/test/J0/temp/solr.cloud.HttpPartitionTest_3A32EC387A633C72-001/control-001/cores/c8n_crud_1x2_shard1_replica1],
 
dataDir=[/home/jenkins/workspace/Lucene-Solr-6.x-Linux/solr/build/solr-core/test/J0/temp/solr.cloud.HttpPartitionTest_3A32EC387A633C72-001/control-001/cores/c8n_crud_1x2_shard1_replica1/data/]
   [junit4]   2> 334842 INFO  (qtp1691932946-2021) [n:127.0.0.1:40718_ 
c:c8n_crud_1x2 s:shard1  x:c8n_crud_1x2_shard1_replica2] o.a.s.c.CoreContainer 
Creating SolrCore 'c8n_crud_1x2_shard1_replica2' using configuration from 
collection c8n_crud_1x2
   [junit4]   2> 334842 INFO  (qtp848240162-1955) [n:127.0.0.1:35327_ 
c:c8n_crud_1x2 s:shard1 r:core_node1 x:c8n_crud_1x2_shard1_replica1] 
o.a.s.c.JmxMonitoredMap JMX monitoring is enabled. Adding Solr mbeans to JMX 
Server: com.sun.jmx.mbeanserver.JmxMBeanServer@420a9b7b
   [junit4]   2> 334843 INFO  (qtp1691932946-2021) [n:127.0.0.1:40718_ 
c:c8n_crud_1x2 s:shard1 r:core_node2 x:c8n_crud_1x2_shard1_replica2] 
o.a.s.c.SolrCore [[c8n_crud_1x2_shard1_replica2] ] Opening new SolrCore at 
[/home/jenkins/workspace/Lucene-Solr-6.x-Linux/solr/build/solr-core/test/J0/temp/solr.cloud.HttpPartitionTest_3A32EC387A633C72-001/shard-2-001/cores/c8n_crud_1x2_shard1_replica2],
 
dataDir=[/home/jenkins/workspace/Lucene-Solr-6.x-Linux/solr/build/solr-core/test/J0/temp/solr.cloud.HttpPartitionTest_3A32EC387A633C72-001/shard-2-001/cores/c8n_crud_1x2_shard1_replica2/data/]
   [junit4]   2> 334843 INFO  (qtp1691932946-2021) [n:127.0.0.1:40718_ 
c:c8n_crud_1x2 s:shard1 r:core_node2 x:c8n_crud_1x2_shard1_replica2] 
o.a.s.c.JmxMonitoredMap JMX monitoring is enabled. Adding Solr mbeans to JMX 
Server: com.sun.jmx.mbeanserver.JmxMBeanServer@420a9b7b
   [junit4]   2> 334845 INFO  (qtp848240162-1955) [n:127.0.0.1:35327_ 
c:c8n_crud_1x2 s:shard1 r:core_node1 x:c8n_crud_1x2_shard1_replica1] 
o.a.s.u.RandomMergePolicy RandomMergePolicy wrapping class 
org.apache.lucene.index.LogDocMergePolicy: [LogDocMergePolicy: 
minMergeSize=1000, mergeFactor=36, maxMergeSize=9223372036854775807, 
maxMergeSizeForForcedMerge=9223372036854775807, calibrateSizeByDeletes=false, 
maxMergeDocs=2147483647, maxCFSSegmentSizeMB=8.796093022207999E12, 
noCFSRatio=0.21791737715316925]
   [junit4]   2> 334845 INFO  (qtp1691932946-2021) [n:127.0.0.1:40718_ 
c:c8n_crud_1x2 s:shard1 r:core_node2 x:c8n_crud_1x2_shard1_replica2] 
o.a.s.u.RandomMergePolicy RandomMergePolicy wrapping class 
org.apache.lucene.index.LogDocMergePolicy: [LogDocMergePolicy: 
minMergeSize=1000, mergeFactor=36, maxMergeSize=9223372036854775807, 
maxMergeSizeForForcedMerge=9223372036854775807, calibrateSizeByDeletes=false, 
maxMergeDocs=2147483647, maxCFSSegmentSizeMB=8.796093022207999E12, 
noCFSRatio=0.21791737715316925]
   [junit4]   2> 334848 WARN  (qtp848240162-1955) [n:127.0.0.1:35327_ 
c:c8n_crud_1x2 s:shard1 r:core_node1 x:c8n_crud_1x2_shard1_replica1] 
o.a.s.c.RequestHandlers INVALID paramSet a in requestHandler {type = 
requestHandler,name = /dump,class = DumpRequestHandler,args = 
{defaults={a=A,b=B}}}
   [junit4]   2> 334848 WARN  (qtp1691932946-2021) [n:127.0.0.1:40718_ 
c:c8n_crud_1x2 s:shard1 r:core_node2 x:c8n_crud_1x2_shard1_replica2] 
o.a.s.c.RequestHandlers INVALID paramSet a in requestHandler {type = 
requestHandler,name = /dump,class = DumpRequestHandler,args = 
{defaults={a=A,b=B}}}
   [junit4]   2> 334853 INFO  (qtp848240162-1955) [n:127.0.0.1:35327_ 
c:c8n_crud_1x2 s:shard1 r:core_node1 x:c8n_crud_1x2_shard1_replica1] 
o.a.s.u.UpdateHandler Using UpdateLog implementation: 
org.apache.solr.update.UpdateLog
   [junit4]   2> 334853 INFO  (qtp848240162-1955) [n:127.0.0.1:35327_ 
c:c8n_crud_1x2 s:shard1 r:core_node1 x:c8n_crud_1x2_shard1_replica1] 
o.a.s.u.UpdateLog Initializing UpdateLog: dataDir= defaultSyncLevel=FLUSH 
numRecordsToKeep=100 maxNumLogsToKeep=10 numVersionBuckets=65536
   [junit4]   2> 334854 INFO  (qtp848240162-1955) [n:127.0.0.1:35327_ 
c:c8n_crud_1x2 s:shard1 r:core_node1 x:c8n_crud_1x2_shard1_replica1] 
o.a.s.u.CommitTracker Hard AutoCommit: disabled
   [junit4]   2> 334854 INFO  (qtp848240162-1955) [n:127.0.0.1:35327_ 
c:c8n_crud_1x2 s:shard1 r:core_node1 x:c8n_crud_1x2_shard1_replica1] 
o.a.s.u.CommitTracker Soft AutoCommit: disabled
   [junit4]   2> 334854 INFO  (qtp848240162-1955) [n:127.0.0.1:35327_ 
c:c8n_crud_1x2 s:shard1 r:core_node1 x:c8n_crud_1x2_shard1_replica1] 
o.a.s.u.RandomMergePolicy RandomMergePolicy wrapping class 
org.apache.lucene.index.TieredMergePolicy: [TieredMergePolicy: 
maxMergeAtOnce=16, maxMergeAtOnceExplicit=39, maxMergedSegmentMB=6.248046875, 
floorSegmentMB=0.6923828125, forceMergeDeletesPctAllowed=8.94610845497283, 
segmentsPerTier=20.0, maxCFSSegmentSizeMB=8.796093022207999E12, 
noCFSRatio=0.1168316969385793
   [junit4]   2> 334854 INFO  (qtp848240162-1955) [n:127.0.0.1:35327_ 
c:c8n_crud_1x2 s:shard1 r:core_node1 x:c8n_crud_1x2_shard1_replica1] 
o.a.s.s.SolrIndexSearcher Opening 
[Searcher@6e79ed2a[c8n_crud_1x2_shard1_replica1] main]
   [junit4]   2> 334855 INFO  (qtp1691932946-2021) [n:127.0.0.1:40718_ 
c:c8n_crud_1x2 s:shard1 r:core_node2 x:c8n_crud_1x2_shard1_replica2] 
o.a.s.u.UpdateHandler Using UpdateLog implementation: 
org.apache.solr.update.UpdateLog
   [junit4]   2> 334855 INFO  (qtp848240162-1955) [n:127.0.0.1:35327_ 
c:c8n_crud_1x2 s:shard1 r:core_node1 x:c8n_crud_1x2_shard1_replica1] 
o.a.s.r.ManagedResourceStorage Configured ZooKeeperStorageIO with znodeBase: 
/configs/conf1
   [junit4]   2> 334855 INFO  (qtp1691932946-2021) [n:127.0.0.1:40718_ 
c:c8n_crud_1x2 s:shard1 r:core_node2 x:c8n_crud_1x2_shard1_replica2] 
o.a.s.u.UpdateLog Initializing UpdateLog: dataDir= defaultSyncLevel=FLUSH 
numRecordsToKeep=100 maxNumLogsToKeep=10 numVersionBuckets=65536
   [junit4]   2> 334855 INFO  (qtp848240162-1955) [n:127.0.0.1:35327_ 
c:c8n_crud_1x2 s:shard1 r:core_node1 x:c8n_crud_1x2_shard1_replica1] 
o.a.s.r.ManagedResourceStorage Loaded null at path _rest_managed.json using 
ZooKeeperStorageIO:path=/configs/conf1
   [junit4]   2> 334855 INFO  (qtp848240162-1955) [n:127.0.0.1:35327_ 
c:c8n_crud_1x2 s:shard1 r:core_node1 x:c8n_crud_1x2_shard1_replica1] 
o.a.s.h.ReplicationHandler Commits will be reserved for  10000
   [junit4]   2> 334856 INFO  (qtp1691932946-2021) [n:127.0.0.1:40718_ 
c:c8n_crud_1x2 s:shard1 r:core_node2 x:c8n_crud_1x2_shard1_replica2] 
o.a.s.u.CommitTracker Hard AutoCommit: disabled
   [junit4]   2> 334856 INFO  (qtp1691932946-2021) [n:127.0.0.1:40718_ 
c:c8n_crud_1x2 s:shard1 r:core_node2 x:c8n_crud_1x2_shard1_replica2] 
o.a.s.u.CommitTracker Soft AutoCommit: disabled
   [junit4]   2> 334856 INFO  
(searcherExecutor-849-thread-1-processing-n:127.0.0.1:35327_ 
x:c8n_crud_1x2_shard1_replica1 s:shard1 c:c8n_crud_1x2 r:core_node1) 
[n:127.0.0.1:35327_ c:c8n_crud_1x2 s:shard1 r:core_node1 
x:c8n_crud_1x2_shard1_replica1] o.a.s.c.SolrCore [c8n_crud_1x2_shard1_replica1] 
Registered new searcher Searcher@6e79ed2a[c8n_crud_1x2_shard1_replica1] 
main{ExitableDirectoryReader(Uninver

[...truncated too long message...]

]   2> 364268 INFO  (zkCallback-392-thread-1-processing-n:127.0.0.1:39404_) 
[n:127.0.0.1:39404_ c:collection1 s:shard2 r:core_node3 x:collection1] 
o.a.s.c.SyncStrategy Sync Success - now sync replicas to me
   [junit4]   2> 364268 INFO  
(zkCallback-392-thread-1-processing-n:127.0.0.1:39404_) [n:127.0.0.1:39404_ 
c:collection1 s:shard2 r:core_node3 x:collection1] o.a.s.c.SyncStrategy 
http://127.0.0.1:39404/collection1/ has no replicas
   [junit4]   2> 364270 INFO  
(zkCallback-392-thread-1-processing-n:127.0.0.1:39404_) [n:127.0.0.1:39404_ 
c:collection1 s:shard2 r:core_node3 x:collection1] 
o.a.s.c.ShardLeaderElectionContext I am the new leader: 
http://127.0.0.1:39404/collection1/ shard2
   [junit4]   2> 364309 WARN  
(zkCallback-386-thread-1-processing-n:127.0.0.1:40718_) [n:127.0.0.1:40718_ 
c:c8n_1x2 s:shard1 r:core_node2 x:c8n_1x2_shard1_replica1] o.a.s.c.SyncStrategy 
Closed, skipping sync up.
   [junit4]   2> 364309 INFO  
(zkCallback-386-thread-1-processing-n:127.0.0.1:40718_) [n:127.0.0.1:40718_ 
c:c8n_1x2 s:shard1 r:core_node2 x:c8n_1x2_shard1_replica1] 
o.a.s.c.ShardLeaderElectionContext We failed sync, but we have no versions - we 
can't sync in that case - we were active before, so become leader anyway
   [junit4]   2> 364309 INFO  
(zkCallback-386-thread-1-processing-n:127.0.0.1:40718_) [n:127.0.0.1:40718_ 
c:c8n_1x2 s:shard1 r:core_node2 x:c8n_1x2_shard1_replica1] o.a.s.c.SolrCore 
[c8n_1x2_shard1_replica1]  CLOSING SolrCore 
org.apache.solr.core.SolrCore@278a974d
   [junit4]   2> 364327 INFO  
(TEST-HttpPartitionTest.test-seed#[3A32EC387A633C72]) [    ] 
o.e.j.s.AbstractConnector Stopped 
ServerConnector@39f7c2f5{HTTP/1.1,[http/1.1]}{127.0.0.1:0}
   [junit4]   2> 364328 INFO  
(TEST-HttpPartitionTest.test-seed#[3A32EC387A633C72]) [    ] 
o.e.j.s.h.ContextHandler Stopped 
o.e.j.s.ServletContextHandler@334a00b1{/,null,UNAVAILABLE}
   [junit4]   2> 364329 INFO  
(TEST-HttpPartitionTest.test-seed#[3A32EC387A633C72]) [    ] 
o.a.s.c.ChaosMonkey monkey: stop shard! 39404
   [junit4]   2> 364329 INFO  
(TEST-HttpPartitionTest.test-seed#[3A32EC387A633C72]) [    ] 
o.a.s.c.CoreContainer Shutting down CoreContainer instance=1238797644
   [junit4]   2> 364330 INFO  (coreCloseExecutor-899-thread-1) 
[n:127.0.0.1:39404_ c:collection1 s:shard2 r:core_node3 x:collection1] 
o.a.s.c.SolrCore [collection1]  CLOSING SolrCore 
org.apache.solr.core.SolrCore@cbbe08a
   [junit4]   2> 364336 INFO  
(TEST-HttpPartitionTest.test-seed#[3A32EC387A633C72]) [    ] o.a.s.c.Overseer 
Overseer (id=97061509213716494-127.0.0.1:39404_-n_0000000003) closing
   [junit4]   2> 364336 INFO  
(OverseerStateUpdate-97061509213716494-127.0.0.1:39404_-n_0000000003) 
[n:127.0.0.1:39404_    ] o.a.s.c.Overseer Overseer Loop exiting : 
127.0.0.1:39404_
   [junit4]   2> 364358 WARN  
(zkCallback-392-thread-1-processing-n:127.0.0.1:39404_) [n:127.0.0.1:39404_    
] o.a.s.c.c.ZkStateReader ZooKeeper watch triggered, but Solr cannot talk to 
ZK: [KeeperErrorCode = Session expired for /live_nodes]
   [junit4]   2> 365799 WARN  
(zkCallback-392-thread-3-processing-n:127.0.0.1:39404_) [n:127.0.0.1:39404_ 
c:collMinRf_1x3 s:shard1 r:core_node2 x:collMinRf_1x3_shard1_replica1] 
o.a.s.c.SyncStrategy Closed, skipping sync up.
   [junit4]   2> 365799 INFO  
(zkCallback-392-thread-3-processing-n:127.0.0.1:39404_) [n:127.0.0.1:39404_ 
c:collMinRf_1x3 s:shard1 r:core_node2 x:collMinRf_1x3_shard1_replica1] 
o.a.s.c.SolrCore [collMinRf_1x3_shard1_replica1]  CLOSING SolrCore 
org.apache.solr.core.SolrCore@1303c0
   [junit4]   2> 365808 INFO  
(TEST-HttpPartitionTest.test-seed#[3A32EC387A633C72]) [    ] 
o.e.j.s.AbstractConnector Stopped 
ServerConnector@250a6c16{HTTP/1.1,[http/1.1]}{127.0.0.1:0}
   [junit4]   2> 365808 INFO  
(TEST-HttpPartitionTest.test-seed#[3A32EC387A633C72]) [    ] 
o.e.j.s.h.ContextHandler Stopped 
o.e.j.s.ServletContextHandler@4c5cea0d{/,null,UNAVAILABLE}
   [junit4]   2> 365809 INFO  
(TEST-HttpPartitionTest.test-seed#[3A32EC387A633C72]) [    ] 
o.a.s.c.ZkTestServer connecting to 127.0.0.1:40706 40706
   [junit4]   2> 365814 INFO  (Thread-435) [    ] o.a.s.c.ZkTestServer 
connecting to 127.0.0.1:40706 40706
   [junit4]   2> 365815 WARN  (Thread-435) [    ] o.a.s.c.ZkTestServer Watch 
limit violations: 
   [junit4]   2> Maximum concurrent create/delete watches above limit:
   [junit4]   2> 
   [junit4]   2>        5       /solr/clusterprops.json
   [junit4]   2>        5       /solr/aliases.json
   [junit4]   2>        4       /solr/security.json
   [junit4]   2>        4       /solr/configs/conf1
   [junit4]   2>        3       /solr/collections/collection1/state.json
   [junit4]   2>        3       /solr/collections/collMinRf_1x3/state.json
   [junit4]   2>        2       /solr/collections/c8n_1x2/state.json
   [junit4]   2>        2       /solr/collections/c8n_crud_1x2/state.json
   [junit4]   2> 
   [junit4]   2> Maximum concurrent data watches above limit:
   [junit4]   2> 
   [junit4]   2>        5       /solr/clusterstate.json
   [junit4]   2>        2       
/solr/collections/collMinRf_1x3/leader_elect/shard1/election/97061509213716491-core_node1-n_0000000000
   [junit4]   2> 
   [junit4]   2> Maximum concurrent children watches above limit:
   [junit4]   2> 
   [junit4]   2>        47      /solr/overseer/queue
   [junit4]   2>        38      /solr/overseer/collection-queue-work
   [junit4]   2>        6       /solr/overseer/queue-work
   [junit4]   2>        5       /solr/live_nodes
   [junit4]   2>        5       /solr/collections
   [junit4]   2> 
   [junit4]   2> 365815 WARN  
(TEST-HttpPartitionTest.test-seed#[3A32EC387A633C72]) [    ] 
o.a.s.c.SocketProxy Closing 13 connections to: http://127.0.0.1:40718/, target: 
http://127.0.0.1:34763/
   [junit4]   2> 365815 WARN  
(TEST-HttpPartitionTest.test-seed#[3A32EC387A633C72]) [    ] 
o.a.s.c.SocketProxy Closing 3 connections to: http://127.0.0.1:39404/, target: 
http://127.0.0.1:43451/
   [junit4]   2> 365815 WARN  
(TEST-HttpPartitionTest.test-seed#[3A32EC387A633C72]) [    ] 
o.a.s.c.SocketProxy Closing 6 connections to: http://127.0.0.1:34481/, target: 
http://127.0.0.1:40130/
   [junit4]   2> 365815 WARN  
(TEST-HttpPartitionTest.test-seed#[3A32EC387A633C72]) [    ] 
o.a.s.c.SocketProxy Closing 3 connections to: http://127.0.0.1:35327/, target: 
http://127.0.0.1:34392/
   [junit4]   2> NOTE: reproduce with: ant test  -Dtestcase=HttpPartitionTest 
-Dtests.method=test -Dtests.seed=3A32EC387A633C72 -Dtests.multiplier=3 
-Dtests.slow=true -Dtests.locale=und -Dtests.timezone=Europe/Sarajevo 
-Dtests.asserts=true -Dtests.file.encoding=UTF-8
   [junit4] ERROR   49.3s J0 | HttpPartitionTest.test <<<
   [junit4]    > Throwable #1: java.lang.NullPointerException
   [junit4]    >        at 
__randomizedtesting.SeedInfo.seed([3A32EC387A633C72:B266D3E2D49F518A]:0)
   [junit4]    >        at 
org.apache.solr.client.solrj.impl.CloudSolrClient.requestWithRetryOnStaleState(CloudSolrClient.java:1143)
   [junit4]    >        at 
org.apache.solr.client.solrj.impl.CloudSolrClient.request(CloudSolrClient.java:1037)
   [junit4]    >        at 
org.apache.solr.client.solrj.SolrClient.request(SolrClient.java:1219)
   [junit4]    >        at 
org.apache.solr.cloud.HttpPartitionTest.sendDoc(HttpPartitionTest.java:609)
   [junit4]    >        at 
org.apache.solr.cloud.HttpPartitionTest.sendDoc(HttpPartitionTest.java:595)
   [junit4]    >        at 
org.apache.solr.cloud.HttpPartitionTest.testRf2(HttpPartitionTest.java:294)
   [junit4]    >        at 
org.apache.solr.cloud.HttpPartitionTest.test(HttpPartitionTest.java:125)
   [junit4]    >        at 
org.apache.solr.BaseDistributedSearchTestCase$ShardsRepeatRule$ShardsFixedStatement.callStatement(BaseDistributedSearchTestCase.java:992)
   [junit4]    >        at 
org.apache.solr.BaseDistributedSearchTestCase$ShardsRepeatRule$ShardsStatement.evaluate(BaseDistributedSearchTestCase.java:967)
   [junit4]    >        at java.lang.Thread.run(Thread.java:745)
   [junit4]   2> 365818 INFO  
(SUITE-HttpPartitionTest-seed#[3A32EC387A633C72]-worker) [    ] 
o.a.s.SolrTestCaseJ4 ###deleteCore
   [junit4]   2> NOTE: leaving temporary files on disk at: 
/home/jenkins/workspace/Lucene-Solr-6.x-Linux/solr/build/solr-core/test/J0/temp/solr.cloud.HttpPartitionTest_3A32EC387A633C72-001
   [junit4]   2> Dec 06, 2016 4:19:04 PM 
com.carrotsearch.randomizedtesting.ThreadLeakControl checkThreadLeaks
   [junit4]   2> WARNING: Will linger awaiting termination of 1 leaked 
thread(s).
   [junit4]   2> NOTE: test params are: codec=Asserting(Lucene62): 
{range_facet_l_dv=Lucene50(blocksize=128), _version_=PostingsFormat(name=Memory 
doPackFST= true), multiDefault=Lucene50(blocksize=128), 
a_t=PostingsFormat(name=Memory doPackFST= true), 
intDefault=PostingsFormat(name=Memory doPackFST= true), 
id=Lucene50(blocksize=128), range_facet_i_dv=PostingsFormat(name=Memory 
doPackFST= true), text=PostingsFormat(name=Memory doPackFST= false), 
range_facet_l=PostingsFormat(name=Memory doPackFST= true), 
timestamp=PostingsFormat(name=Memory doPackFST= true)}, 
docValues:{range_facet_l_dv=DocValuesFormat(name=Lucene54), 
range_facet_i_dv=DocValuesFormat(name=Asserting), 
timestamp=DocValuesFormat(name=Asserting)}, maxPointsInLeafNode=606, 
maxMBSortInHeap=7.839610450506012, 
sim=RandomSimilarity(queryNorm=true,coord=crazy): {}, locale=und, 
timezone=Europe/Sarajevo
   [junit4]   2> NOTE: Linux 4.4.0-47-generic amd64/Oracle Corporation 
1.8.0_102 (64-bit)/cpus=12,threads=1,free=227994032,total=508887040
   [junit4]   2> NOTE: All tests run in this JVM: [TestUseDocValuesAsStored, 
TestPushWriter, TestStandardQParsers, TestConfigReload, 
HdfsTlogReplayBufferedWhileIndexingTest, ReturnFieldsTest, 
TestEmbeddedSolrServerConstructors, CdcrVersionReplicationTest, 
TestSQLHandlerNonCloud, UniqFieldsUpdateProcessorFactoryTest, 
TestDynamicFieldCollectionResource, DateFieldTest, 
SimpleCollectionCreateDeleteTest, AtomicUpdatesTest, BitVectorTest, 
CheckHdfsIndexTest, OverseerRolesTest, TestInfoStreamLogging, 
ConcurrentDeleteAndCreateCollectionTest, SyncSliceTest, TestConfigSetsAPI, 
TestPerFieldSimilarityWithDefaultOverride, RequiredFieldsTest, 
CreateCollectionCleanupTest, TestCloudRecovery, TestDFISimilarityFactory, 
HttpPartitionTest]
   [junit4] Completed [135/660 (1!)] on J0 in 50.02s, 1 test, 1 error <<< 
FAILURES!

[...truncated 62509 lines...]
