Build: https://jenkins.thetaphi.de/job/Lucene-Solr-master-Linux/20951/
Java: 32bit/jdk1.8.0_144 -client -XX:+UseConcMarkSweepGC

2 tests failed.
FAILED:  org.apache.solr.cloud.ChaosMonkeySafeLeaderTest.test

Error Message:
Timeout occured while waiting response from server at: http://127.0.0.1:41365/y_/si

Stack Trace:
org.apache.solr.client.solrj.SolrServerException: Timeout occured while waiting response from server at: http://127.0.0.1:41365/y_/si
        at __randomizedtesting.SeedInfo.seed([6B515ACCA25292F:8EE12A7664D944D7]:0)
        at org.apache.solr.client.solrj.impl.HttpSolrClient.executeMethod(HttpSolrClient.java:654)
        at org.apache.solr.client.solrj.impl.HttpSolrClient.request(HttpSolrClient.java:255)
        at org.apache.solr.client.solrj.impl.HttpSolrClient.request(HttpSolrClient.java:244)
        at org.apache.solr.client.solrj.impl.LBHttpSolrClient.doRequest(LBHttpSolrClient.java:483)
        at org.apache.solr.client.solrj.impl.LBHttpSolrClient.request(LBHttpSolrClient.java:413)
        at org.apache.solr.client.solrj.impl.CloudSolrClient.sendRequest(CloudSolrClient.java:1103)
        at org.apache.solr.client.solrj.impl.CloudSolrClient.requestWithRetryOnStaleState(CloudSolrClient.java:883)
        at org.apache.solr.client.solrj.impl.CloudSolrClient.request(CloudSolrClient.java:816)
        at org.apache.solr.client.solrj.SolrRequest.process(SolrRequest.java:194)
        at org.apache.solr.client.solrj.SolrRequest.process(SolrRequest.java:211)
        at org.apache.solr.cloud.AbstractFullDistribZkTestBase.createServers(AbstractFullDistribZkTestBase.java:314)
        at org.apache.solr.BaseDistributedSearchTestCase$ShardsRepeatRule$ShardsFixedStatement.callStatement(BaseDistributedSearchTestCase.java:991)
        at org.apache.solr.BaseDistributedSearchTestCase$ShardsRepeatRule$ShardsStatement.evaluate(BaseDistributedSearchTestCase.java:968)
        at com.carrotsearch.randomizedtesting.rules.SystemPropertiesRestoreRule$1.evaluate(SystemPropertiesRestoreRule.java:57)
        at org.apache.lucene.util.TestRuleSetupTeardownChained$1.evaluate(TestRuleSetupTeardownChained.java:49)
        at org.apache.lucene.util.AbstractBeforeAfterRule$1.evaluate(AbstractBeforeAfterRule.java:45)
        at org.apache.lucene.util.TestRuleThreadAndTestName$1.evaluate(TestRuleThreadAndTestName.java:48)
        at org.apache.lucene.util.TestRuleIgnoreAfterMaxFailures$1.evaluate(TestRuleIgnoreAfterMaxFailures.java:64)
        at org.apache.lucene.util.TestRuleMarkFailure$1.evaluate(TestRuleMarkFailure.java:47)
        at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
        at com.carrotsearch.randomizedtesting.ThreadLeakControl$StatementRunner.run(ThreadLeakControl.java:368)
        at com.carrotsearch.randomizedtesting.ThreadLeakControl.forkTimeoutingTask(ThreadLeakControl.java:817)
        at com.carrotsearch.randomizedtesting.ThreadLeakControl$3.evaluate(ThreadLeakControl.java:468)
        at com.carrotsearch.randomizedtesting.RandomizedRunner.runSingleTest(RandomizedRunner.java:943)
        at com.carrotsearch.randomizedtesting.RandomizedRunner$5.evaluate(RandomizedRunner.java:829)
        at com.carrotsearch.randomizedtesting.RandomizedRunner$6.evaluate(RandomizedRunner.java:879)
        at com.carrotsearch.randomizedtesting.RandomizedRunner$7.evaluate(RandomizedRunner.java:890)
        at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
        at com.carrotsearch.randomizedtesting.rules.SystemPropertiesRestoreRule$1.evaluate(SystemPropertiesRestoreRule.java:57)
        at org.apache.lucene.util.AbstractBeforeAfterRule$1.evaluate(AbstractBeforeAfterRule.java:45)
        at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
        at org.apache.lucene.util.TestRuleStoreClassName$1.evaluate(TestRuleStoreClassName.java:41)
        at com.carrotsearch.randomizedtesting.rules.NoShadowingOrOverridesOnMethodsRule$1.evaluate(NoShadowingOrOverridesOnMethodsRule.java:40)
        at com.carrotsearch.randomizedtesting.rules.NoShadowingOrOverridesOnMethodsRule$1.evaluate(NoShadowingOrOverridesOnMethodsRule.java:40)
        at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
        at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
        at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
        at org.apache.lucene.util.TestRuleAssertionsRequired$1.evaluate(TestRuleAssertionsRequired.java:53)
        at org.apache.lucene.util.TestRuleMarkFailure$1.evaluate(TestRuleMarkFailure.java:47)
        at org.apache.lucene.util.TestRuleIgnoreAfterMaxFailures$1.evaluate(TestRuleIgnoreAfterMaxFailures.java:64)
        at org.apache.lucene.util.TestRuleIgnoreTestSuites$1.evaluate(TestRuleIgnoreTestSuites.java:54)
        at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
        at com.carrotsearch.randomizedtesting.ThreadLeakControl$StatementRunner.run(ThreadLeakControl.java:368)
        at java.lang.Thread.run(Thread.java:748)
Caused by: java.net.SocketTimeoutException: Read timed out
        at java.net.SocketInputStream.socketRead0(Native Method)
        at java.net.SocketInputStream.socketRead(SocketInputStream.java:116)
        at java.net.SocketInputStream.read(SocketInputStream.java:171)
        at java.net.SocketInputStream.read(SocketInputStream.java:141)
        at org.apache.http.impl.io.SessionInputBufferImpl.streamRead(SessionInputBufferImpl.java:137)
        at org.apache.http.impl.io.SessionInputBufferImpl.fillBuffer(SessionInputBufferImpl.java:153)
        at org.apache.http.impl.io.SessionInputBufferImpl.readLine(SessionInputBufferImpl.java:282)
        at org.apache.http.impl.conn.DefaultHttpResponseParser.parseHead(DefaultHttpResponseParser.java:138)
        at org.apache.http.impl.conn.DefaultHttpResponseParser.parseHead(DefaultHttpResponseParser.java:56)
        at org.apache.http.impl.io.AbstractMessageParser.parse(AbstractMessageParser.java:259)
        at org.apache.http.impl.DefaultBHttpClientConnection.receiveResponseHeader(DefaultBHttpClientConnection.java:163)
        at org.apache.http.impl.conn.CPoolProxy.receiveResponseHeader(CPoolProxy.java:165)
        at org.apache.http.protocol.HttpRequestExecutor.doReceiveResponse(HttpRequestExecutor.java:273)
        at org.apache.http.protocol.HttpRequestExecutor.execute(HttpRequestExecutor.java:125)
        at org.apache.http.impl.execchain.MainClientExec.execute(MainClientExec.java:272)
        at org.apache.http.impl.execchain.ProtocolExec.execute(ProtocolExec.java:185)
        at org.apache.http.impl.execchain.RetryExec.execute(RetryExec.java:89)
        at org.apache.http.impl.execchain.RedirectExec.execute(RedirectExec.java:111)
        at org.apache.http.impl.client.InternalHttpClient.doExecute(InternalHttpClient.java:185)
        at org.apache.http.impl.client.CloseableHttpClient.execute(CloseableHttpClient.java:83)
        at org.apache.http.impl.client.CloseableHttpClient.execute(CloseableHttpClient.java:56)
        at org.apache.solr.client.solrj.impl.HttpSolrClient.executeMethod(HttpSolrClient.java:542)
        ... 43 more
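
The failure happens inside SolrJ's HttpSolrClient while AbstractFullDistribZkTestBase.createServers is creating the control collection; the root cause is a plain client-side read timeout (java.net.SocketTimeoutException), not an error returned by Solr. As a minimal sketch of where that timeout comes from on the client side (illustrative base URL and timeout values, assuming the SolrJ 7.x-era builder API; these are not the settings the test harness actually uses):

    import org.apache.solr.client.solrj.SolrQuery;
    import org.apache.solr.client.solrj.SolrServerException;
    import org.apache.solr.client.solrj.impl.HttpSolrClient;

    public class SolrJTimeoutSketch {
      public static void main(String[] args) throws Exception {
        // Hypothetical standalone URL; the test used a randomized port and context (/y_/si).
        try (HttpSolrClient client = new HttpSolrClient.Builder("http://127.0.0.1:8983/solr/collection1")
            .withConnectionTimeout(15000)  // ms allowed to establish the TCP connection
            .withSocketTimeout(30000)      // ms allowed between reads; exceeding it ends in SocketTimeoutException
            .build()) {
          try {
            client.query(new SolrQuery("*:*"));
          } catch (SolrServerException e) {
            // SolrJ wraps the read timeout, which is why the trace above shows a
            // SolrServerException caused by java.net.SocketTimeoutException.
            System.err.println("Request failed: " + e.getRootCause());
          }
        }
      }
    }

A request that exceeds the socket timeout surfaces exactly as in the trace above: a SolrServerException whose root cause is the SocketTimeoutException thrown while reading the response header.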


FAILED:  org.apache.solr.index.hdfs.CheckHdfsIndexTest.testChecksumsOnly

Error Message:
Timeout occured while waiting response from server at: https://127.0.0.1:43313/vh_wo

Stack Trace:
org.apache.solr.client.solrj.SolrServerException: Timeout occured while waiting response from server at: https://127.0.0.1:43313/vh_wo
        at __randomizedtesting.SeedInfo.seed([6B515ACCA25292F:A450DC8F28EAD1EF]:0)
        at org.apache.solr.client.solrj.impl.HttpSolrClient.executeMethod(HttpSolrClient.java:654)
        at org.apache.solr.client.solrj.impl.HttpSolrClient.request(HttpSolrClient.java:255)
        at org.apache.solr.client.solrj.impl.HttpSolrClient.request(HttpSolrClient.java:244)
        at org.apache.solr.client.solrj.impl.LBHttpSolrClient.doRequest(LBHttpSolrClient.java:483)
        at org.apache.solr.client.solrj.impl.LBHttpSolrClient.request(LBHttpSolrClient.java:413)
        at org.apache.solr.client.solrj.impl.CloudSolrClient.sendRequest(CloudSolrClient.java:1103)
        at org.apache.solr.client.solrj.impl.CloudSolrClient.requestWithRetryOnStaleState(CloudSolrClient.java:883)
        at org.apache.solr.client.solrj.impl.CloudSolrClient.request(CloudSolrClient.java:816)
        at org.apache.solr.client.solrj.SolrRequest.process(SolrRequest.java:194)
        at org.apache.solr.client.solrj.SolrRequest.process(SolrRequest.java:211)
        at org.apache.solr.cloud.AbstractFullDistribZkTestBase.createServers(AbstractFullDistribZkTestBase.java:314)
        at org.apache.solr.BaseDistributedSearchTestCase$ShardsRepeatRule$ShardsFixedStatement.callStatement(BaseDistributedSearchTestCase.java:991)
        at org.apache.solr.BaseDistributedSearchTestCase$ShardsRepeatRule$ShardsStatement.evaluate(BaseDistributedSearchTestCase.java:968)
        at com.carrotsearch.randomizedtesting.rules.SystemPropertiesRestoreRule$1.evaluate(SystemPropertiesRestoreRule.java:57)
        at org.apache.lucene.util.TestRuleSetupTeardownChained$1.evaluate(TestRuleSetupTeardownChained.java:49)
        at org.apache.lucene.util.AbstractBeforeAfterRule$1.evaluate(AbstractBeforeAfterRule.java:45)
        at org.apache.lucene.util.TestRuleThreadAndTestName$1.evaluate(TestRuleThreadAndTestName.java:48)
        at org.apache.lucene.util.TestRuleIgnoreAfterMaxFailures$1.evaluate(TestRuleIgnoreAfterMaxFailures.java:64)
        at org.apache.lucene.util.TestRuleMarkFailure$1.evaluate(TestRuleMarkFailure.java:47)
        at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
        at com.carrotsearch.randomizedtesting.ThreadLeakControl$StatementRunner.run(ThreadLeakControl.java:368)
        at com.carrotsearch.randomizedtesting.ThreadLeakControl.forkTimeoutingTask(ThreadLeakControl.java:817)
        at com.carrotsearch.randomizedtesting.ThreadLeakControl$3.evaluate(ThreadLeakControl.java:468)
        at com.carrotsearch.randomizedtesting.RandomizedRunner.runSingleTest(RandomizedRunner.java:943)
        at com.carrotsearch.randomizedtesting.RandomizedRunner$5.evaluate(RandomizedRunner.java:829)
        at com.carrotsearch.randomizedtesting.RandomizedRunner$6.evaluate(RandomizedRunner.java:879)
        at com.carrotsearch.randomizedtesting.RandomizedRunner$7.evaluate(RandomizedRunner.java:890)
        at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
        at com.carrotsearch.randomizedtesting.rules.SystemPropertiesRestoreRule$1.evaluate(SystemPropertiesRestoreRule.java:57)
        at org.apache.lucene.util.AbstractBeforeAfterRule$1.evaluate(AbstractBeforeAfterRule.java:45)
        at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
        at org.apache.lucene.util.TestRuleStoreClassName$1.evaluate(TestRuleStoreClassName.java:41)
        at com.carrotsearch.randomizedtesting.rules.NoShadowingOrOverridesOnMethodsRule$1.evaluate(NoShadowingOrOverridesOnMethodsRule.java:40)
        at com.carrotsearch.randomizedtesting.rules.NoShadowingOrOverridesOnMethodsRule$1.evaluate(NoShadowingOrOverridesOnMethodsRule.java:40)
        at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
        at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
        at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
        at org.apache.lucene.util.TestRuleAssertionsRequired$1.evaluate(TestRuleAssertionsRequired.java:53)
        at org.apache.lucene.util.TestRuleMarkFailure$1.evaluate(TestRuleMarkFailure.java:47)
        at org.apache.lucene.util.TestRuleIgnoreAfterMaxFailures$1.evaluate(TestRuleIgnoreAfterMaxFailures.java:64)
        at org.apache.lucene.util.TestRuleIgnoreTestSuites$1.evaluate(TestRuleIgnoreTestSuites.java:54)
        at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
        at com.carrotsearch.randomizedtesting.ThreadLeakControl$StatementRunner.run(ThreadLeakControl.java:368)
        at java.lang.Thread.run(Thread.java:748)
Caused by: java.net.SocketTimeoutException: Read timed out
        at java.net.SocketInputStream.socketRead0(Native Method)
        at java.net.SocketInputStream.socketRead(SocketInputStream.java:116)
        at java.net.SocketInputStream.read(SocketInputStream.java:171)
        at java.net.SocketInputStream.read(SocketInputStream.java:141)
        at sun.security.ssl.InputRecord.readFully(InputRecord.java:465)
        at sun.security.ssl.InputRecord.read(InputRecord.java:503)
        at sun.security.ssl.SSLSocketImpl.readRecord(SSLSocketImpl.java:983)
        at sun.security.ssl.SSLSocketImpl.readDataRecord(SSLSocketImpl.java:940)
        at sun.security.ssl.AppInputStream.read(AppInputStream.java:105)
        at org.apache.http.impl.io.SessionInputBufferImpl.streamRead(SessionInputBufferImpl.java:137)
        at org.apache.http.impl.io.SessionInputBufferImpl.fillBuffer(SessionInputBufferImpl.java:153)
        at org.apache.http.impl.io.SessionInputBufferImpl.readLine(SessionInputBufferImpl.java:282)
        at org.apache.http.impl.conn.DefaultHttpResponseParser.parseHead(DefaultHttpResponseParser.java:138)
        at org.apache.http.impl.conn.DefaultHttpResponseParser.parseHead(DefaultHttpResponseParser.java:56)
        at org.apache.http.impl.io.AbstractMessageParser.parse(AbstractMessageParser.java:259)
        at org.apache.http.impl.DefaultBHttpClientConnection.receiveResponseHeader(DefaultBHttpClientConnection.java:163)
        at org.apache.http.impl.conn.CPoolProxy.receiveResponseHeader(CPoolProxy.java:165)
        at org.apache.http.protocol.HttpRequestExecutor.doReceiveResponse(HttpRequestExecutor.java:273)
        at org.apache.http.protocol.HttpRequestExecutor.execute(HttpRequestExecutor.java:125)
        at org.apache.http.impl.execchain.MainClientExec.execute(MainClientExec.java:272)
        at org.apache.http.impl.execchain.ProtocolExec.execute(ProtocolExec.java:185)
        at org.apache.http.impl.execchain.RetryExec.execute(RetryExec.java:89)
        at org.apache.http.impl.execchain.RedirectExec.execute(RedirectExec.java:111)
        at org.apache.http.impl.client.InternalHttpClient.doExecute(InternalHttpClient.java:185)
        at org.apache.http.impl.client.CloseableHttpClient.execute(CloseableHttpClient.java:83)
        at org.apache.http.impl.client.CloseableHttpClient.execute(CloseableHttpClient.java:56)
        at org.apache.solr.client.solrj.impl.HttpSolrClient.executeMethod(HttpSolrClient.java:542)
        ... 43 more
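
Both traces follow the same SolrJ path: CloudSolrClient resolves the target node from ZooKeeper, LBHttpSolrClient picks a server, and HttpSolrClient gives up waiting for the HTTP response (here over SSL, hence the sun.security.ssl frames). Below is a short sketch of how that path is driven and how a read timeout is told apart from a genuine server-side error when triaging such reports; the ZooKeeper address mirrors the test's ZkTestServer chroot but is otherwise hypothetical, and the 7.x-era CloudSolrClient.Builder API is assumed:

    import java.net.SocketTimeoutException;

    import org.apache.solr.client.solrj.SolrQuery;
    import org.apache.solr.client.solrj.SolrServerException;
    import org.apache.solr.client.solrj.impl.CloudSolrClient;

    public class CloudTimeoutTriage {
      public static void main(String[] args) throws Exception {
        // Hypothetical ZK address with the /solr chroot, like "Cluster at 127.0.0.1:38619/solr ready" below.
        try (CloudSolrClient client = new CloudSolrClient.Builder()
            .withZkHost("127.0.0.1:38619/solr")
            .build()) {
          client.setDefaultCollection("collection1");
          try {
            client.query(new SolrQuery("*:*"));
          } catch (SolrServerException e) {
            if (e.getRootCause() instanceof SocketTimeoutException) {
              // The failures in this report are of this kind: the client stopped waiting;
              // Solr itself never returned an error.
              System.err.println("Read timed out: " + e.getMessage());
            } else {
              throw e;
            }
          }
        }
      }
    }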




Build Log:
[...truncated 11691 lines...]
   [junit4] Suite: org.apache.solr.index.hdfs.CheckHdfsIndexTest
   [junit4]   2> 17798 INFO  
(SUITE-CheckHdfsIndexTest-seed#[6B515ACCA25292F]-worker) [    ] 
o.a.s.SolrTestCaseJ4 SecureRandom sanity checks: 
test.solr.allowed.securerandom=null & java.security.egd=file:/dev/./urandom
   [junit4]   2> Creating dataDir: 
/home/jenkins/workspace/Lucene-Solr-master-Linux/solr/build/solr-core/test/J0/temp/solr.index.hdfs.CheckHdfsIndexTest_6B515ACCA25292F-001/init-core-data-001
   [junit4]   2> 17798 WARN  
(SUITE-CheckHdfsIndexTest-seed#[6B515ACCA25292F]-worker) [    ] 
o.a.s.SolrTestCaseJ4 startTrackingSearchers: numOpens=27 numCloses=27
   [junit4]   2> 17799 INFO  
(SUITE-CheckHdfsIndexTest-seed#[6B515ACCA25292F]-worker) [    ] 
o.a.s.SolrTestCaseJ4 Using PointFields (NUMERIC_POINTS_SYSPROP=true) 
w/NUMERIC_DOCVALUES_SYSPROP=false
   [junit4]   2> 17800 INFO  
(SUITE-CheckHdfsIndexTest-seed#[6B515ACCA25292F]-worker) [    ] 
o.a.s.SolrTestCaseJ4 Randomized ssl (true) and clientAuth (true) via: 
@org.apache.solr.util.RandomizeSSL(reason=, ssl=NaN, value=NaN, clientAuth=NaN)
   [junit4]   2> 17802 INFO  
(SUITE-CheckHdfsIndexTest-seed#[6B515ACCA25292F]-worker) [    ] 
o.a.s.BaseDistributedSearchTestCase Setting hostContext system property: /vh_wo/
   [junit4]   2> 18140 WARN  
(SUITE-CheckHdfsIndexTest-seed#[6B515ACCA25292F]-worker) [    ] 
o.a.h.u.NativeCodeLoader Unable to load native-hadoop library for your 
platform... using builtin-java classes where applicable
   [junit4]   1> Formatting using clusterid: testClusterID
   [junit4]   2> 18956 WARN  
(SUITE-CheckHdfsIndexTest-seed#[6B515ACCA25292F]-worker) [    ] 
o.a.h.m.i.MetricsConfig Cannot locate configuration: tried 
hadoop-metrics2-namenode.properties,hadoop-metrics2.properties
   [junit4]   2> 19146 INFO  
(SUITE-CheckHdfsIndexTest-seed#[6B515ACCA25292F]-worker) [    ] o.m.log Logging 
to org.slf4j.impl.Log4jLoggerAdapter(org.mortbay.log) via 
org.mortbay.log.Slf4jLog
   [junit4]   2> 19179 WARN  
(SUITE-CheckHdfsIndexTest-seed#[6B515ACCA25292F]-worker) [    ] 
o.a.h.h.HttpRequestLog Jetty request log can only be enabled using Log4j
   [junit4]   2> 19579 INFO  
(SUITE-CheckHdfsIndexTest-seed#[6B515ACCA25292F]-worker) [    ] o.m.log 
jetty-6.1.26
   [junit4]   2> 19628 INFO  
(SUITE-CheckHdfsIndexTest-seed#[6B515ACCA25292F]-worker) [    ] o.m.log Extract 
jar:file:/home/jenkins/.ivy2/cache/org.apache.hadoop/hadoop-hdfs/tests/hadoop-hdfs-2.7.4-tests.jar!/webapps/hdfs
 to ./temp/Jetty_localhost_localdomain_43669_hdfs____bwpaic/webapp
   [junit4]   2> 22290 INFO  
(SUITE-CheckHdfsIndexTest-seed#[6B515ACCA25292F]-worker) [    ] o.m.log Started 
HttpServer2$SelectChannelConnectorWithSafeStartup@localhost.localdomain:43669
   [junit4]   2> 23308 WARN  
(SUITE-CheckHdfsIndexTest-seed#[6B515ACCA25292F]-worker) [    ] 
o.a.h.h.HttpRequestLog Jetty request log can only be enabled using Log4j
   [junit4]   2> 23313 INFO  
(SUITE-CheckHdfsIndexTest-seed#[6B515ACCA25292F]-worker) [    ] o.m.log 
jetty-6.1.26
   [junit4]   2> 23327 INFO  
(SUITE-CheckHdfsIndexTest-seed#[6B515ACCA25292F]-worker) [    ] o.m.log Extract 
jar:file:/home/jenkins/.ivy2/cache/org.apache.hadoop/hadoop-hdfs/tests/hadoop-hdfs-2.7.4-tests.jar!/webapps/datanode
 to ./temp/Jetty_localhost_37295_datanode____o2jc29/webapp
   [junit4]   2> 23831 INFO  
(SUITE-CheckHdfsIndexTest-seed#[6B515ACCA25292F]-worker) [    ] o.m.log Started 
HttpServer2$SelectChannelConnectorWithSafeStartup@localhost:37295
   [junit4]   2> 24139 WARN  
(SUITE-CheckHdfsIndexTest-seed#[6B515ACCA25292F]-worker) [    ] 
o.a.h.h.HttpRequestLog Jetty request log can only be enabled using Log4j
   [junit4]   2> 24140 INFO  
(SUITE-CheckHdfsIndexTest-seed#[6B515ACCA25292F]-worker) [    ] o.m.log 
jetty-6.1.26
   [junit4]   2> 24157 INFO  
(SUITE-CheckHdfsIndexTest-seed#[6B515ACCA25292F]-worker) [    ] o.m.log Extract 
jar:file:/home/jenkins/.ivy2/cache/org.apache.hadoop/hadoop-hdfs/tests/hadoop-hdfs-2.7.4-tests.jar!/webapps/datanode
 to ./temp/Jetty_localhost_43517_datanode____.vypjx/webapp
   [junit4]   2> 24666 INFO  
(SUITE-CheckHdfsIndexTest-seed#[6B515ACCA25292F]-worker) [    ] o.m.log Started 
HttpServer2$SelectChannelConnectorWithSafeStartup@localhost:43517
   [junit4]   2> 24957 ERROR (DataNode: 
[[[DISK]file:/home/jenkins/workspace/Lucene-Solr-master-Linux/solr/build/solr-core/test/J0/temp/solr.index.hdfs.CheckHdfsIndexTest_6B515ACCA25292F-001/tempDir-001/hdfsBaseDir/data/data1/,
 
[DISK]file:/home/jenkins/workspace/Lucene-Solr-master-Linux/solr/build/solr-core/test/J0/temp/solr.index.hdfs.CheckHdfsIndexTest_6B515ACCA25292F-001/tempDir-001/hdfsBaseDir/data/data2/]]
  heartbeating to localhost.localdomain/127.0.0.1:33053) [    ] 
o.a.h.h.s.d.DirectoryScanner 
dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value below 1 
ms/sec. Assuming default value of 1000
   [junit4]   2> 25102 ERROR (DataNode: 
[[[DISK]file:/home/jenkins/workspace/Lucene-Solr-master-Linux/solr/build/solr-core/test/J0/temp/solr.index.hdfs.CheckHdfsIndexTest_6B515ACCA25292F-001/tempDir-001/hdfsBaseDir/data/data3/,
 
[DISK]file:/home/jenkins/workspace/Lucene-Solr-master-Linux/solr/build/solr-core/test/J0/temp/solr.index.hdfs.CheckHdfsIndexTest_6B515ACCA25292F-001/tempDir-001/hdfsBaseDir/data/data4/]]
  heartbeating to localhost.localdomain/127.0.0.1:33053) [    ] 
o.a.h.h.s.d.DirectoryScanner 
dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value below 1 
ms/sec. Assuming default value of 1000
   [junit4]   2> 25166 INFO  (Block report processor) [    ] BlockStateChange 
BLOCK* processReport 0x6896792660f49: from storage 
DS-9e40cd40-920b-4ed8-b24a-d8e3e9fc3638 node 
DatanodeRegistration(127.0.0.1:40705, 
datanodeUuid=f4444535-7614-47ec-8bf1-9e9b3de6078c, infoPort=41983, 
infoSecurePort=0, ipcPort=41935, 
storageInfo=lv=-56;cid=testClusterID;nsid=833900056;c=0), blocks: 0, 
hasStaleStorage: true, processing time: 13 msecs
   [junit4]   2> 25166 INFO  (Block report processor) [    ] BlockStateChange 
BLOCK* processReport 0x6896792660e9b: from storage 
DS-7129cf1e-8d1e-494c-8a64-2fa2690b4f96 node 
DatanodeRegistration(127.0.0.1:41669, 
datanodeUuid=0c41091c-d44b-4977-ba1c-947bda5beccc, infoPort=43603, 
infoSecurePort=0, ipcPort=44773, 
storageInfo=lv=-56;cid=testClusterID;nsid=833900056;c=0), blocks: 0, 
hasStaleStorage: true, processing time: 1 msecs
   [junit4]   2> 25167 INFO  (Block report processor) [    ] BlockStateChange 
BLOCK* processReport 0x6896792660f49: from storage 
DS-24ed71f7-8ff8-4327-a9e7-c2603584c910 node 
DatanodeRegistration(127.0.0.1:40705, 
datanodeUuid=f4444535-7614-47ec-8bf1-9e9b3de6078c, infoPort=41983, 
infoSecurePort=0, ipcPort=41935, 
storageInfo=lv=-56;cid=testClusterID;nsid=833900056;c=0), blocks: 0, 
hasStaleStorage: false, processing time: 0 msecs
   [junit4]   2> 25168 INFO  (Block report processor) [    ] BlockStateChange 
BLOCK* processReport 0x6896792660e9b: from storage 
DS-861e474a-08ce-43d5-bc9a-cb6a92b59e14 node 
DatanodeRegistration(127.0.0.1:41669, 
datanodeUuid=0c41091c-d44b-4977-ba1c-947bda5beccc, infoPort=43603, 
infoSecurePort=0, ipcPort=44773, 
storageInfo=lv=-56;cid=testClusterID;nsid=833900056;c=0), blocks: 0, 
hasStaleStorage: false, processing time: 0 msecs
   [junit4]   2> 25569 INFO  
(TEST-CheckHdfsIndexTest.doTest-seed#[6B515ACCA25292F]) [    ] 
o.a.s.c.ZkTestServer STARTING ZK TEST SERVER
   [junit4]   2> 25570 INFO  (Thread-203) [    ] o.a.s.c.ZkTestServer client 
port:0.0.0.0/0.0.0.0:0
   [junit4]   2> 25571 INFO  (Thread-203) [    ] o.a.s.c.ZkTestServer Starting 
server
   [junit4]   2> 25647 ERROR (Thread-203) [    ] o.a.z.s.ZooKeeperServer 
ZKShutdownHandler is not registered, so ZooKeeper server won't take any action 
on ERROR or SHUTDOWN server state changes
   [junit4]   2> 25670 INFO  
(TEST-CheckHdfsIndexTest.doTest-seed#[6B515ACCA25292F]) [    ] 
o.a.s.c.ZkTestServer start zk server on port:38619
   [junit4]   2> 25765 WARN  (NIOServerCxn.Factory:0.0.0.0/0.0.0.0:0) [    ] 
o.a.z.s.NIOServerCnxn caught end of stream exception
   [junit4]   2> EndOfStreamException: Unable to read additional data from 
client sessionid 0x15fd572a07b0000, likely client has closed socket
   [junit4]   2>        at 
org.apache.zookeeper.server.NIOServerCnxn.doIO(NIOServerCnxn.java:239)
   [junit4]   2>        at 
org.apache.zookeeper.server.NIOServerCnxnFactory.run(NIOServerCnxnFactory.java:203)
   [junit4]   2>        at java.lang.Thread.run(Thread.java:748)
   [junit4]   2> 25784 INFO  
(TEST-CheckHdfsIndexTest.doTest-seed#[6B515ACCA25292F]) [    ] 
o.a.s.c.AbstractZkTestCase put 
/home/jenkins/workspace/Lucene-Solr-master-Linux/solr/core/src/test-files/solr/collection1/conf/solrconfig-tlog.xml
 to /configs/conf1/solrconfig.xml
   [junit4]   2> 25787 INFO  
(TEST-CheckHdfsIndexTest.doTest-seed#[6B515ACCA25292F]) [    ] 
o.a.s.c.AbstractZkTestCase put 
/home/jenkins/workspace/Lucene-Solr-master-Linux/solr/core/src/test-files/solr/collection1/conf/schema.xml
 to /configs/conf1/schema.xml
   [junit4]   2> 25789 INFO  
(TEST-CheckHdfsIndexTest.doTest-seed#[6B515ACCA25292F]) [    ] 
o.a.s.c.AbstractZkTestCase put 
/home/jenkins/workspace/Lucene-Solr-master-Linux/solr/core/src/test-files/solr/collection1/conf/solrconfig.snippet.randomindexconfig.xml
 to /configs/conf1/solrconfig.snippet.randomindexconfig.xml
   [junit4]   2> 25796 INFO  
(TEST-CheckHdfsIndexTest.doTest-seed#[6B515ACCA25292F]) [    ] 
o.a.s.c.AbstractZkTestCase put 
/home/jenkins/workspace/Lucene-Solr-master-Linux/solr/core/src/test-files/solr/collection1/conf/stopwords.txt
 to /configs/conf1/stopwords.txt
   [junit4]   2> 25798 INFO  
(TEST-CheckHdfsIndexTest.doTest-seed#[6B515ACCA25292F]) [    ] 
o.a.s.c.AbstractZkTestCase put 
/home/jenkins/workspace/Lucene-Solr-master-Linux/solr/core/src/test-files/solr/collection1/conf/protwords.txt
 to /configs/conf1/protwords.txt
   [junit4]   2> 25799 INFO  
(TEST-CheckHdfsIndexTest.doTest-seed#[6B515ACCA25292F]) [    ] 
o.a.s.c.AbstractZkTestCase put 
/home/jenkins/workspace/Lucene-Solr-master-Linux/solr/core/src/test-files/solr/collection1/conf/currency.xml
 to /configs/conf1/currency.xml
   [junit4]   2> 25800 INFO  
(TEST-CheckHdfsIndexTest.doTest-seed#[6B515ACCA25292F]) [    ] 
o.a.s.c.AbstractZkTestCase put 
/home/jenkins/workspace/Lucene-Solr-master-Linux/solr/core/src/test-files/solr/collection1/conf/enumsConfig.xml
 to /configs/conf1/enumsConfig.xml
   [junit4]   2> 25802 INFO  
(TEST-CheckHdfsIndexTest.doTest-seed#[6B515ACCA25292F]) [    ] 
o.a.s.c.AbstractZkTestCase put 
/home/jenkins/workspace/Lucene-Solr-master-Linux/solr/core/src/test-files/solr/collection1/conf/open-exchange-rates.json
 to /configs/conf1/open-exchange-rates.json
   [junit4]   2> 25803 INFO  
(TEST-CheckHdfsIndexTest.doTest-seed#[6B515ACCA25292F]) [    ] 
o.a.s.c.AbstractZkTestCase put 
/home/jenkins/workspace/Lucene-Solr-master-Linux/solr/core/src/test-files/solr/collection1/conf/mapping-ISOLatin1Accent.txt
 to /configs/conf1/mapping-ISOLatin1Accent.txt
   [junit4]   2> 25805 INFO  
(TEST-CheckHdfsIndexTest.doTest-seed#[6B515ACCA25292F]) [    ] 
o.a.s.c.AbstractZkTestCase put 
/home/jenkins/workspace/Lucene-Solr-master-Linux/solr/core/src/test-files/solr/collection1/conf/old_synonyms.txt
 to /configs/conf1/old_synonyms.txt
   [junit4]   2> 25806 INFO  
(TEST-CheckHdfsIndexTest.doTest-seed#[6B515ACCA25292F]) [    ] 
o.a.s.c.AbstractZkTestCase put 
/home/jenkins/workspace/Lucene-Solr-master-Linux/solr/core/src/test-files/solr/collection1/conf/synonyms.txt
 to /configs/conf1/synonyms.txt
   [junit4]   2> 25829 INFO  
(TEST-CheckHdfsIndexTest.doTest-seed#[6B515ACCA25292F]) [    ] 
o.a.s.c.AbstractFullDistribZkTestBase Will use NRT replicas unless explicitly 
asked otherwise
   [junit4]   2> 25916 INFO  
(TEST-CheckHdfsIndexTest.doTest-seed#[6B515ACCA25292F]) [    ] o.e.j.s.Server 
jetty-9.3.20.v20170531
   [junit4]   2> 25917 INFO  
(TEST-CheckHdfsIndexTest.doTest-seed#[6B515ACCA25292F]) [    ] 
o.e.j.s.h.ContextHandler Started 
o.e.j.s.ServletContextHandler@1dea64b{/vh_wo,null,AVAILABLE}
   [junit4]   2> 25919 INFO  
(TEST-CheckHdfsIndexTest.doTest-seed#[6B515ACCA25292F]) [    ] 
o.e.j.s.AbstractConnector Started ServerConnector@702df0{SSL,[ssl, 
http/1.1]}{127.0.0.1:41973}
   [junit4]   2> 25919 INFO  
(TEST-CheckHdfsIndexTest.doTest-seed#[6B515ACCA25292F]) [    ] o.e.j.s.Server 
Started @27376ms
   [junit4]   2> 25919 INFO  
(TEST-CheckHdfsIndexTest.doTest-seed#[6B515ACCA25292F]) [    ] 
o.a.s.c.s.e.JettySolrRunner Jetty properties: 
{solr.data.dir=hdfs://localhost.localdomain:33053/hdfs__localhost.localdomain_33053__home_jenkins_workspace_Lucene-Solr-master-Linux_solr_build_solr-core_test_J0_temp_solr.index.hdfs.CheckHdfsIndexTest_6B515ACCA25292F-001_tempDir-002_control_data,
 hostContext=/vh_wo, hostPort=41973, 
coreRootDirectory=/home/jenkins/workspace/Lucene-Solr-master-Linux/solr/build/solr-core/test/J0/temp/solr.index.hdfs.CheckHdfsIndexTest_6B515ACCA25292F-001/control-001/cores}
   [junit4]   2> 25919 ERROR 
(TEST-CheckHdfsIndexTest.doTest-seed#[6B515ACCA25292F]) [    ] 
o.a.s.u.StartupLoggingUtils Missing Java Option solr.log.dir. Logging may be 
missing or incomplete.
   [junit4]   2> 25919 INFO  
(TEST-CheckHdfsIndexTest.doTest-seed#[6B515ACCA25292F]) [    ] 
o.a.s.s.SolrDispatchFilter  ___      _       Welcome to Apache Solr™ version 
8.0.0
   [junit4]   2> 25919 INFO  
(TEST-CheckHdfsIndexTest.doTest-seed#[6B515ACCA25292F]) [    ] 
o.a.s.s.SolrDispatchFilter / __| ___| |_ _   Starting in cloud mode on port null
   [junit4]   2> 25920 INFO  
(TEST-CheckHdfsIndexTest.doTest-seed#[6B515ACCA25292F]) [    ] 
o.a.s.s.SolrDispatchFilter \__ \/ _ \ | '_|  Install dir: null
   [junit4]   2> 25920 INFO  
(TEST-CheckHdfsIndexTest.doTest-seed#[6B515ACCA25292F]) [    ] 
o.a.s.s.SolrDispatchFilter |___/\___/_|_|    Start time: 
2017-11-19T18:03:00.397Z
   [junit4]   2> 25923 INFO  
(TEST-CheckHdfsIndexTest.doTest-seed#[6B515ACCA25292F]) [    ] 
o.a.s.s.SolrDispatchFilter Loading solr.xml from SolrHome (not found in 
ZooKeeper)
   [junit4]   2> 25923 INFO  
(TEST-CheckHdfsIndexTest.doTest-seed#[6B515ACCA25292F]) [    ] 
o.a.s.c.SolrXmlConfig Loading container configuration from 
/home/jenkins/workspace/Lucene-Solr-master-Linux/solr/build/solr-core/test/J0/temp/solr.index.hdfs.CheckHdfsIndexTest_6B515ACCA25292F-001/control-001/solr.xml
   [junit4]   2> 25932 INFO  
(TEST-CheckHdfsIndexTest.doTest-seed#[6B515ACCA25292F]) [    ] 
o.a.s.c.SolrXmlConfig Configuration parameter autoReplicaFailoverWorkLoopDelay 
is ignored
   [junit4]   2> 25932 INFO  
(TEST-CheckHdfsIndexTest.doTest-seed#[6B515ACCA25292F]) [    ] 
o.a.s.c.SolrXmlConfig Configuration parameter 
autoReplicaFailoverBadNodeExpiration is ignored
   [junit4]   2> 25940 INFO  
(TEST-CheckHdfsIndexTest.doTest-seed#[6B515ACCA25292F]) [    ] 
o.a.s.c.SolrXmlConfig MBean server found: 
com.sun.jmx.mbeanserver.JmxMBeanServer@ee9e24, but no JMX reporters were 
configured - adding default JMX reporter.
   [junit4]   2> 25951 INFO  
(TEST-CheckHdfsIndexTest.doTest-seed#[6B515ACCA25292F]) [    ] 
o.a.s.c.ZkContainer Zookeeper client=127.0.0.1:38619/solr
   [junit4]   2> 26115 INFO  
(TEST-CheckHdfsIndexTest.doTest-seed#[6B515ACCA25292F]) 
[n:127.0.0.1:41973_vh_wo    ] o.a.s.c.Overseer Overseer (id=null) closing
   [junit4]   2> 26117 INFO  
(TEST-CheckHdfsIndexTest.doTest-seed#[6B515ACCA25292F]) 
[n:127.0.0.1:41973_vh_wo    ] o.a.s.c.OverseerElectionContext I am going to be 
the leader 127.0.0.1:41973_vh_wo
   [junit4]   2> 26119 INFO  
(TEST-CheckHdfsIndexTest.doTest-seed#[6B515ACCA25292F]) 
[n:127.0.0.1:41973_vh_wo    ] o.a.s.c.Overseer Overseer 
(id=99032405120843781-127.0.0.1:41973_vh_wo-n_0000000000) starting
   [junit4]   2> 26190 INFO  
(TEST-CheckHdfsIndexTest.doTest-seed#[6B515ACCA25292F]) 
[n:127.0.0.1:41973_vh_wo    ] o.a.s.c.ZkController Register node as live in 
ZooKeeper:/live_nodes/127.0.0.1:41973_vh_wo
   [junit4]   2> 26203 INFO  
(zkCallback-27-thread-1-processing-n:127.0.0.1:41973_vh_wo) 
[n:127.0.0.1:41973_vh_wo    ] o.a.s.c.c.ZkStateReader Updated live nodes from 
ZooKeeper... (0) -> (1)
   [junit4]   2> 26503 INFO  
(TEST-CheckHdfsIndexTest.doTest-seed#[6B515ACCA25292F]) 
[n:127.0.0.1:41973_vh_wo    ] o.a.s.m.r.SolrJmxReporter JMX monitoring for 
'solr.node' (registry 'solr.node') enabled at server: 
com.sun.jmx.mbeanserver.JmxMBeanServer@ee9e24
   [junit4]   2> 26527 INFO  
(TEST-CheckHdfsIndexTest.doTest-seed#[6B515ACCA25292F]) 
[n:127.0.0.1:41973_vh_wo    ] o.a.s.m.r.SolrJmxReporter JMX monitoring for 
'solr.jvm' (registry 'solr.jvm') enabled at server: 
com.sun.jmx.mbeanserver.JmxMBeanServer@ee9e24
   [junit4]   2> 26527 INFO  
(TEST-CheckHdfsIndexTest.doTest-seed#[6B515ACCA25292F]) 
[n:127.0.0.1:41973_vh_wo    ] o.a.s.m.r.SolrJmxReporter JMX monitoring for 
'solr.jetty' (registry 'solr.jetty') enabled at server: 
com.sun.jmx.mbeanserver.JmxMBeanServer@ee9e24
   [junit4]   2> 26543 INFO  
(TEST-CheckHdfsIndexTest.doTest-seed#[6B515ACCA25292F]) 
[n:127.0.0.1:41973_vh_wo    ] o.a.s.c.CorePropertiesLocator Found 0 core 
definitions underneath 
/home/jenkins/workspace/Lucene-Solr-master-Linux/solr/build/solr-core/test/J0/temp/solr.index.hdfs.CheckHdfsIndexTest_6B515ACCA25292F-001/control-001/cores
   [junit4]   2> 26595 INFO  
(TEST-CheckHdfsIndexTest.doTest-seed#[6B515ACCA25292F]) [    ] 
o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (0) -> (1)
   [junit4]   2> 26599 INFO  
(TEST-CheckHdfsIndexTest.doTest-seed#[6B515ACCA25292F]) [    ] 
o.a.s.c.s.i.ZkClientClusterStateProvider Cluster at 127.0.0.1:38619/solr ready
   [junit4]   2> 26694 INFO  (qtp24773453-433) [n:127.0.0.1:41973_vh_wo    ] 
o.a.s.h.a.CollectionsHandler Invoked Collection Action :create with params 
replicationFactor=1&collection.configName=conf1&name=control_collection&nrtReplicas=1&action=CREATE&numShards=1&createNodeSet=127.0.0.1:41973_vh_wo&wt=javabin&version=2
 and sendToOCPQueue=true
   [junit4]   2> 26707 INFO  
(OverseerThreadFactory-125-thread-1-processing-n:127.0.0.1:41973_vh_wo) 
[n:127.0.0.1:41973_vh_wo    ] o.a.s.c.CreateCollectionCmd Create collection 
control_collection
   [junit4]   2> 26840 INFO  (qtp24773453-428) [n:127.0.0.1:41973_vh_wo    ] 
o.a.s.h.a.CoreAdminOperation core create command 
qt=/admin/cores&collection.configName=conf1&newCollection=true&name=control_collection_shard1_replica_n1&action=CREATE&numShards=1&collection=control_collection&shard=shard1&wt=javabin&version=2&replicaType=NRT
   [junit4]   2> 26841 INFO  (qtp24773453-428) [n:127.0.0.1:41973_vh_wo    ] 
o.a.s.c.TransientSolrCoreCacheDefault Allocating transient cache for 4 
transient cores
   [junit4]   2> 26953 INFO  
(zkCallback-27-thread-1-processing-n:127.0.0.1:41973_vh_wo) 
[n:127.0.0.1:41973_vh_wo    ] o.a.s.c.c.ZkStateReader A cluster state change: 
[WatchedEvent state:SyncConnected type:NodeDataChanged 
path:/collections/control_collection/state.json] for collection 
[control_collection] has occurred - updating... (live nodes size: [1])
   [junit4]   2> 27893 INFO  (qtp24773453-428) [n:127.0.0.1:41973_vh_wo 
c:control_collection s:shard1  x:control_collection_shard1_replica_n1] 
o.a.s.c.SolrConfig Using Lucene MatchVersion: 8.0.0
   [junit4]   2> 27944 INFO  (qtp24773453-428) [n:127.0.0.1:41973_vh_wo 
c:control_collection s:shard1  x:control_collection_shard1_replica_n1] 
o.a.s.s.IndexSchema [control_collection_shard1_replica_n1] Schema name=test
   [junit4]   2> 28129 INFO  (qtp24773453-428) [n:127.0.0.1:41973_vh_wo 
c:control_collection s:shard1  x:control_collection_shard1_replica_n1] 
o.a.s.s.IndexSchema Loaded schema test/1.0 with uniqueid field id
   [junit4]   2> 28166 INFO  (qtp24773453-428) [n:127.0.0.1:41973_vh_wo 
c:control_collection s:shard1  x:control_collection_shard1_replica_n1] 
o.a.s.c.CoreContainer Creating SolrCore 'control_collection_shard1_replica_n1' 
using configuration from collection control_collection, trusted=true
   [junit4]   2> 28166 INFO  (qtp24773453-428) [n:127.0.0.1:41973_vh_wo 
c:control_collection s:shard1  x:control_collection_shard1_replica_n1] 
o.a.s.m.r.SolrJmxReporter JMX monitoring for 
'solr.core.control_collection.shard1.replica_n1' (registry 
'solr.core.control_collection.shard1.replica_n1') enabled at server: 
com.sun.jmx.mbeanserver.JmxMBeanServer@ee9e24
   [junit4]   2> 28171 INFO  (qtp24773453-428) [n:127.0.0.1:41973_vh_wo 
c:control_collection s:shard1  x:control_collection_shard1_replica_n1] 
o.a.s.c.HdfsDirectoryFactory 
solr.hdfs.home=hdfs://localhost.localdomain:33053/solr_hdfs_home
   [junit4]   2> 28171 INFO  (qtp24773453-428) [n:127.0.0.1:41973_vh_wo 
c:control_collection s:shard1  x:control_collection_shard1_replica_n1] 
o.a.s.c.HdfsDirectoryFactory Solr Kerberos Authentication disabled
   [junit4]   2> 28171 INFO  (qtp24773453-428) [n:127.0.0.1:41973_vh_wo 
c:control_collection s:shard1  x:control_collection_shard1_replica_n1] 
o.a.s.c.SolrCore solr.RecoveryStrategy.Builder
   [junit4]   2> 28171 INFO  (qtp24773453-428) [n:127.0.0.1:41973_vh_wo 
c:control_collection s:shard1  x:control_collection_shard1_replica_n1] 
o.a.s.c.SolrCore [[control_collection_shard1_replica_n1] ] Opening new SolrCore 
at 
[/home/jenkins/workspace/Lucene-Solr-master-Linux/solr/build/solr-core/test/J0/temp/solr.index.hdfs.CheckHdfsIndexTest_6B515ACCA25292F-001/control-001/cores/control_collection_shard1_replica_n1],
 
dataDir=[hdfs://localhost.localdomain:33053/solr_hdfs_home/control_collection/core_node2/data/]
   [junit4]   2> 28175 INFO  (qtp24773453-428) [n:127.0.0.1:41973_vh_wo 
c:control_collection s:shard1  x:control_collection_shard1_replica_n1] 
o.a.s.c.HdfsDirectoryFactory creating directory factory for path 
hdfs://localhost.localdomain:33053/solr_hdfs_home/control_collection/core_node2/data/snapshot_metadata
   [junit4]   2> 28199 INFO  (qtp24773453-428) [n:127.0.0.1:41973_vh_wo 
c:control_collection s:shard1  x:control_collection_shard1_replica_n1] 
o.a.s.c.HdfsDirectoryFactory Number of slabs of block cache [1] with direct 
memory allocation set to [true]
   [junit4]   2> 28199 INFO  (qtp24773453-428) [n:127.0.0.1:41973_vh_wo 
c:control_collection s:shard1  x:control_collection_shard1_replica_n1] 
o.a.s.c.HdfsDirectoryFactory Block cache target memory usage, slab size of 
[8388608] will allocate [1] slabs and use ~[8388608] bytes
   [junit4]   2> 28200 INFO  (qtp24773453-428) [n:127.0.0.1:41973_vh_wo 
c:control_collection s:shard1  x:control_collection_shard1_replica_n1] 
o.a.s.c.HdfsDirectoryFactory Creating new single instance HDFS BlockCache
   [junit4]   2> 28670 INFO  (qtp24773453-428) [n:127.0.0.1:41973_vh_wo 
c:control_collection s:shard1  x:control_collection_shard1_replica_n1] 
o.a.s.s.b.BlockDirectory Block cache on write is disabled
   [junit4]   2> 28678 INFO  (qtp24773453-428) [n:127.0.0.1:41973_vh_wo 
c:control_collection s:shard1  x:control_collection_shard1_replica_n1] 
o.a.s.c.HdfsDirectoryFactory creating directory factory for path 
hdfs://localhost.localdomain:33053/solr_hdfs_home/control_collection/core_node2/data
   [junit4]   2> 28710 INFO  (qtp24773453-428) [n:127.0.0.1:41973_vh_wo 
c:control_collection s:shard1  x:control_collection_shard1_replica_n1] 
o.a.s.c.HdfsDirectoryFactory creating directory factory for path 
hdfs://localhost.localdomain:33053/solr_hdfs_home/control_collection/core_node2/data/index
   [junit4]   2> 28719 INFO  (qtp24773453-428) [n:127.0.0.1:41973_vh_wo 
c:control_collection s:shard1  x:control_collection_shard1_replica_n1] 
o.a.s.c.HdfsDirectoryFactory Number of slabs of block cache [1] with direct 
memory allocation set to [true]
   [junit4]   2> 28719 INFO  (qtp24773453-428) [n:127.0.0.1:41973_vh_wo 
c:control_collection s:shard1  x:control_collection_shard1_replica_n1] 
o.a.s.c.HdfsDirectoryFactory Block cache target memory usage, slab size of 
[8388608] will allocate [1] slabs and use ~[8388608] bytes
   [junit4]   2> 28719 INFO  (qtp24773453-428) [n:127.0.0.1:41973_vh_wo 
c:control_collection s:shard1  x:control_collection_shard1_replica_n1] 
o.a.s.c.HdfsDirectoryFactory Creating new single instance HDFS BlockCache
   [junit4]   2> 28725 INFO  (qtp24773453-428) [n:127.0.0.1:41973_vh_wo 
c:control_collection s:shard1  x:control_collection_shard1_replica_n1] 
o.a.s.s.b.BlockDirectory Block cache on write is disabled
   [junit4]   2> 28726 INFO  (qtp24773453-428) [n:127.0.0.1:41973_vh_wo 
c:control_collection s:shard1  x:control_collection_shard1_replica_n1] 
o.a.s.u.RandomMergePolicy RandomMergePolicy wrapping class 
org.apache.lucene.index.LogDocMergePolicy: [LogDocMergePolicy: 
minMergeSize=1000, mergeFactor=19, maxMergeSize=9223372036854775807, 
maxMergeSizeForForcedMerge=9223372036854775807, calibrateSizeByDeletes=true, 
maxMergeDocs=2147483647, maxCFSSegmentSizeMB=8.796093022207999E12, 
noCFSRatio=0.0]
   [junit4]   2> 28945 INFO  (Block report processor) [    ] BlockStateChange 
BLOCK* addStoredBlock: blockMap updated: 127.0.0.1:40705 is added to 
blk_1073741825_1001{UCState=COMMITTED, truncateBlock=null, primaryNodeIndex=-1, 
replicas=[ReplicaUC[[DISK]DS-7129cf1e-8d1e-494c-8a64-2fa2690b4f96:NORMAL:127.0.0.1:41669|RBW],
 
ReplicaUC[[DISK]DS-24ed71f7-8ff8-4327-a9e7-c2603584c910:NORMAL:127.0.0.1:40705|RBW]]}
 size 69
   [junit4]   2> 28945 INFO  (Block report processor) [    ] BlockStateChange 
BLOCK* addStoredBlock: blockMap updated: 127.0.0.1:41669 is added to 
blk_1073741825_1001 size 69
   [junit4]   2> 29375 WARN  (qtp24773453-428) [n:127.0.0.1:41973_vh_wo 
c:control_collection s:shard1  x:control_collection_shard1_replica_n1] 
o.a.s.c.RequestHandlers INVALID paramSet a in requestHandler {type = 
requestHandler,name = /dump,class = DumpRequestHandler,attributes = 
{initParams=a, name=/dump, class=DumpRequestHandler},args = 
{defaults={a=A,b=B}}}
   [junit4]   2> 29462 INFO  (qtp24773453-428) [n:127.0.0.1:41973_vh_wo 
c:control_collection s:shard1  x:control_collection_shard1_replica_n1] 
o.a.s.u.UpdateHandler Using UpdateLog implementation: 
org.apache.solr.update.HdfsUpdateLog
   [junit4]   2> 29462 INFO  (qtp24773453-428) [n:127.0.0.1:41973_vh_wo 
c:control_collection s:shard1  x:control_collection_shard1_replica_n1] 
o.a.s.u.UpdateLog Initializing UpdateLog: dataDir= defaultSyncLevel=FLUSH 
numRecordsToKeep=100 maxNumLogsToKeep=10 numVersionBuckets=65536
   [junit4]   2> 29462 INFO  (qtp24773453-428) [n:127.0.0.1:41973_vh_wo 
c:control_collection s:shard1  x:control_collection_shard1_replica_n1] 
o.a.s.u.HdfsUpdateLog Initializing HdfsUpdateLog: tlogDfsReplication=2
   [junit4]   2> 29476 INFO  (qtp24773453-428) [n:127.0.0.1:41973_vh_wo 
c:control_collection s:shard1  x:control_collection_shard1_replica_n1] 
o.a.s.u.CommitTracker Hard AutoCommit: disabled
   [junit4]   2> 29476 INFO  (qtp24773453-428) [n:127.0.0.1:41973_vh_wo 
c:control_collection s:shard1  x:control_collection_shard1_replica_n1] 
o.a.s.u.CommitTracker Soft AutoCommit: disabled
   [junit4]   2> 29481 INFO  (qtp24773453-428) [n:127.0.0.1:41973_vh_wo 
c:control_collection s:shard1  x:control_collection_shard1_replica_n1] 
o.a.s.u.RandomMergePolicy RandomMergePolicy wrapping class 
org.apache.lucene.index.TieredMergePolicy: [TieredMergePolicy: 
maxMergeAtOnce=16, maxMergeAtOnceExplicit=19, maxMergedSegmentMB=12.9619140625, 
floorSegmentMB=2.181640625, forceMergeDeletesPctAllowed=26.798415289669794, 
segmentsPerTier=18.0, maxCFSSegmentSizeMB=8.796093022207999E12, 
noCFSRatio=0.8082669045615982
   [junit4]   2> 29552 INFO  (qtp24773453-428) [n:127.0.0.1:41973_vh_wo 
c:control_collection s:shard1  x:control_collection_shard1_replica_n1] 
o.a.s.s.SolrIndexSearcher Opening 
[Searcher@1949651[control_collection_shard1_replica_n1] main]
   [junit4]   2> 29556 INFO  (qtp24773453-428) [n:127.0.0.1:41973_vh_wo 
c:control_collection s:shard1  x:control_collection_shard1_replica_n1] 
o.a.s.r.ManagedResourceStorage Configured ZooKeeperStorageIO with znodeBase: 
/configs/conf1
   [junit4]   2> 29556 INFO  (qtp24773453-428) [n:127.0.0.1:41973_vh_wo 
c:control_collection s:shard1  x:control_collection_shard1_replica_n1] 
o.a.s.r.ManagedResourceStorage Loaded null at path _rest_managed.json using 
ZooKeeperStorageIO:path=/configs/conf1
   [junit4]   2> 29560 INFO  (qtp24773453-428) [n:127.0.0.1:41973_vh_wo 
c:control_collection s:shard1  x:control_collection_shard1_replica_n1] 
o.a.s.h.ReplicationHandler Commits will be reserved for 10000ms.
   [junit4]   2> 29566 INFO  
(searcherExecutor-128-thread-1-processing-n:127.0.0.1:41973_vh_wo 
x:control_collection_shard1_replica_n1 s:shard1 c:control_collection) 
[n:127.0.0.1:41973_vh_wo c:control_collection s:shard1  
x:control_collection_shard1_replica_n1] o.a.s.c.SolrCore 
[control_collection_shard1_replica_n1] Registered new searcher 
Searcher@1949651[control_collection_shard1_replica_n1] 
main{ExitableDirectoryReader(UninvertingDirectoryReader())}
   [junit4]   2> 29567 INFO  (qtp24773453-428) [n:127.0.0.1:41973_vh_wo 
c:control_collection s:shard1  x:control_collection_shard1_replica_n1] 
o.a.s.u.UpdateLog Could not find max version in index or recent updates, using 
new clock 1584518486078521344
   [junit4]   2> 29580 INFO  (qtp24773453-428) [n:127.0.0.1:41973_vh_wo 
c:control_collection s:shard1  x:control_collection_shard1_replica_n1] 
o.a.s.c.ShardLeaderElectionContext Enough replicas found to continue.
   [junit4]   2> 29580 INFO  (qtp24773453-428) [n:127.0.0.1:41973_vh_wo 
c:control_collection s:shard1  x:control_collection_shard1_replica_n1] 
o.a.s.c.ShardLeaderElectionContext I may be the new leader - try and sync
   [junit4]   2> 29580 INFO  (qtp24773453-428) [n:127.0.0.1:41973_vh_wo 
c:control_collection s:shard1  x:control_collection_shard1_replica_n1] 
o.a.s.c.SyncStrategy Sync replicas to 
https://127.0.0.1:41973/vh_wo/control_collection_shard1_replica_n1/
   [junit4]   2> 29581 INFO  (qtp24773453-428) [n:127.0.0.1:41973_vh_wo 
c:control_collection s:shard1  x:control_collection_shard1_replica_n1] 
o.a.s.c.SyncStrategy Sync Success - now sync replicas to me
   [junit4]   2> 29581 INFO  (qtp24773453-428) [n:127.0.0.1:41973_vh_wo 
c:control_collection s:shard1  x:control_collection_shard1_replica_n1] 
o.a.s.c.SyncStrategy 
https://127.0.0.1:41973/vh_wo/control_collection_shard1_replica_n1/ has no 
replicas
   [junit4]   2> 29581 INFO  (qtp24773453-428) [n:127.0.0.1:41973_vh_wo 
c:control_collection s:shard1  x:control_collection_shard1_replica_n1] 
o.a.s.c.ShardLeaderElectionContext Found all replicas participating in 
election, clear LIR
   [junit4]   2> 29588 INFO  (qtp24773453-428) [n:127.0.0.1:41973_vh_wo 
c:control_collection s:shard1  x:control_collection_shard1_replica_n1] 
o.a.s.c.ShardLeaderElectionContext I am the new leader: 
https://127.0.0.1:41973/vh_wo/control_collection_shard1_replica_n1/ shard1
   [junit4]   2> 29590 INFO  
(zkCallback-27-thread-2-processing-n:127.0.0.1:41973_vh_wo) 
[n:127.0.0.1:41973_vh_wo    ] o.a.s.c.c.ZkStateReader A cluster state change: 
[WatchedEvent state:SyncConnected type:NodeDataChanged 
path:/collections/control_collection/state.json] for collection 
[control_collection] has occurred - updating... (live nodes size: [1])
   [junit4]   2> 29639 INFO  (qtp24773453-428) [n:127.0.0.1:41973_vh_wo 
c:control_collection s:shard1  x:control_collection_shard1_replica_n1] 
o.a.s.c.ZkController I am the leader, no recovery necessary
   [junit4]   2> 29641 INFO  (qtp24773453-428) [n:127.0.0.1:41973_vh_wo 
c:control_collection s:shard1  x:control_collection_shard1_replica_n1] 
o.a.s.s.HttpSolrCall [admin] webapp=null path=/admin/cores 
params={qt=/admin/cores&collection.configName=conf1&newCollection=true&name=control_collection_shard1_replica_n1&action=CREATE&numShards=1&collection=control_collection&shard=shard1&wt=javabin&version=2&replicaType=NRT}
 status=0 QTime=2802
   [junit4]   2> 29663 INFO  (qtp24773453-433) [n:127.0.0.1:41973_vh_wo    ] 
o.a.s.h.a.CollectionsHandler Wait for new collection to be active for at most 
30 seconds. Check all shard replicas
   [junit4]   2> 29743 INFO  
(zkCallback-27-thread-2-processing-n:127.0.0.1:41973_vh_wo) 
[n:127.0.0.1:41973_vh_wo    ] o.a.s.c.c.ZkStateReader A cluster state change: 
[WatchedEvent state:SyncConnected type:NodeDataChanged 
path:/collections/control_collection/state.json] for collection 
[control_collection] has occurred - updating... (live nodes size: [1])
   [junit4]   2> 30663 INFO  (qtp24773453-433) [n:127.0.0.1:41973_vh_wo    ] 
o.a.s.s.HttpSolrCall [admin] webapp=null path=/admin/collections 
params={replicationFactor=1&collection.configName=conf1&name=control_collection&nrtReplicas=1&action=CREATE&numShards=1&createNodeSet=127.0.0.1:41973_vh_wo&wt=javabin&version=2}
 status=0 QTime=3971
   [junit4]   2> 30671 INFO  
(TEST-CheckHdfsIndexTest.doTest-seed#[6B515ACCA25292F]) [    ] 
o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (0) -> (1)
   [junit4]   2> 30672 INFO  
(TEST-CheckHdfsIndexTest.doTest-seed#[6B515ACCA25292F]) [    ] 
o.a.s.c.s.i.ZkClientClusterStateProvider Cluster at 127.0.0.1:38619/solr ready
   [junit4]   2> 30673 INFO  
(TEST-CheckHdfsIndexTest.doTest-seed#[6B515ACCA25292F]) [    ] 
o.a.s.c.ChaosMonkey monkey: init - expire sessions:false cause connection 
loss:false
   [junit4]   2> 30680 INFO  (qtp24773453-434) [n:127.0.0.1:41973_vh_wo    ] 
o.a.s.h.a.CollectionsHandler Invoked Collection Action :create with params 
replicationFactor=1&collection.configName=conf1&name=collection1&nrtReplicas=1&action=CREATE&numShards=1&createNodeSet=&stateFormat=1&wt=javabin&version=2
 and sendToOCPQueue=true
   [junit4]   2> 30683 INFO  
(OverseerThreadFactory-125-thread-2-processing-n:127.0.0.1:41973_vh_wo) 
[n:127.0.0.1:41973_vh_wo    ] o.a.s.c.CreateCollectionCmd Create collection 
collection1
   [junit4]   2> 30684 INFO  
(OverseerCollectionConfigSetProcessor-99032405120843781-127.0.0.1:41973_vh_wo-n_0000000000)
 [n:127.0.0.1:41973_vh_wo    ] o.a.s.c.OverseerTaskQueue Response ZK path: 
/overseer/collection-queue-work/qnr-0000000000 doesn't exist.  Requestor may 
have disconnected from ZooKeeper
   [junit4]   2> 30684 WARN  
(OverseerThreadFactory-125-thread-2-processing-n:127.0.0.1:41973_vh_wo) 
[n:127.0.0.1:41973_vh_wo    ] o.a.s.c.CreateCollectionCmd It is unusual to 
create a collection (collection1) without cores.
   [junit4]   2> 30889 INFO  (qtp24773453-434) [n:127.0.0.1:41973_vh_wo    ] 
o.a.s.h.a.CollectionsHandler Wait for new collection to be active for at most 
30 seconds. Check all shard replicas
   [junit4]   2> 30889 INFO  (qtp24773453-434) [n:127.0.0.1:41973_vh_wo    ] 
o.a.s.s.HttpSolrCall [admin] webapp=null path=/admin/collections 
params={replicationFactor=1&collection.configName=conf1&name=collection1&nrtReplicas=1&action=CREATE&numShards=1&createNodeSet=&stateFormat=1&wt=javabin&version=2}
 status=0 QTime=208
   [junit4]   2> 30976 INFO  
(TEST-CheckHdfsIndexTest.doTest-seed#[6B515ACCA25292F]) [    ] 
o.a.s.c.AbstractFullDistribZkTestBase create jetty 1 in directory 
/home/jenkins/workspace/Lucene-Solr-master-Linux/solr/build/solr-core/test/J0/temp/solr.index.hdfs.CheckHdfsIndexTest_6B515ACCA25292F-001/shard-1-001
 of type NRT
   [junit4]   2> 30977 INFO  
(TEST-CheckHdfsIndexTest.doTest-seed#[6B515ACCA25292F]) [    ] o.e.j.s.Server 
jetty-9.3.20.v20170531
   [junit4]   2> 30981 INFO  
(TEST-CheckHdfsIndexTest.doTest-seed#[6B515ACCA25292F]) [    ] 
o.e.j.s.h.ContextHandler Started 
o.e.j.s.ServletContextHandler@779188{/vh_wo,null,AVAILABLE}
   [junit4]   2> 30982 INFO  
(TEST-CheckHdfsIndexTest.doTest-seed#[6B515ACCA25292F]) [    ] 
o.e.j.s.AbstractConnector Started ServerConnector@83b502{SSL,[ssl, 
http/1.1]}{127.0.0.1:35837}
   [junit4]   2> 30982 INFO  
(TEST-CheckHdfsIndexTest.doTest-seed#[6B515ACCA25292F]) [    ] o.e.j.s.Server 
Started @32439ms
   [junit4]   2> 30982 INFO  
(TEST-CheckHdfsIndexTest.doTest-seed#[6B515ACCA25292F]) [    ] 
o.a.s.c.s.e.JettySolrRunner Jetty properties: 
{solr.data.dir=hdfs://localhost.localdomain:33053/hdfs__localhost.localdomain_33053__home_jenkins_workspace_Lucene-Solr-master-Linux_solr_build_solr-core_test_J0_temp_solr.index.hdfs.CheckHdfsIndexTest_6B515ACCA25292F-001_tempDir-002_jetty1,
 solrconfig=solrconfig.xml, hostContext=/vh_wo, hostPort=35837, 
coreRootDirectory=/home/jenkins/workspace/Lucene-Solr-master-Linux/solr/build/solr-core/test/J0/../../../../../../../../../home/jenkins/workspace/Lucene-Solr-master-Linux/solr/build/solr-core/test/J0/temp/solr.index.hdfs.CheckHdfsIndexTest_6B515ACCA25292F-001/shard-1-001/cores}
   [junit4]   2> 30982 ERROR 
(TEST-CheckHdfsIndexTest.doTest-seed#[6B515ACCA25292F]) [    ] 
o.a.s.u.StartupLoggingUtils Missing Java Option solr.log.dir. Logging may be 
missing or incomplete.
   [junit4]   2> 30982 INFO  
(TEST-CheckHdfsIndexTest.doTest-seed#[6B515ACCA25292F]) [    ] 
o.a.s.s.SolrDispatchFilter  ___      _       Welcome to Apache Solr™ version 
8.0.0
   [junit4]   2> 30982 INFO  
(TEST-CheckHdfsIndexTest.doTest-seed#[6B515ACCA25292F]) [    ] 
o.a.s.s.SolrDispatchFilter / __| ___| |_ _   Starting in cloud mode on port null
   [junit4]   2> 30983 INFO  
(TEST-CheckHdfsIndexTest.doTest-seed#[6B515ACCA25292F]) [    ] 
o.a.s.s.SolrDispatchFilter \__ \/ _ \ | '_|  Install dir: null
   [junit4]   2> 30983 INFO  
(TEST-CheckHdfsIndexTest.doTest-seed#[6B515ACCA25292F]) [    ] 
o.a.s.s.SolrDispatchFilter |___/\___/_|_|    Start time: 
2017-11-19T18:03:05.460Z
   [junit4]   2> 30994 INFO  
(TEST-CheckHdfsIndexTest.doTest-seed#[6B515ACCA25292F]) [    ] 
o.a.s.s.SolrDispatchFilter Loading solr.xml from SolrHome (not found in 
ZooKeeper)
   [junit4]   2> 30994 INFO  
(TEST-CheckHdfsIndexTest.doTest-seed#[6B515ACCA25292F]) [    ] 
o.a.s.c.SolrXmlConfig Loading container configuration from 
/home/jenkins/workspace/Lucene-Solr-master-Linux/solr/build/solr-core/test/J0/temp/solr.index.hdfs.CheckHdfsIndexTest_6B515ACCA25292F-001/shard-1-001/solr.xml
   [junit4]   2> 30998 INFO  
(TEST-CheckHdfsIndexTest.doTest-seed#[6B515ACCA25292F]) [    ] 
o.a.s.c.SolrXmlConfig Configuration parameter autoReplicaFailoverWorkLoopDelay 
is ignored
   [junit4]   2> 30998 INFO  
(TEST-CheckHdfsIndexTest.doTest-seed#[6B515ACCA25292F]) [    ] 
o.a.s.c.SolrXmlConfig Configuration parameter 
autoReplicaFailoverBadNodeExpiration is ignored
   [junit4]   2> 31007 INFO  
(TEST-CheckHdfsIndexTest.doTest-seed#[6B515ACCA25292F]) [    ] 
o.a.s.c.SolrXmlConfig MBean server found: 
com.sun.jmx.mbeanserver.JmxMBeanServer@ee9e24, but no JMX reporters were 
configured - adding default JMX reporter.
   [junit4]   2> 31011 INFO  
(TEST-CheckHdfsIndexTest.doTest-seed#[6B515ACCA25292F]) [    ] 
o.a.s.c.ZkContainer Zookeeper client=127.0.0.1:38619/solr
   [junit4]   2> 31023 INFO  
(TEST-CheckHdfsIndexTest.doTest-seed#[6B515ACCA25292F]) 
[n:127.0.0.1:35837_vh_wo    ] o.a.s.c.c.ZkStateReader Updated live nodes from 
ZooKeeper... (0) -> (1)
   [junit4]   2> 31026 INFO  
(TEST-CheckHdfsIndexTest.doTest-seed#[6B515ACCA25292F]) 
[n:127.0.0.1:35837_vh_wo    ] o.a.s.c.Overseer Overseer (id=null) closing
   [junit4]   2> 31030 INFO  
(TEST-CheckHdfsIndexTest.doTest-seed#[6B515ACCA25292F]) 
[n:127.0.0.1:35837_vh_wo    ] o.a.s.c.ZkController Register node as live in 
ZooKeeper:/live_nodes/127.0.0.1:35837_vh_wo
   [junit4]   2> 31032 INFO  (zkCallback-35-thread-1) [    ] 
o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (1) -> (2)
   [junit4]   2> 31032 INFO  
(zkCallback-27-thread-2-processing-n:127.0.0.1:41973_vh_wo) 
[n:127.0.0.1:41973_vh_wo    ] o.a.s.c.c.ZkStateReader Updated live nodes from 
ZooKeeper... (1) -> (2)
   [junit4]   2> 31034 INFO  
(zkCallback-40-thread-1-processing-n:127.0.0.1:35837_vh_wo) 
[n:127.0.0.1:35837_vh_wo    ] o.a.s.c.c.ZkStateReader Updated live nodes from 
ZooKeeper... (1) -> (2)
   [junit4]   2> 31245 INFO  
(TEST-CheckHdfsIndexTest.doTest-seed#[6B515ACCA25292F]) 
[n:127.0.0.1:35837_vh_wo    ] o.a.s.m.r.SolrJmxReporter JMX monitoring for 
'solr.node' (registry 'solr.node') enabled at server: 
com.sun.jmx.mbeanserver.JmxMBeanServer@ee9e24
   [junit4]   2> 31276 INFO  
(TEST-CheckHdfsIndexTest.doTest-seed#[6B515ACCA25292F]) 
[n:127.0.0.1:35837_vh_wo    ] o.a.s.m.r.SolrJmxReporter JMX monitoring for 
'solr.jvm' (registry 'solr.jvm') enabled at server: 
com.sun.jmx.mbeanserver.JmxMBeanServer@ee9e24
   [junit4]   2> 31284 INFO  
(TEST-CheckHdfsIndexTest.doTest-seed#[6B515ACCA25292F]) 
[n:127.0.0.1:35837_vh_wo    ] o.a.s.m.r.SolrJmxReporter JMX monitoring for 
'solr.jetty' (registry 'solr.jetty') enabled at server: 
com.sun.jmx.mbeanserver.JmxMBeanServer@ee9e24
   [junit4]   2> 31287 INFO  
(TEST-CheckHdfsIndexTest.doTest-seed#[6B515ACCA25292F]) 
[n:127.0.0.1:35837_vh_wo    ] o.a.s.c.CorePropertiesLocator Found 0 core 
definitions underneath 
/home/jenkins/workspace/Lucene-Solr-master-Linux/solr/build/solr-core/test/J0/../../../../../../../../../home/jenkins/workspace/Lucene-Solr-master-Linux/solr/build/solr-core/test/J0/temp/solr.index.hdfs.CheckHdfsIndexTest_6B515ACCA25292F-001/shard-1-001/cores
   [junit4]   2> 31437 INFO  (qtp19624113-485) [n:127.0.0.1:35837_vh_wo    ] 
o.a.s.h.a.CollectionsHandler Invoked Collection Action :addreplica with params 
node=127.0.0.1:35837_vh_wo&action=ADDREPLICA&collection=collection1&shard=shard1&type=NRT&wt=javabin&version=2
 and sendToOCPQueue=true
   [junit4]   2> 31439 INFO  
(OverseerCollectionConfigSetProcessor-99032405120843781-127.0.0.1:41973_vh_wo-n_0000000000)
 [n:127.0.0.1:41973_vh_wo    ] o.a.s.c.OverseerTaskQueue Response ZK path: 
/overseer/collection-queue-work/qnr-0000000002 doesn't exist.  Requestor may 
have disconnected from ZooKeeper
   [junit4]   2> 31441 INFO  
(OverseerThreadFactory-125-thread-3-processing-n:127.0.0.1:41973_vh_wo) 
[n:127.0.0.1:41973_vh_wo    ] o.a.s.c.AddReplicaCmd Node Identified 
127.0.0.1:35837_vh_wo for creating new replica
   [junit4]   2> 31447 INFO  (qtp19624113-492) [n:127.0.0.1:35837_vh_wo    ] 
o.a.s.h.a.CoreAdminOperation core create command 
qt=/admin/cores&collection.configName=conf1&name=collection1_shard1_replica_n21&action=CREATE&collection=collection1&shard=shard1&wt=javabin&version=2&replicaType=NRT
   [junit4]   2> 31448 INFO  (qtp19624113-492) [n:127.0.0.1:35837_vh_wo    ] 
o.a.s.c.TransientSolrCoreCacheDefault Allocating transient cache for 4 
transient cores
   [junit4]   2> 32466 INFO  (qtp19624113-492) [n:127.0.0.1:35837_vh_wo 
c:collection1 s:shard1  x:collection1_shard1_replica_n21] o.a.s.c.SolrConfig 
Using Lucene MatchVersion: 8.0.0
   [junit4]   2> 32495 INFO  (qtp19624113-492) [n:127.0.0.1:35837_vh_wo 
c:collection1 s:shard1  x:collection1_shard1_replica_n21] o.a.s.s.IndexSchema 
[collection1_shard1_replica_n21] Schema name=test
   [junit4]   2> 32659 INFO  (qtp19624113-492) [n:127.0.0.1:35837_vh_wo 
c:collection1 s:shard1  x:collection1_shard1_replica_n21] o.a.s.s.IndexSchema 
Loaded schema test/1.0 with uniqueid field id
   [junit4]   2> 32682 INFO  (qtp19624113-492) [n:127.0.0.1:35837_vh_wo 
c:collection1 s:shard1  x:collection1_shard1_replica_n21] o.a.s.c.CoreContainer 
Creating SolrCore 'collection1_shard1_replica_n21' using configuration from 
collection collection1, trusted=true
   [junit4]   2> 32683 INFO  (qtp19624113-492) [n:127.0.0.1:35837_vh_wo 
c:collection1 s:shard1  x:collection1_shard1_replica_n21] 
o.a.s.m.r.SolrJmxReporter JMX monitoring for 
'solr.core.collection1.shard1.replica_n21' (registry 
'solr.core.collection1.shard1.replica_n21') enabled at server: 
com.sun.jmx.mbeanserver.JmxMBeanServer@ee9e24
   [junit4]   2> 32683 INFO  (qtp19624113-492) [n:127.0.0.1:35837_vh_wo 
c:collection1 s:shard1  x:collection1_shard1_replica_n21] 
o.a.s.c.HdfsDirectoryFactory 
solr.hdfs.home=hdfs://localhost.localdomain:33053/solr_hdfs_home
   [junit4]   2> 32683 INFO  (qtp19624113-492) [n:127.0.0.1:35837_vh_wo 
c:collection1 s:shard1  x:collection1_shard1_replica_n21] 
o.a.s.c.HdfsDirectoryFactory Solr Kerberos Authentication disabled
   [junit4]   2> 32684 INFO  (qtp19624113-492) [n:127.0.0.1:35837_vh_wo 
c:collection1 s:shard1  x:collection1_shard1_replica_n21] o.a.s.c.SolrCore 
solr.RecoveryStrategy.Builder
   [junit4]   2> 32684 INFO  (qtp19624113-492) [n:127.0.0.1:35837_vh_wo 
c:collection1 s:shard1  x:collection1_shard1_replica_n21] o.a.s.c.SolrCore 
[[collection1_shard1_replica_n21] ] Opening new SolrCore at 
[/home/jenkins/workspace/Lucene-Solr-master-Linux/solr/build/solr-core/test/J0/temp/solr.index.hdfs.CheckHdfsIndexTest_6B515ACCA25292F-001/shard-1-001/cores/collection1_shard1_replica_n21],
 
dataDir=[hdfs://localhost.localdomain:33053/solr_hdfs_home/collection1/core_node22/data/]
   [junit4]   2> 32686 INFO  (qtp19624113-492) [n:127.0.0.1:35837_vh_wo 
c:collection1 s:shard1  x:collection1_shard1_replica_n21] 
o.a.s.c.HdfsDirectoryFactory creating directory factory for path 
hdfs://localhost.localdomain:33053/solr_hdfs_home/collection1/core_node22/data/snapshot_metadata
   [junit4]   2> 32695 INFO  (qtp19624113-492) [n:127.0.0.1:35837_vh_wo 
c:collection1 s:shard1  x:collection1_shard1_replica_n21] 
o.a.s.c.HdfsDirectoryFactory Number of slabs of block cache [1] with direct 
memory allocation set to [true]
   [junit4]   2> 32695 INFO  (qtp19624113-492) [n:127.0.0.1:35837_vh_wo 
c:collection1 s:shard1  x:collection1_shard1_replica_n21] 
o.a.s.c.HdfsDirectoryFactory Block cache target memory usage, slab size of 
[8388608] will allocate [1] slabs and use ~[8388608] bytes
   [junit4]   2> 32695 INFO  (qtp19624113-492) [n:127.0.0.1:35837_vh_wo 
c:collection1 s:shard1  x:collection1_shard1_replica_n21] 
o.a.s.c.HdfsDirectoryFactory Creating new single instance HDFS BlockCache
   [junit4]   2> 32703 INFO  (qtp19624113-492) [n:127.0.0.1:35837_vh_wo 
c:collection1 s:shard1  x:collection1_shard1_replica_n21] 
o.a.s.s.b.BlockDirectory Block cache on write is disabled
   [junit4]   2> 32704 INFO  (qtp19624113-492) [n:127.0.0.1:35837_vh_wo 
c:collection1 s:shard1  x:collection1_shard1_replica_n21] 
o.a.s.c.HdfsDirectoryFactory creating directory factory for path 
hdfs://localhost.localdomain:33053/solr_hdfs_home/collection1/core_node22/data
   [junit4]   2> 32732 INFO  (qtp19624113-492) [n:127.0.0.1:35837_vh_wo 
c:collection1 s:shard1  x:collection1_shard1_replica_n21] 
o.a.s.c.HdfsDirectoryFactory creating directory factory for path 
hdfs://localhost.localdomain:33053/solr_hdfs_home/collection1/core_node22/data/index
   [junit4]   2> 32739 INFO  (qtp19624113-492) [n:127.0.0.1:35837_vh_wo 
c:collection1 s:shard1  x:collection1_shard1_replica_n21] 
o.a.s.c.HdfsDirectoryFactory Number of slabs of block cache [1] with direct 
memory allocation set to [true]
   [junit4]   2> 32739 INFO  (qtp19624113-492) [n:127.0.0.1:35837_vh_wo 
c:collection1 s:shard1  x:collection1_shard1_replica_n21] 
o.a.s.c.HdfsDirectoryFactory Block cache target memory usage, slab size of 
[8388608] will allocate [1] slabs and use ~[8388608] bytes
   [junit4]   2> 32739 INFO  (qtp19624113-492) [n:127.0.0.1:35837_vh_wo 
c:collection1 s:shard1  x:collection1_shard1_replica_n21] 
o.a.s.c.HdfsDirectoryFactory Creating new single instance HDFS BlockCache
   [junit4]   2> 32743 INFO  (qtp19624113-492) [n:127.0.0.1:35837_vh_wo 
c:collection1 s:shard1  x:collection1_shard1_replica_n21] 
o.a.s.s.b.BlockDirectory Block cache on write is disabled
   [junit4]   2> 32744 INFO  (qtp19624113-492) [n:127.0.0.1:35837_vh_wo 
c:collection1 s:shard1  x:collection1_shard1_replica_n21] 
o.a.s.u.RandomMergePolicy RandomMergePolicy wrapping class 
org.apache.lucene.index.LogDocMergePolicy: [LogDocMergePolicy: 
minMergeSize=1000, mergeFactor=19, maxMergeSize=9223372036854775807, 
maxMergeSizeForForcedMerge=9223372036854775807, calibrateSizeByDeletes=true, 
maxMergeDocs=2147483647, maxCFSSegmentSizeMB=8.796093022207999E12, 
noCFSRatio=0.0]
   [junit4]   2> 32774 INFO  (Block report processor) [    ] BlockStateChange 
BLOCK* addStoredBlock: blockMap updated: 127.0.0.1:40705 is added to 
blk_1073741826_1002{UCState=UNDER_CONSTRUCTION, truncateBlock=null, 
primaryNodeIndex=-1, 
replicas=[ReplicaUC[[DISK]DS-861e474a-08ce-43d5-bc9a-cb6a92b59e14:NORMAL:127.0.0.1:41669|RBW],
 
ReplicaUC[[DISK]DS-24ed71f7-8ff8-4327-a9e7-c2603584c910:NORMAL:127.0.0.1:40705|RBW]]}
 size 0
   [junit4]   2> 32778 INFO  (Block report processor) [    ] BlockStateChange 
BLOCK* addStoredBlock: blockMap updated: 127.0.0.1:41669 is added to 
blk_1073741826_1002 size 69
   [junit4]   2> 32783 WARN  (qtp19624113-492) [n:127.0.0.1:35837_vh_wo 
c:collection1 s:shard1  x:collection1_shard1_replica_n21] 
o.a.s.c.RequestHandlers INVALID paramSet a in requestHandler {type = 
requestHandler,name = /dump,class = DumpRequestHandler,attributes = 
{initParams=a, name=/dump, class=DumpRequestHandler},args = 
{defaults={a=A,b=B}}}
   [junit4]   2> 32844 INFO  (qtp19624113-492) [n:127.0.0.1:35837_vh_wo 
c:collection1 s:shard1  x:collection1_shard1_replica_n21] o.a.s.u.UpdateHandler 
Using UpdateLog implementation: org.apache.solr.update.HdfsUpdateLog
   [junit4]   2> 32844 INFO  (qtp19624113-492) [n:127.0.0.1:35837_vh_wo 
c:collection1 s:shard1  x:collection1_shard1_replica_n21] o.a.s.u.UpdateLog 
Initializing UpdateLog: dataDir= defaultSyncLevel=FLUSH numRecordsToKeep=100 
maxNumLogsToKeep=10 numVersionBuckets=65536
   [junit4]   2> 32844 INFO  (qtp19624113-492) [n:127.0.0.1:35837_vh_wo 
c:collection1 s:shard1  x:collection1_shard1_replica_n21] o.a.s.u.HdfsUpdateLog 
Initializing HdfsUpdateLog: tlogDfsReplication=2
   [junit4]   2> 32855 INFO  (qtp19624113-492) [n:127.0.0.1:35837_vh_wo 
c:collection1 s:shard1  x:collection1_shard1_replica_n21] o.a.s.u.CommitTracker 
Hard AutoCommit: disabled
   [junit4]   2> 32855 INFO  (qtp19624113-492) [n:127.0.0.1:35837_vh_wo 
c:collection1 s:shard1  x:collection1_shard1_replica_n21] o.a.s.u.CommitTracker 
Soft AutoCommit: disabled
   [junit4]   2> 32857 INFO  (qtp19624113-492) [n:127.0.0.1:35837_vh_wo 
c:collection1 s:shard1  x:collection1_shard1_replica_n21] 
o.a.s.u.RandomMergePolicy RandomMergePolicy wrapping class 
org.apache.lucene.index.TieredMergePolicy: [TieredMergePolicy: 
maxMergeAtOnce=16, maxMergeAtOnceExplicit=19, maxMergedSegmentMB=12.9619140625, 
floorSegmentMB=2.181640625, forceMergeDeletesPctAllowed=26.798415289669794, 
segmentsPerTier=18.0, maxCFSSegmentSizeMB=8.796093022207999E12, 
noCFSRatio=0.8082669045615982
   [junit4]   2> 32875 INFO  (qtp19624113-492) [n:127.0.0.1:35837_vh_wo 
c:collection1 s:shard1  x:collection1_shard1_replica_n21] 
o.a.s.s.SolrIndexSearcher Opening 
[Searcher@5ebc1f[collection1_shard1_replica_n21] main]
   [junit4]   2> 32877 INFO  (qtp19624113-492) [n:127.0.0.1:35837_vh_wo 
c:collection1 s:shard1  x:collection1_shard1_replica_n21] 
o.a.s.r.ManagedResourceStorage Configured ZooKeeperStorageIO with znodeBase: 
/configs/conf1
   [junit4]   2> 32885 INFO  (qtp19624113-492) [n:127.0.0.1:35837_vh_wo 
c:collection1 s:shard1  x:collection1_shard1_replica_n21] 
o.a.s.r.ManagedResourceStorage Loaded null at path _rest_managed.json using 
ZooKeeperStorageIO:path=/configs/conf1
   [junit4]   2> 32887 INFO  (qtp19624113-492) [n:127.0.0.1:35837_vh_wo 
c:collection1 s:shard1  x:collection1_shard1_replica_n21] 
o.a.s.h.ReplicationHandler Commits will be reserved for 10000ms.
   [junit4]   2> 32891 INFO  
(searcherExecutor-139-thread-1-processing-n:127.0.0.1:35837_vh_wo 
x:collection1_shard1_replica_n21 s:shard1 c:collection1) 
[n:127.0.0.1:35837_vh_wo c:collection1 s:shard1  
x:collection1_shard1_replica_n21] o.a.s.c.SolrCore 
[collection1_shard1_replica_n21] Registered new searcher 
Searcher@5ebc1f[collection1_shard1_replica_n21] 
main{ExitableDirectoryReader(UninvertingDirectoryReader())}
   [junit4]   2> 32891 INFO  (qtp19624113-492) [n:127.0.0.1:35837_vh_wo 
c:collection1 s:shard1  x:collection1_shard1_replica_n21] o.a.s.u.UpdateLog 
Could not find max version in index or recent updates, using new clock 
1584518489563987968
   [junit4]   2> 32901 INFO  (qtp19624113-492) [n:127.0.0.1:35837_vh_wo 
c:collection1 s:shard1  x:collection1_shard1_replica_n21] 
o.a.s.c.ShardLeaderElectionContext Enough replicas found to continue.
   [junit4]   2> 32901 INFO  (qtp19624113-492) [n:127.0.0.1:35837_vh_wo 
c:collection1 s:shard1  x:collection1_shard1_replica_n21] 
o.a.s.c.ShardLeaderElectionContext I may be the new leader - try and sync
   [junit4]   2> 32901 INFO  (qtp19624113-492) [n:127.0.0.1:35837_vh_wo 
c:collection1 s:shard1  x:collection1_shard1_replica_n21] o.a.s.c.SyncStrategy 
Sync replicas to https://127.0.0.1:35837/vh_wo/collection1_shard1_replica_n21/
   [junit4]   2> 32903 INFO  (qtp19624113-492) [n:127.0.0.1:35837_vh_wo 
c:collection1 s:shard1  x:collection1_shard1_replica_n21] o.a.s.c.SyncStrategy 
Sync Success - now sync replicas to me
   [junit4]   2> 32903 INFO  (qtp19624113-492) [n:127.0.0.1:35837_vh_wo 
c:collection1 s:shard1  x:collection1_shard1_replica_n21] o.a.s.c.SyncStrategy 
https://127.0.0.1:35837/vh_wo/collection1_shard1_replica_n21/ has no replicas
   [junit4]   2> 32903 INFO  (qtp19624113-492) [n:127.0.0.1:35837_vh_wo 
c:collection1 s:shard1  x:collection1_shard1_replica_n21] 
o.a.s.c.ShardLeaderElectionContext Found all replicas participating in 
election, clear LIR
   [junit4]   2> 32916 INFO  (qtp19624113-492) [n:127.0.0.1:35837_vh_wo 
c:collection1 s:shard1  x:collection1_shard1_replica_n21] 
o.a.s.c.ShardLeaderElectionContext I am the new leader: 
https://127.0.0.1:35837/vh_wo/collection1_shard1_replica_n21/ shard1
   [junit4]   2> 33067 INFO  (qtp19624113-492) [n:127.0.0.1:35837_vh_wo 
c:collection1 s:shard1  x:collection1_shard1_replica_n21] o.a.s.c.ZkController 
I am the leader, no recovery necessary
   [junit4]   2> 33075 INFO  (qtp19624113-492) [n:127.0.0.1:35837_vh_wo 
c:collection1 s:shard1  x:collection1_shard1_replica_n21] o.a.s.s.HttpSolrCall 
[admin] webapp=null path=/admin/cores 
params={qt=/admin/cores&collection.configName=conf1&name=collection1_shard1_replica_n21&action=CREATE&collection=collection1&shard=shard1&wt=javabin&version=2&replicaType=NRT}
 status=0 QTime=1627
   [junit4]   2> 33078 INFO  (qtp19624113-485) [n:127.0.0.1:35837_vh_wo    ] 
o.a.s.s.HttpSolrCall [admin] webapp=null path=/admin/collections 
params={node=127.0.0.1:35837_vh_wo&action=ADDREPLICA&collection=collection1&shard=shard1&type=NRT&wt=javabin&version=2}
 status=0 QTime=1640
   [junit4]   2> 33082 INFO  
(TEST-CheckHdfsIndexTest.doTest-seed#[6B515ACCA25292F]) [    ] 
o.a.s.SolrTestCaseJ4 ###Starting doTest
   [junit4]   2> 33091 INFO  
(TEST-CheckHdfsIndexTest.doTest-seed#[6B515ACCA25292F]) [    ] 
o.a.s.c.AbstractDistribZkTestBase Wait for recoveries to finish - collection: 
collection1 failOnTimeout:true timeout (sec):330
   [junit4]   2> 33441 INFO  
(OverseerCollectionConfigSetProcessor-99032405120843781-127.0.0.1:41973_vh_wo-n_0000000000)
 [n:127.0.0.1:41973_vh_wo    ] o.a.s.c.OverseerTaskQueue Response ZK path: 
/overseer/collection-queue-work/qnr-0000000004 doesn't exist.  Requestor may 
have disconnected from ZooKeeper
   [junit4]   2> 34092 INFO  
(TEST-CheckHdfsIndexTest.doTest-seed#[6B515ACCA25292F]) [    ] 
o.a.s.c.AbstractDistribZkTestBase Recoveries finished - collection: collection1
   [junit4]   2> 34177 INFO  (qtp24773453-433) [n:127.0.0.1:41973_vh_wo 
c:control_collection s:shard1 r:core_node2 
x:control_collection_shard1_replica_n1] o.a.s.u.p.LogUpdateProcessorFactory 
[control_collection_shard1_replica_n1]  webapp=/vh_wo path=/update 
params={wt=javabin&version=2&CONTROL=TRUE}{add=[1 (1584518490836959232)]} 0 76
   [junit4]   2> 34248 INFO  (qtp19624113-490) [n:127.0.0.1:35837_vh_wo 
c:collection1 s:shard1 r:core_node22 x:collection1_shard1_replica_n21] 
o.a.s.u.p.LogUpdateProcessorFactory [collection1_shard1_replica_n21]  
webapp=/vh_wo path=/update params={wt=javabin&version=2}{add=[1 
(1584518490914553856)]} 0 69
   [junit4]   2> 34251 INFO  (qtp24773453-434) [n:127.0.0.1:41973_vh_wo 
c:control_collection s:shard1 r:core_node2 
x:control_collection_shard1_replica_n1] o.a.s.u.DirectUpdateHandler2 start 
commit{_version_=1584518490990051328,optimize=false,openSearcher=true,waitSearcher=true,expungeDeletes=false,softCommit=false,prepareCommit=false}
   [junit4]   2> 34251 INFO  (qtp24773453-434) [n:127.0.0.1:41973_vh_wo 
c:control_collection s:shard1 r:core_node2 
x:control_collection_shard1_replica_n1] o.a.s.u.SolrIndexWriter Calling 
setCommitData with IW:org.apache.solr.update.SolrIndexWriter@f5949c 
commitCommandVersion:1584518490990051328
   [junit4]   2> 34328 INFO  (Block report processor) [    ] BlockStateChange 
BLOCK* addStoredBlock: blockMap updated: 127.0.0.1:41669 is added to 
blk_1073741829_1005{UCState=UNDER_CONSTRUCTION, truncateBlock=null, 
primaryNodeIndex=-1, 
replicas=[ReplicaUC[[DISK]DS-9e40cd40-920b-4ed8-b24a-d8e3e9fc3638:NORMAL:127.0.0.1:40705|RBW],
 
ReplicaUC[[DISK]DS-7129cf1e-8d1e-494c-8a64-2fa2690b4f96:NORMAL:127.0.0.1:41669|FINALIZED]]}
 size 0
   [junit4]   2> 34329 INFO  (Block report processor) [    ] BlockStateChange 
BLOCK* addStoredBlock: blockMap updated: 127.0.0.1:40705 is added to 
blk_1073741829_1005{UCState=UNDER_CONSTRUCTION, truncateBlock=null, 
primaryNodeIndex=-1, 
replicas=[ReplicaUC[[DISK]DS-9e40cd40-920b-4ed8-b24a-d8e3e9fc3638:NORMAL:127.0.0.1:40705|RBW],
 
ReplicaUC[[DISK]DS-7129cf1e-8d1e-494c-8a64-2fa2690b4f96:NORMAL:127.0.0.1:41669|FINALIZED]]}
 size 0
   [junit4]   2> 34390 INFO  (Block report processor) [    ] BlockStateChange 
BLOCK* addStoredBlock: blockMap updated: 127.0.0.1:40705 is added to 
blk_1073741830_1006{UCState=COMMITTED, truncateBlock=null, primaryNodeIndex=-1, 
replicas=[ReplicaUC[[DISK]DS-861e474a-08ce-43d5-bc9a-cb6a92b59e14:NORMAL:127.0.0.1:41669|RBW],
 
ReplicaUC[[DISK]DS-24ed71f7-8ff8-4327-a9e7-c2603584c910:NORMAL:127.0.0.1:40705|RBW]]}
 size 361
   [junit4]   2> 34391 INFO  (Block report processor) [    ] BlockStateChange 
BLOCK* addStoredBlock: blockMap updated: 127.0.0.1:41669 is added to 
blk_1073741830_1006 size 361
   [junit4]   2> 34819 INFO  (Block report processor) [    ] BlockStateChange 
BLOCK* addStoredBlock: blockMap updated: 127.0.0.1:41669 is added to 
blk_1073741831_1007{UCState=UNDER_CONSTRUCTION, truncateBlock=null, 
primaryNodeIndex=-1, 
replicas=[ReplicaUC[[DISK]DS-9e40cd40-920b-4ed8-b24a-d8e3e9fc3638:NORMAL:127.0.0.1:40705|RBW],
 
ReplicaUC[[DISK]DS-7129cf1e-8d1e-494c-8a64-2fa2690b4f96:NORMAL:127.0.0.1:41669|RBW]]}
 size 0
   [junit4]   2> 34822 INFO  (Block report processor) [    ] BlockStateChange 
BLOCK* addStoredBlock: blockMap updated: 127.0.0.1:40705 is added to 
blk_1073741831_1007{UCState=UNDER_CONSTRUCTION, truncateBlock=null, 
primaryNodeIndex=-1, 
replicas=[ReplicaUC[[DISK]DS-9e40cd40-920b-4ed8-b24a-d8e3e9fc3638:NORMAL:127.0.0.1:40705|RBW],
 
ReplicaUC[[DISK]DS-7129cf1e-8d1e-494c-8a64-2fa2690b4f96:NORMAL:127.0.0.1:41669|RBW]]}
 size 0
   [junit4]   2> 34857 INFO  (Block report processor) [    ] BlockStateChange 
BLOCK* addStoredBlock: blockMap updated: 127.0.0.1:41669 is added to 
blk_1073741832_1008{UCState=UNDER_CONSTRUCTION, truncateBlock=null, 
primaryNodeIndex=-1, 
replicas=[ReplicaUC[[DISK]DS-9e40cd40-920b-4ed8-b24a-d8e3e9fc3638:NORMAL:127.0.0.1:40705|RBW],
 
ReplicaUC[[DISK]DS-861e474a-08ce-43d5-bc9a-cb6a92b59e14:NORMAL:127.0.0.1:41669|RBW]]}
 size 0
   [junit4]   2> 34858 INFO  (Block report processor) [    ] BlockStateChange 
BLOCK* addStoredBlock: blockMap updated: 127.0.0.1:40705 is added to 
blk_1073741832_1008{UCState=UNDER_CONSTRUCTION, truncateBlock=null, 
primaryNodeIndex=-1, 
replicas=[ReplicaUC[[DISK]DS-861e474a-08ce-43d5-bc9a-cb6a92b59e14:NORMAL:127.0.0.1:41669|RBW],
 
ReplicaUC[[DISK]DS-24ed71f7-8ff8-4327-a9e7-c2603584c910:NORMAL:127.0.0.1:40705|FINALIZED]]}
 size 0
   [junit4]   2> 34879 INFO  (qtp24773453-434) [n:127.0.0.1:41973_vh_wo 
c:control_collection s:shard1 r:core_node2 
x:control_collection_shard1_replica_n1] o.a.s.s.SolrIndexSearcher Opening 
[Searcher@edbedc[control_collection_shard1_replica_n1] main]
   [junit4]   2> 34897 INFO  
(searcherExecutor-128-thread-1-processing-n:127.0.0.1:41973_vh_wo 
x:control_collection_shard1_replica_n1 s:shard1 c:control_collection 
r:core_node2) [n:127.0.0.1:41973_vh_wo c:control_collection s:shard1 
r:core_node2 x:control_collection_shard1_replica_n1] o.a.s.c.SolrCore 
[control_collection_shard1_replica_n1] Registered new searcher 
Searcher@edbedc[control_collection_shard1_replica_n1] 
main{ExitableDirectoryReader(UninvertingDirectoryReader(Uninverting(_0(8.0.0):c1)))}
   [junit4]   2> 34905 INFO  (Block report processor) [    ] BlockStateChange 
BLOCK* addStoredBlock: blockMap updated: 127.0.0.1:40705 is added to 
blk_1073741827_1003{UCState=UNDER_CONSTRUCTION, truncateBlock=null, 
primaryNodeIndex=-1, 
replicas=[ReplicaUC[[DISK]DS-9e40cd40-920b-4ed8-b24a-d8e3e9fc3638:NORMAL:127.0.0.1:40705|RBW],
 
ReplicaUC[[DISK]DS-7129cf1e-8d1e-494c-8a64-2fa2690b4f96:NORMAL:127.0.0.1:41669|RBW]]}
 size 255
   [junit4]   2> 34908 INFO  (Block report processor) [    ] BlockStateChange 
BLOCK* addStoredBlock: blockMap updated: 127.0.0.1:41669 is added to 
blk_1073741827_1003{UCState=UNDER_CONSTRUCTION, truncateBlock=null, 
primaryNodeIndex=-1, 
replicas=[ReplicaUC[[DISK]DS-9e40cd40-920b-4ed8-b24a-d8e3e9fc3638:NORMAL:127.0.0.1:40705|RBW],
 
ReplicaUC[[DISK]DS-7129cf1e-8d1e-494c-8a64-2fa2690b4f96:NORMAL:127.0.0.1:41669|RBW]]}
 size 255
   [junit4]   2> 34910 INFO  (qtp24773453-434) [n:127.0.0.1:41973_vh_wo 
c:control_collection s:shard1 r:core_node2 
x:control_collection_shard1_replica_n1] o.a.s.u.DirectUpdateHandler2 
end_commit_flush
   [junit4]   2> 34910 INFO  (qtp24773453-434) [n:127.0.0.1:41973_vh_wo 
c:control_collection s:shard1 r:core_node2 
x:control_collection_shard1_replica_n1] o.a.s.u.p.LogUpdateProcessorFactory 
[control_collection_shard1_replica_n1]  webapp=/vh_wo path=/update 
params={waitSearcher=true&commit=true&softCommit=false&wt=javabin&version=2}{commit=}
 0 659
   [junit4]   2> 34914 INFO  (qtp19624113-486) [n:127.0.0.1:35837_vh_wo 
c:collection1 s:shard1 r:core_node22 x:collection1_shard1_replica_n21] 
o.a.s.u.DirectUpdateHandler2 start 
commit{_version_=1584518491685257216,optimize=false,openSearcher=true,waitSearcher=true,expungeDeletes=false,softCommit=false,prepareCommit=false}
   [junit4]   2> 34914 INFO  (qtp19624113-486) [n:127.0.0.1:35837_vh_wo 
c:collection1 s:shard1 r:core_node22 x:collection1_shard1_replica_n21] 
o.a.s.u.SolrIndexWriter Calling setCommitData with 
IW:org.apache.solr.update.SolrIndexWriter@19e5213 
commitCommandVersion:1584518491685257216
   [junit4]   2> 34955 INFO  (Block report processor) [    ] BlockStateChange 
BLOCK* addStoredBlock: blockMap updated: 127.0.0.1:41669 is added to 
blk_1073741833_1009{UCState=UNDER_CONSTRUCTION, truncateBlock=null, 
primaryNodeIndex=-1, 
replicas=[ReplicaUC[[DISK]DS-9e40cd40-920b-4ed8-b24a-d8e3e9fc3638:NORMAL:127.0.0.1:40705|RBW],
 
ReplicaUC[[DISK]DS-7129cf1e-8d1e-494c-8a64-2fa2690b4f96:NORMAL:127.0.0.1:41669|FINALIZED]]}
 size 0
   [junit4]   2> 34955 INFO  (Block report processor) [    ] BlockStateChange 
BLOCK* addStoredBlock: blockMap updated: 127.0.0.1:40705 is added to 
blk_1073741833_1009{UCState=UNDER_CONSTRUCTION, truncateBlock=null, 
primaryNodeIndex=-1, 
replicas=[ReplicaUC[[DISK]DS-9e40cd40-920b-4ed8-b24a-d8e3e9fc3638:NORMAL:127.0.0.1:40705|RBW],
 
ReplicaUC[[DISK]DS-7129cf1e-8d1e-494c-8a64-2fa2690b4f96:NORMAL:127.0.0.1:41669|FINALIZED]]}
 size 0
   [junit4]   2> 34989 INFO  (Block report processor) [    ] BlockStateChange 
BLOCK* addStoredBlock: blockMap updated: 127.0.0.1:40705 is added to 
blk_1073741834_1010{UCState=UNDER_CONSTRUCTION, truncateBlock=null, 
primaryNodeIndex=-1, 
replicas=[ReplicaUC[[DISK]DS-861e474a-08ce-43d5-bc9a-cb6a92b59e14:NORMAL:127.0.0.1:41669|RBW],
 
ReplicaUC[[DISK]DS-24ed71f7-8ff8-4327-a9e7-c2603584c910:NORMAL:127.0.0.1:40705|FINALIZED]]}
 size 0
   [junit4]   2> 34993 INFO  (Block report processor) [    ] BlockStateChange 
BLOCK* addStoredBlock: blockMap updated: 127.0.0.1:41669 is added to 
blk_1073741834_1010{UCState=UNDER_CONSTRUCTION, truncateBlock=null, 
primaryNodeIndex=-1, 
replicas=[ReplicaUC[[DISK]DS-861e474a-08ce-43d5-bc9a-cb6a92b59e14:NORMAL:127.0.0.1:41669|RBW],
 
ReplicaUC[[DISK]DS-24ed71f7-8ff8-4327-a9e7-c2603584c910:NORMAL:127.0.0.1:40705|FINALIZED]]}
 size 0
   [junit4]   2> 35025 INFO  (Block report processor) [    ] BlockStateChange 
BLOCK* addStoredBlock: blockMap updated: 127.0.0.1:41669 is added to 
blk_1073741835_1011{UCState=UNDER_CONSTRUCTION, truncateBlock=null, 
primaryNodeIndex=-1, 
replicas=[ReplicaUC[[DISK]DS-24ed71f7-8ff8-4327-a9e7-c2603584c910:NORMAL:127.0.0.1:40705|RBW],
 
ReplicaUC[[DISK]DS-7129cf1e-8d1e-494c-8a64-2fa2690b4f96:NORMAL:127.0.0.1:41669|FINALIZED]]}
 size 0
   [junit4]   2> 35025 INFO  (Block report processor) [    ] BlockStateChange 
BLOCK* addStoredBlock: blockMap updated: 127.0.0.1:40705 is added to 
blk_1073741835_1011{UCState=UNDER_CONSTRUCTION, truncateBlock=null, 
primaryNodeIndex=-1, 
replicas=[ReplicaUC[[DISK]DS-7129cf1e-8d1e-494c-8a64-2fa2690b4f96:NORMAL:127.0.0.1:41669|FINALIZED],
 
ReplicaUC[[DISK]DS-9e40cd40-920b-4ed8-b24a-d8e3e9fc3638:NORMAL:127.0.0.1:40705|FINALIZED]]}
 size 0
   [junit4]   2> 35051 INFO  (Block report processor) [    ] BlockStateChange 
BLOCK* addStoredBlock: blockMap updated: 127.0.0.1:40705 is added to 
blk_1073741836_1012{UCState=UNDER_CONSTRUCTION, truncateBlock=null, 
primaryNodeIndex=-1, 
replicas=[ReplicaUC[[DISK]DS-7129cf1e-8d1e-494c-8a64-2fa2690b4f96:NORMAL:127.0.0.1:41669|RBW],
 
ReplicaUC[[DISK]DS-24ed71f7-8ff8-4327-a9e7-c2603584c910:NORMAL:127.0.0.1:40705|FINALIZED]]}
 size 0
   [junit4]   2> 35052 INFO  (Block report processor) [    ] BlockStateChange 
BLOCK* addStoredBlock: blockMap updated: 127.0.0.1:41669 is added to 
blk_1073741836_1012{UCState=UNDER_CONSTRUCTION, truncateBlock=null, 
primaryNodeIndex=-1, 
replicas=[ReplicaUC[[DISK]DS-24ed71f7-8ff8-4327-a9e7-c2603584c910:NORMAL:127.0.0.1:40705|FINALIZED],
 
ReplicaUC[[DISK]DS-861e474a-08ce-43d5-bc9a-cb6a92b59e14:NORMAL:127.0.0.1:41669|FINALIZED]]}
 size 0
   [junit4]   2> 35081 INFO  (qtp19624113-486) [n:127.0.0.1:35837_vh_wo 
c:collection1 s:shard1 r:core_node22 x:collection1_shard1_replica_n21] 
o.a.s.s.SolrIndexSearcher Opening 
[Searcher@1524683[collection1_shard1_replica_n21] main]
   [junit4]   2> 35096 INFO  
(searcherExecutor-139-thread-1-processing-n:127.0.0.1:35837_vh_wo 
x:collection1_shard1_replica_n21 s:shard1 c:collection1 r:core_node22) 
[n:127.0.0.1:35837_vh_wo c:collection1 s:shard1 r:core_node22 
x:collection1_shard1_replica_n21] o.a.s.c.SolrCore 
[collection1_shard1_replica_n21] Registered new searcher 
Searcher@1524683[collection1_shard1_replica_n21] 
main{ExitableDirectoryReader(UninvertingDirectoryReader(Uninverting(_0(8.0.0):c1)))}
   [junit4]   2> 35109 INFO  (Block report processor) [    ] BlockStateChange 
BLOCK* addStoredBlock: blockMap updated: 127.0.0.1:40705 is added to 
blk_1073741828_1004{UCState=COMMITTED, truncateBlock=null, primaryNodeIndex=-1, 
replicas=[ReplicaUC[[DISK]DS-861e474a-08ce-43d5-bc9a-cb6a92b59e14:NORMAL:127.0.0.1:41669|RBW],
 
ReplicaUC[[DISK]DS-24ed71f7-8ff8-4327-a9e7-c2603584c910:NORMAL:127.0.0.1:40705|RBW]]}
 size 284
   [junit4]   2> 35109 INFO  (Block report processor) [    ] BlockStateChange 
BLOCK* addStoredBlock: blockMap updated: 127.0.0.1:41669 is added to 
blk_1073741828_1004 size 284
   [junit4]   2> 35508 INFO  (qtp19624113-486) [n:127.0.0.1:35837_vh_wo 
c:collection1 s:shard1 r:core_node22 x:collection1_shard1_replica_n21] 
o.a.s.u.DirectUpdateHandler2 end_commit_flush
   [junit4]   2> 35508 INFO  (qtp19624113-486) [n:127.0.0.1:35837_vh_wo 
c:collection1 s:shard1 r:core_node22 x:collection1_shard1_replica_n21] 
o.a.s.u.p.LogUpdateProcessorFactory [collection1_shard1_replica_n21]  
webapp=/vh_wo path=/update 
params={waitSearcher=true&commit=true&softCommit=false&wt=javabin&version=2}{commit=}
 0 595
   [junit4]   2> 35510 INFO  
(TEST-CheckHdfsIndexTest.doTest-seed#[6B515ACCA25292F]) [    ] 
o.a.s.c.AbstractDistribZkTestBase Wait for recoveries to finish - collection: 
collection1 failOnTimeout:true timeout (sec):330
   [junit4]   2> 35510 INFO  
(TEST-CheckHdfsIndexTest.doTest-seed#[6B515ACCA25292F]) [    ] 
o.a.s.c.AbstractDistribZkTestBase Recoveries finished - collection: collection1
   [junit4]   2> 35546 INFO  (qtp19624113-485) [n:127.0.0.1:35837_vh_wo 
c:collection1 s:shard1 r:core_node22 x:collection1_shard1_replica_n21] 
o.a.s.c.S.Request [collection1_shard1_replica_n21]  webapp=/vh_wo 
path=/admin/system params={qt=/admin/system&wt=javabin&version=2} status=0 
QTime=28
   [junit4]   1> 
   [junit4]   1> Opening index @ 
hdfs://localhost.localdomain:33053/solr_hdfs_home/collection1/core_node22/data/index
   [junit4]   1> 
   [junit4]   1> Segments file=segments_2 numSegments=1 version=8.0.0 
id=a4kbypunweuub246y7otbwu1c userData={commitCommandVer=1584518491685257216, 
commitTimeMSec=1511114589391}
   [junit4]   1>   1 of 1: name=_0 maxDoc=1
   [junit4]   1>     version=8.0.0
   [junit4]   1>     id=a4kbypunweuub246y7otbwu1a
   [junit4]   1>     codec=CheapBastard
   [junit4]   1>     compound=true
   [junit4]   1>     numFiles=3
   [junit4]   1>     size (MB)=0.004
   [junit4]   1>     diagnostics = {java.runtime.version=1.8.0_144-b01, 
java.vendor=Oracle Corporation, java.version=1.8.0_144, 
java.vm.version=25.144-b01, lucene.version=8.0.0, os=Linux, os.arch=i386, 
os.version=4.10.0-37-generic, source=flush, timestamp=1511114589402}
   [junit4]   1>     no deletions
   [junit4]   1>     test: open reader.........OK [took 0.021 sec]
   [junit4]   1>     test: check integrity.....OK [took 0.005 sec]
   [junit4]   1>     test: check live docs.....OK [took 0.001 sec]
   [junit4]   1>     test: field infos.........OK [20 fields] [took 0.001 sec]
   [junit4]   1>     test: field norms.........OK [3 fields] [took 0.001 sec]
   [junit4]   1>     test: terms, freq, prox...OK [3 terms; 3 terms/docs pairs; 
3 tokens] [took 0.011 sec]
   [junit4]   1>     test: stored fields.......OK [18 total field count; avg 
18.0 fields per doc] [took 0.002 sec]
   [junit4]   1>     test: term vectors........OK [0 total term vector count; 
avg 0.0 term/freq vector fields per doc] [took 0.001 sec]
   [junit4]   1>     test: docvalues...........OK [5 docvalues fields; 0 
BINARY; 3 NUMERIC; 0 SORTED; 2 SORTED_NUMERIC; 0 SORTED_SET] [took 0.003 sec]
   [junit4]   1>     test: points..............OK [15 fields, 15 points] [took 
0.024 sec]
   [junit4]   1> 
   [junit4]   1> No problems were detected with this index.
   [junit4]   1> 
   [junit4]   1> Took 0.107 sec total.
   [junit4]   1> 
   [junit4]   2> 35715 INFO  
(TEST-CheckHdfsIndexTest.doTest-seed#[6B515ACCA25292F]) [    ] 
o.a.s.s.h.HdfsDirectory Closing hdfs directory 
hdfs://localhost.localdomain:33053/solr_hdfs_home/collection1/core_node22/data/index
   [junit4]   2> 35715 INFO  
(TEST-CheckHdfsIndexTest.doTest-seed#[6B515ACCA25292F]) [    ] 
o.a.s.s.h.HdfsDirectory Closing hdfs directory 
hdfs://localhost.localdomain:33053/solr
   [junit4]   2> 35716 INFO  
(TEST-CheckHdfsIndexTest.doTest-seed#[6B515ACCA25292F]) [    ] 
o.a.s.SolrTestCaseJ4 ###Ending doTest
   [junit4]   2> 35716 INFO  
(TEST-CheckHdfsIndexTest.doTest-seed#[6B515ACCA25292F]) [    ] 
o.a.s.c.ChaosMonkey monkey: stop jetty! 41973
   [junit4]   2> 35716 INFO  
(TEST-CheckHdfsIndexTest.doTest-seed#[6B515ACCA25292F]) [    ] 
o.a.s.c.CoreContainer Shutting down CoreContainer instance=13259442
   [junit4]   2> 35716 INFO  
(TEST-CheckHdfsIndexTest.doTest-seed#[6B515ACCA25292F]) [    ] 
o.a.s.m.SolrMetricManager Closing metric reporters for registry=solr.node, 
tag=null
   [junit4]   2> 35717 INFO  
(TEST-CheckHdfsIndexTest.doTest-seed#[6B515ACCA25292F]) [    ] 
o.a.s.m.r.SolrJmxReporter Closing reporter 
[org.apache.solr.metrics.reporters.SolrJmxReporter@17ebd70: rootName = null, 
domain = solr.node, service url = null, agent id = null] for registry solr.node 
/ com.codahale.metrics.MetricRegistry@b9b244
   [junit4]   2> 35726 INFO  
(TEST-CheckHdfsIndexTest.doTest-seed#[6B515ACCA25292F]) [    ] 
o.a.s.m.SolrMetricManager Closing metric reporters for registry=solr.jvm, 
tag=null
   [junit4]   2> 35726 INFO  
(TEST-CheckHdfsIndexTest.doTest-seed#[6B515ACCA25292F]) [    ] 
o.a.s.m.r.SolrJmxReporter Closing reporter 
[org.apache.solr.metrics.reporters.SolrJmxReporter@3ac6bd: rootName = null, 
domain = solr.jvm, service url = null, agent id = null] for registry solr.jvm / 
com.codahale.metrics.MetricRegistry@1bd1d02
   [junit4]   2> 35732 INFO  
(TEST-CheckHdfsIndexTest.doTest-seed#[6B515ACCA25292F]) [    ] 
o.a.s.m.SolrMetricManager Closing metric reporters for registry=solr.jetty, 
tag=null
   [junit4]   2> 35732 INFO  
(TEST-CheckHdfsIndexTest.doTest-seed#[6B515ACCA25292F]) [    ] 
o.a.s.m.r.SolrJmxReporter Closing reporter 
[org.apache.solr.metrics.reporters.SolrJmxReporter@1403a8d: rootName = null, 
domain = solr.jetty, service url = null, agent id = null] for registry 
solr.jetty / com.codahale.metrics.MetricRegistry@497deb
   [junit4]   2> 35734 INFO  
(TEST-CheckHdfsIndexTest.doTest-seed#[6B515ACCA25292F]) [    ] 
o.a.s.c.ZkController Remove node as live in 
ZooKeeper:/live_nodes/127.0.0.1:41973_vh_wo
   [junit4]   2> 35735 INFO  
(TEST-CheckHdfsIndexTest.doTest-seed#[6B515ACCA25292F]) [    ] 
o.a.s.m.SolrMetricManager Closing metric reporters for registry=solr.cluster, 
tag=null
   [junit4]   2> 35735 INFO  (zkCallback-35-thread-1) [    ] 
o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (2) -> (1)
   [junit4]   2> 35771 INFO  
(zkCallback-40-thread-1-processing-n:127.0.0.1:35837_vh_wo) 
[n:127.0.0.1:35837_vh_wo    ] o.a.s.c.c.ZkStateReader Updated live nodes from 
ZooKeeper... (2) -> (1)
   [junit4]   2> 35771 INFO  
(zkCallback-27-thread-2-processing-n:127.0.0.1:41973_vh_wo) 
[n:127.0.0.1:41973_vh_wo    ] o.a.s.c.c.ZkStateReader Updated live nodes from 
ZooKeeper... (2) -> (1)
   [junit4]   2> 35772 INFO  
(zkCallback-27-thread-1-processing-n:127.0.0.1:41973_vh_wo) 
[n:127.0.0.1:41973_vh_wo    ] o.a.s.c.c.ZkStateReader A cluster state change: 
[WatchedEvent state:SyncConnected type:NodeDataChanged 
path:/collections/control_collection/state.json] for collection 
[control_collection] has occurred - updating... (live nodes size: [1])
   [junit4]   2> 35772 INFO  (coreCloseExecutor-144-thread-1) 
[n:127.0.0.1:41973_vh_wo c:control_collection s:shard1 r:core_node2 
x:control_collection_shard1_replica_n1] o.a.s.c.SolrCore 
[control_collection_shard1_replica_n1]  CLOSING SolrCore 
org.apache.solr.core.SolrCore@133b484
   [junit4]   2> 35772 INFO  (coreCloseExecutor-144-thread-1) 
[n:127.0.0.1:41973_vh_wo c:control_collection s:shard1 r:core_node2 
x:control_collection_shard1_replica_n1] o.a.s.m.SolrMetricManager Closing 
metric reporters for registry=solr.core.control_collection.shard1.replica_n1, 
tag=20165764
   [junit4]   2> 35773 INFO  (coreCloseExecutor-144-thread-1) 
[n:127.0.0.1:41973_vh_wo c:control_collection s:shard1 r:core_node2 
x:control_collection_shard1_replica_n1] o.a.s.m.r.SolrJmxReporter Closing 
reporter [org.apache.solr.metrics.reporters.SolrJmxReporter@1ad6569: rootName = 
null, domain = solr.core.control_collection.shard1.replica_n1, service url = 
null, agent id = null] for registry 
solr.core.control_collection.shard1.replica_n1 / 
com.codahale.metrics.MetricRegistry@1fcb5a1
   [junit4]   2> 35798 INFO  (coreCloseExecutor-144-thread-1) 
[n:127.0.0.1:41973_vh_wo c:control_collection s:shard1 r:core_node2 
x:control_collection_shard1_replica_n1] o.a.s.m.SolrMetricManager Closing 
metric reporters for registry=solr.collection.control_collection.shard1.leader, 
tag=20165764
   [junit4]   2> 35816 INFO  (coreCloseExecutor-144-thread-1) 
[n:127.0.0.1:41973_vh_wo c:control_collection s:shard1 r:core_node2 
x:control_collection_shard1_replica_n1] o.a.s.s.h.HdfsDirectory Closing hdfs 
directory 
hdfs://localhost.localdomain:33053/solr_hdfs_home/control_collection/core_node2/data/index
   [junit4]   2> 35825 INFO  (coreCloseExecutor-144-thread-1) 
[n:127.0.0.1:41973_vh_wo c:control_collection s:shard1 r:core_node2 
x:control_collection_shard1_replica_n1] o.a.s.s.h.HdfsDirectory Closing hdfs 
directory 
hdfs://localhost.localdomain:33053/solr_hdfs_home/control_collection/core_node2/data/snapshot_metadata
   [junit4]   2> 35825 INFO  (coreCloseExecutor-144-thread-1) 
[n:127.0.0.1:41973_vh_wo c:control_collection s:shard1 r:core_node2 
x:control_collection_shard1_replica_n1] o.a.s.s.h.HdfsDirectory Closing hdfs 
directory 
hdfs://localhost.localdomain:33053/solr_hdfs_home/control_collection/core_node2/data
   [junit4]   2> 35834 INFO  
(TEST-CheckHdfsIndexTest.doTest-seed#[6B515ACCA25292F]) [    ] o.a.s.c.Overseer 
Overseer (id=99032405120843781-127.0.0.1:41973_vh_wo-n_0000000000) closing
   [junit4]   2> 35835 WARN  
(OverseerAutoScalingTriggerThread-99032405120843781-127.0.0.1:41973_vh_wo-n_0000000000)
 [n:127.0.0.1:41973_vh_wo    ] o.a.s.c.a.OverseerTriggerThread 
OverseerTriggerThread woken up but we are closed, exiting.
   [junit4]   2> 35836 INFO  
(OverseerStateUpdate-99032405120843781-127.0.0.1:41973_vh_wo-n_0000000000) 
[n:127.0.0.1:41973_vh_wo    ] o.a.s.c.Overseer Overseer Loop exiting : 
127.0.0.1:41973_vh_wo
   [junit4]   2> 35851 INFO  
(zkCallback-40-thread-1-processing-n:127.0.0.1:35837_vh_wo) 
[n:127.0.0.1:35837_vh_wo    ] o

[...truncated too long message...]

ervlet.ServletHandler$CachedChain.doFilter(ServletHandler.java:1759)
   [junit4]   2>        at 
org.apache.solr.client.solrj.embedded.JettySolrRunner$DebugFilter.doFilter(JettySolrRunner.java:139)
   [junit4]   2>        at 
org.eclipse.jetty.servlet.ServletHandler$CachedChain.doFilter(ServletHandler.java:1759)
   [junit4]   2>        at 
org.eclipse.jetty.servlet.ServletHandler.doHandle(ServletHandler.java:582)
   [junit4]   2>        at 
org.eclipse.jetty.server.session.SessionHandler.doHandle(SessionHandler.java:224)
   [junit4]   2>        at 
org.eclipse.jetty.server.handler.ContextHandler.doHandle(ContextHandler.java:1180)
   [junit4]   2>        at 
org.eclipse.jetty.servlet.ServletHandler.doScope(ServletHandler.java:512)
   [junit4]   2>        at 
org.eclipse.jetty.server.session.SessionHandler.doScope(SessionHandler.java:185)
   [junit4]   2>        at 
org.eclipse.jetty.server.handler.ContextHandler.doScope(ContextHandler.java:1112)
   [junit4]   2>        at 
org.eclipse.jetty.server.handler.ScopedHandler.handle(ScopedHandler.java:141)
   [junit4]   2>        at 
org.eclipse.jetty.server.handler.gzip.GzipHandler.handle(GzipHandler.java:426)
   [junit4]   2>        at 
org.eclipse.jetty.server.handler.HandlerWrapper.handle(HandlerWrapper.java:134)
   [junit4]   2>        at 
org.eclipse.jetty.server.Server.handle(Server.java:534)
   [junit4]   2>        at 
org.eclipse.jetty.server.HttpChannel.handle(HttpChannel.java:320)
   [junit4]   2>        at 
org.eclipse.jetty.server.HttpConnection.onFillable(HttpConnection.java:251)
   [junit4]   2>        at 
org.eclipse.jetty.io.AbstractConnection$ReadCallback.succeeded(AbstractConnection.java:283)
   [junit4]   2>        at 
org.eclipse.jetty.io.FillInterest.fillable(FillInterest.java:108)
   [junit4]   2>        at 
org.eclipse.jetty.io.SelectChannelEndPoint$2.run(SelectChannelEndPoint.java:93)
   [junit4]   2>        at 
org.eclipse.jetty.util.thread.strategy.ExecuteProduceConsume.executeProduceConsume(ExecuteProduceConsume.java:303)
   [junit4]   2>        at 
org.eclipse.jetty.util.thread.strategy.ExecuteProduceConsume.produceConsume(ExecuteProduceConsume.java:148)
   [junit4]   2>        at 
org.eclipse.jetty.util.thread.strategy.ExecuteProduceConsume.run(ExecuteProduceConsume.java:136)
   [junit4]   2>        at 
org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:671)
   [junit4]   2>        at 
org.eclipse.jetty.util.thread.QueuedThreadPool$2.run(QueuedThreadPool.java:589)
   [junit4]   2>        at java.lang.Thread.run(Thread.java:748)
   [junit4]   2> Caused by: org.apache.solr.common.SolrException: No registered 
leader was found after waiting for 1220000ms , collection: control_collection 
slice: shard1 saw state=null with live_nodes=[]
   [junit4]   2>        at 
org.apache.solr.common.cloud.ZkStateReader.getLeaderRetry(ZkStateReader.java:771)
   [junit4]   2>        at 
org.apache.solr.common.cloud.ZkStateReader.getLeaderUrl(ZkStateReader.java:736)
   [junit4]   2>        at 
org.apache.solr.cloud.ZkController.getLeader(ZkController.java:1178)
   [junit4]   2>        ... 41 more
   [junit4]   2> 
   [junit4]   2> 753277 INFO  (qtp3687883-860) [n:127.0.0.1:41365_y_%2Fsi 
c:control_collection s:shard1  x:control_collection_shard1_replica_n1] 
o.a.s.s.HttpSolrCall [admin] webapp=null path=/admin/cores 
params={qt=/admin/cores&collection.configName=conf1&newCollection=true&name=control_collection_shard1_replica_n1&action=CREATE&numShards=1&collection=control_collection&shard=shard1&wt=javabin&version=2&replicaType=NRT}
 status=0 QTime=630242
   [junit4]   2> 753278 ERROR 
(TEST-ChaosMonkeySafeLeaderTest.test-seed#[6B515ACCA25292F]) [    ] 
o.a.z.s.ZooKeeperServer ZKShutdownHandler is not registered, so ZooKeeper 
server won't take any action on ERROR or SHUTDOWN server state changes
   [junit4]   2> 753278 INFO  
(TEST-ChaosMonkeySafeLeaderTest.test-seed#[6B515ACCA25292F]) [    ] 
o.a.s.c.ZkTestServer connecting to 127.0.0.1:45557 45557
   [junit4]   2> 759531 INFO  (Thread-142) [    ] o.a.s.c.ZkTestServer 
connecting to 127.0.0.1:45557 45557
   [junit4]   2> 759533 WARN  (Thread-142) [    ] o.a.s.c.ZkTestServer Watch 
limit violations: 
   [junit4]   2> Maximum concurrent create/delete watches above limit:
   [junit4]   2> 
   [junit4]   2>        2       /solr/aliases.json
   [junit4]   2> 
   [junit4]   2> Maximum concurrent data watches above limit:
   [junit4]   2> 
   [junit4]   2>        2       /solr/clusterstate.json
   [junit4]   2>        2       /solr/clusterprops.json
   [junit4]   2> 
   [junit4]   2> Maximum concurrent children watches above limit:
   [junit4]   2> 
   [junit4]   2>        2       /solr/live_nodes
   [junit4]   2>        2       /solr/collections
   [junit4]   2> 
   [junit4]   2> NOTE: reproduce with: ant test  
-Dtestcase=ChaosMonkeySafeLeaderTest -Dtests.method=test 
-Dtests.seed=6B515ACCA25292F -Dtests.multiplier=3 -Dtests.slow=true 
-Dtests.locale=sk-SK -Dtests.timezone=America/Rosario -Dtests.asserts=true 
-Dtests.file.encoding=ISO-8859-1
   [junit4] ERROR    637s J1 | ChaosMonkeySafeLeaderTest.test <<<
   [junit4]    > Throwable #1: 
org.apache.solr.client.solrj.SolrServerException: Timeout occured while waiting 
response from server at: http://127.0.0.1:41365/y_/si
   [junit4]    >        at 
__randomizedtesting.SeedInfo.seed([6B515ACCA25292F:8EE12A7664D944D7]:0)
   [junit4]    >        at 
org.apache.solr.client.solrj.impl.HttpSolrClient.executeMethod(HttpSolrClient.java:654)
   [junit4]    >        at 
org.apache.solr.client.solrj.impl.HttpSolrClient.request(HttpSolrClient.java:255)
   [junit4]    >        at 
org.apache.solr.client.solrj.impl.HttpSolrClient.request(HttpSolrClient.java:244)
   [junit4]    >        at 
org.apache.solr.client.solrj.impl.LBHttpSolrClient.doRequest(LBHttpSolrClient.java:483)
   [junit4]    >        at 
org.apache.solr.client.solrj.impl.LBHttpSolrClient.request(LBHttpSolrClient.java:413)
   [junit4]    >        at 
org.apache.solr.client.solrj.impl.CloudSolrClient.sendRequest(CloudSolrClient.java:1103)
   [junit4]    >        at 
org.apache.solr.client.solrj.impl.CloudSolrClient.requestWithRetryOnStaleState(CloudSolrClient.java:883)
   [junit4]    >        at 
org.apache.solr.client.solrj.impl.CloudSolrClient.request(CloudSolrClient.java:816)
   [junit4]    >        at 
org.apache.solr.client.solrj.SolrRequest.process(SolrRequest.java:194)
   [junit4]    >        at 
org.apache.solr.client.solrj.SolrRequest.process(SolrRequest.java:211)
   [junit4]    >        at 
org.apache.solr.cloud.AbstractFullDistribZkTestBase.createServers(AbstractFullDistribZkTestBase.java:314)
   [junit4]    >        at 
org.apache.solr.BaseDistributedSearchTestCase$ShardsRepeatRule$ShardsFixedStatement.callStatement(BaseDistributedSearchTestCase.java:991)
   [junit4]    >        at 
org.apache.solr.BaseDistributedSearchTestCase$ShardsRepeatRule$ShardsStatement.evaluate(BaseDistributedSearchTestCase.java:968)
   [junit4]    >        at java.lang.Thread.run(Thread.java:748)
   [junit4]    > Caused by: java.net.SocketTimeoutException: Read timed out
   [junit4]    >        at java.net.SocketInputStream.socketRead0(Native Method)
   [junit4]    >        at 
java.net.SocketInputStream.socketRead(SocketInputStream.java:116)
   [junit4]    >        at 
java.net.SocketInputStream.read(SocketInputStream.java:171)
   [junit4]    >        at 
java.net.SocketInputStream.read(SocketInputStream.java:141)
   [junit4]    >        at 
org.apache.http.impl.io.SessionInputBufferImpl.streamRead(SessionInputBufferImpl.java:137)
   [junit4]    >        at 
org.apache.http.impl.io.SessionInputBufferImpl.fillBuffer(SessionInputBufferImpl.java:153)
   [junit4]    >        at 
org.apache.http.impl.io.SessionInputBufferImpl.readLine(SessionInputBufferImpl.java:282)
   [junit4]    >        at 
org.apache.http.impl.conn.DefaultHttpResponseParser.parseHead(DefaultHttpResponseParser.java:138)
   [junit4]    >        at 
org.apache.http.impl.conn.DefaultHttpResponseParser.parseHead(DefaultHttpResponseParser.java:56)
   [junit4]    >        at 
org.apache.http.impl.io.AbstractMessageParser.parse(AbstractMessageParser.java:259)
   [junit4]    >        at 
org.apache.http.impl.DefaultBHttpClientConnection.receiveResponseHeader(DefaultBHttpClientConnection.java:163)
   [junit4]    >        at 
org.apache.http.impl.conn.CPoolProxy.receiveResponseHeader(CPoolProxy.java:165)
   [junit4]    >        at 
org.apache.http.protocol.HttpRequestExecutor.doReceiveResponse(HttpRequestExecutor.java:273)
   [junit4]    >        at 
org.apache.http.protocol.HttpRequestExecutor.execute(HttpRequestExecutor.java:125)
   [junit4]    >        at 
org.apache.http.impl.execchain.MainClientExec.execute(MainClientExec.java:272)
   [junit4]    >        at 
org.apache.http.impl.execchain.ProtocolExec.execute(ProtocolExec.java:185)
   [junit4]    >        at 
org.apache.http.impl.execchain.RetryExec.execute(RetryExec.java:89)
   [junit4]    >        at 
org.apache.http.impl.execchain.RedirectExec.execute(RedirectExec.java:111)
   [junit4]    >        at 
org.apache.http.impl.client.InternalHttpClient.doExecute(InternalHttpClient.java:185)
   [junit4]    >        at 
org.apache.http.impl.client.CloseableHttpClient.execute(CloseableHttpClient.java:83)
   [junit4]    >        at 
org.apache.http.impl.client.CloseableHttpClient.execute(CloseableHttpClient.java:56)
   [junit4]    >        at 
org.apache.solr.client.solrj.impl.HttpSolrClient.executeMethod(HttpSolrClient.java:542)
   [junit4]    >        ... 43 more
   [junit4]   2> NOTE: leaving temporary files on disk at: 
/home/jenkins/workspace/Lucene-Solr-master-Linux/solr/build/solr-core/test/J1/temp/solr.cloud.ChaosMonkeySafeLeaderTest_6B515ACCA25292F-001
   [junit4]   2> NOTE: test params are: codec=Asserting(Lucene70): {}, 
docValues:{}, maxPointsInLeafNode=1298, maxMBSortInHeap=5.1049925703585854, 
sim=Asserting(org.apache.lucene.search.similarities.AssertingSimilarity@30828a),
 locale=sk-SK, timezone=America/Rosario
   [junit4]   2> NOTE: Linux 4.10.0-37-generic i386/Oracle Corporation 
1.8.0_144 (32-bit)/cpus=8,threads=1,free=33691904,total=78286848
   [junit4]   2> NOTE: All tests run in this JVM: [CircularListTest, 
MoreLikeThisHandlerTest, BadIndexSchemaTest, TestDocumentBuilder, 
TestRequestStatusCollectionAPI, CdcrBidirectionalTest, 
CollectionTooManyReplicasTest, ChaosMonkeySafeLeaderTest]
   [junit4] Completed [46/750 (2!)] on J1 in 637.46s, 1 test, 1 error <<< 
FAILURES!

[...truncated 45484 lines...]