Build: https://builds.apache.org/job/Lucene-Solr-NightlyTests-master/1519/
2 tests failed.
FAILED: org.apache.solr.cloud.hdfs.StressHdfsTest.test
Error Message:
Could not find collection:delete_data_dir
Stack Trace:
java.lang.AssertionError: Could not find collection:delete_data_dir
at __randomizedtesting.SeedInfo.seed([5A3400F4114CAED4:D2603F2EBFB0C32C]:0)
at org.junit.Assert.fail(Assert.java:93)
at org.junit.Assert.assertTrue(Assert.java:43)
at org.junit.Assert.assertNotNull(Assert.java:526)
at org.apache.solr.cloud.AbstractDistribZkTestBase.waitForRecoveriesToFinish(AbstractDistribZkTestBase.java:155)
at org.apache.solr.cloud.AbstractDistribZkTestBase.waitForRecoveriesToFinish(AbstractDistribZkTestBase.java:140)
at org.apache.solr.cloud.AbstractDistribZkTestBase.waitForRecoveriesToFinish(AbstractDistribZkTestBase.java:135)
at org.apache.solr.cloud.AbstractFullDistribZkTestBase.waitForRecoveriesToFinish(AbstractFullDistribZkTestBase.java:915)
at org.apache.solr.cloud.hdfs.StressHdfsTest.createAndDeleteCollection(StressHdfsTest.java:161)
at org.apache.solr.cloud.hdfs.StressHdfsTest.test(StressHdfsTest.java:105)
at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)
at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
at java.lang.reflect.Method.invoke(Method.java:498)
at com.carrotsearch.randomizedtesting.RandomizedRunner.invoke(RandomizedRunner.java:1737)
at com.carrotsearch.randomizedtesting.RandomizedRunner$8.evaluate(RandomizedRunner.java:934)
at com.carrotsearch.randomizedtesting.RandomizedRunner$9.evaluate(RandomizedRunner.java:970)
at com.carrotsearch.randomizedtesting.RandomizedRunner$10.evaluate(RandomizedRunner.java:984)
at org.apache.solr.BaseDistributedSearchTestCase$ShardsRepeatRule$ShardsFixedStatement.callStatement(BaseDistributedSearchTestCase.java:993)
at org.apache.solr.BaseDistributedSearchTestCase$ShardsRepeatRule$ShardsStatement.evaluate(BaseDistributedSearchTestCase.java:968)
at com.carrotsearch.randomizedtesting.rules.SystemPropertiesRestoreRule$1.evaluate(SystemPropertiesRestoreRule.java:57)
at org.apache.lucene.util.TestRuleSetupTeardownChained$1.evaluate(TestRuleSetupTeardownChained.java:49)
at org.apache.lucene.util.AbstractBeforeAfterRule$1.evaluate(AbstractBeforeAfterRule.java:45)
at org.apache.lucene.util.TestRuleThreadAndTestName$1.evaluate(TestRuleThreadAndTestName.java:48)
at org.apache.lucene.util.TestRuleIgnoreAfterMaxFailures$1.evaluate(TestRuleIgnoreAfterMaxFailures.java:64)
at org.apache.lucene.util.TestRuleMarkFailure$1.evaluate(TestRuleMarkFailure.java:47)
at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
at com.carrotsearch.randomizedtesting.ThreadLeakControl$StatementRunner.run(ThreadLeakControl.java:368)
at com.carrotsearch.randomizedtesting.ThreadLeakControl.forkTimeoutingTask(ThreadLeakControl.java:817)
at com.carrotsearch.randomizedtesting.ThreadLeakControl$3.evaluate(ThreadLeakControl.java:468)
at com.carrotsearch.randomizedtesting.RandomizedRunner.runSingleTest(RandomizedRunner.java:943)
at com.carrotsearch.randomizedtesting.RandomizedRunner$5.evaluate(RandomizedRunner.java:829)
at com.carrotsearch.randomizedtesting.RandomizedRunner$6.evaluate(RandomizedRunner.java:879)
at com.carrotsearch.randomizedtesting.RandomizedRunner$7.evaluate(RandomizedRunner.java:890)
at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
at com.carrotsearch.randomizedtesting.rules.SystemPropertiesRestoreRule$1.evaluate(SystemPropertiesRestoreRule.java:57)
at org.apache.lucene.util.AbstractBeforeAfterRule$1.evaluate(AbstractBeforeAfterRule.java:45)
at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
at org.apache.lucene.util.TestRuleStoreClassName$1.evaluate(TestRuleStoreClassName.java:41)
at com.carrotsearch.randomizedtesting.rules.NoShadowingOrOverridesOnMethodsRule$1.evaluate(NoShadowingOrOverridesOnMethodsRule.java:40)
at com.carrotsearch.randomizedtesting.rules.NoShadowingOrOverridesOnMethodsRule$1.evaluate(NoShadowingOrOverridesOnMethodsRule.java:40)
at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
at org.apache.lucene.util.TestRuleAssertionsRequired$1.evaluate(TestRuleAssertionsRequired.java:53)
at org.apache.lucene.util.TestRuleMarkFailure$1.evaluate(TestRuleMarkFailure.java:47)
at org.apache.lucene.util.TestRuleIgnoreAfterMaxFailures$1.evaluate(TestRuleIgnoreAfterMaxFailures.java:64)
at org.apache.lucene.util.TestRuleIgnoreTestSuites$1.evaluate(TestRuleIgnoreTestSuites.java:54)
at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
at com.carrotsearch.randomizedtesting.ThreadLeakControl$StatementRunner.run(ThreadLeakControl.java:368)
at java.lang.Thread.run(Thread.java:748)
FAILED: org.apache.solr.uninverting.TestDocTermOrds.testTriggerUnInvertLimit
Error Message:
GC overhead limit exceeded
Stack Trace:
java.lang.OutOfMemoryError: GC overhead limit exceeded
at __randomizedtesting.SeedInfo.seed([5A3400F4114CAED4:698628301CFB7463]:0)
at org.apache.lucene.codecs.memory.DirectPostingsFormat$DirectField.<init>(DirectPostingsFormat.java:461)
at org.apache.lucene.codecs.memory.DirectPostingsFormat$DirectFields.<init>(DirectPostingsFormat.java:132)
at org.apache.lucene.codecs.memory.DirectPostingsFormat.fieldsProducer(DirectPostingsFormat.java:116)
at org.apache.lucene.codecs.perfield.PerFieldPostingsFormat$FieldsReader.<init>(PerFieldPostingsFormat.java:293)
at org.apache.lucene.codecs.perfield.PerFieldPostingsFormat.fieldsProducer(PerFieldPostingsFormat.java:373)
at org.apache.lucene.index.SegmentCoreReaders.<init>(SegmentCoreReaders.java:113)
at org.apache.lucene.index.SegmentReader.<init>(SegmentReader.java:78)
at org.apache.lucene.index.ReadersAndUpdates.getReader(ReadersAndUpdates.java:196)
at org.apache.lucene.index.ReadersAndUpdates.getReadOnlyClone(ReadersAndUpdates.java:234)
at org.apache.lucene.index.StandardDirectoryReader.open(StandardDirectoryReader.java:105)
at org.apache.lucene.index.IndexWriter.getReader(IndexWriter.java:491)
at org.apache.lucene.index.RandomIndexWriter.getReader(RandomIndexWriter.java:387)
at org.apache.lucene.index.RandomIndexWriter.getReader(RandomIndexWriter.java:324)
at org.apache.solr.uninverting.TestDocTermOrds.testTriggerUnInvertLimit(TestDocTermOrds.java:170)
at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)
at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
at java.lang.reflect.Method.invoke(Method.java:498)
at com.carrotsearch.randomizedtesting.RandomizedRunner.invoke(RandomizedRunner.java:1737)
at com.carrotsearch.randomizedtesting.RandomizedRunner$8.evaluate(RandomizedRunner.java:934)
at com.carrotsearch.randomizedtesting.RandomizedRunner$9.evaluate(RandomizedRunner.java:970)
at com.carrotsearch.randomizedtesting.RandomizedRunner$10.evaluate(RandomizedRunner.java:984)
at org.apache.lucene.util.TestRuleSetupTeardownChained$1.evaluate(TestRuleSetupTeardownChained.java:49)
at org.apache.lucene.util.AbstractBeforeAfterRule$1.evaluate(AbstractBeforeAfterRule.java:45)
at org.apache.lucene.util.TestRuleThreadAndTestName$1.evaluate(TestRuleThreadAndTestName.java:48)
at org.apache.lucene.util.TestRuleIgnoreAfterMaxFailures$1.evaluate(TestRuleIgnoreAfterMaxFailures.java:64)
at org.apache.lucene.util.TestRuleMarkFailure$1.evaluate(TestRuleMarkFailure.java:47)
at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)
at com.carrotsearch.randomizedtesting.ThreadLeakControl$StatementRunner.run(ThreadLeakControl.java:368)
at com.carrotsearch.randomizedtesting.ThreadLeakControl.forkTimeoutingTask(ThreadLeakControl.java:817)
at com.carrotsearch.randomizedtesting.ThreadLeakControl$3.evaluate(ThreadLeakControl.java:468)
at com.carrotsearch.randomizedtesting.RandomizedRunner.runSingleTest(RandomizedRunner.java:943)
Build Log:
[...truncated 1889 lines...]
[junit4] JVM J0: stdout was not empty, see: /x1/jenkins/jenkins-slave/workspace/Lucene-Solr-NightlyTests-master/checkout/lucene/build/core/test/temp/junit4-J0-20180402_185022_0047395847841118455661.sysout
[junit4] >>> JVM J0 emitted unexpected output (verbatim) ----
[junit4] codec: HighCompressionCompressingStoredFields, pf: Lucene50, dvf: Asserting
[junit4] <<< JVM J0: EOF ----
[...truncated 12613 lines...]
[junit4] Suite: org.apache.solr.cloud.hdfs.StressHdfsTest
[junit4] 2> Creating dataDir:
/x1/jenkins/jenkins-slave/workspace/Lucene-Solr-NightlyTests-master/checkout/solr/build/solr-core/test/J0/temp/solr.cloud.hdfs.StressHdfsTest_5A3400F4114CAED4-001/init-core-data-001
[junit4] 2> 3183040 WARN
(SUITE-StressHdfsTest-seed#[5A3400F4114CAED4]-worker) [ ]
o.a.s.SolrTestCaseJ4 startTrackingSearchers: numOpens=1 numCloses=1
[junit4] 2> 3183044 INFO
(SUITE-StressHdfsTest-seed#[5A3400F4114CAED4]-worker) [ ]
o.a.s.SolrTestCaseJ4 Using PointFields (NUMERIC_POINTS_SYSPROP=true)
w/NUMERIC_DOCVALUES_SYSPROP=false
[junit4] 2> 3183045 INFO
(SUITE-StressHdfsTest-seed#[5A3400F4114CAED4]-worker) [ ]
o.a.s.SolrTestCaseJ4 Randomized ssl (false) and clientAuth (false) via:
@org.apache.solr.SolrTestCaseJ4$SuppressSSL(bugUrl=https://issues.apache.org/jira/browse/SOLR-5776)
[junit4] 2> 3183045 INFO
(SUITE-StressHdfsTest-seed#[5A3400F4114CAED4]-worker) [ ]
o.a.s.SolrTestCaseJ4 SecureRandom sanity checks:
test.solr.allowed.securerandom=null & java.security.egd=file:/dev/./urandom
[junit4] 2> 3183045 INFO
(SUITE-StressHdfsTest-seed#[5A3400F4114CAED4]-worker) [ ]
o.a.s.BaseDistributedSearchTestCase Setting hostContext system property: /
[junit4] 1> Formatting using clusterid: testClusterID
[junit4] 2> 3183093 WARN
(SUITE-StressHdfsTest-seed#[5A3400F4114CAED4]-worker) [ ]
o.a.h.m.i.MetricsConfig Cannot locate configuration: tried
hadoop-metrics2-namenode.properties,hadoop-metrics2.properties
[junit4] 2> 3183105 WARN
(SUITE-StressHdfsTest-seed#[5A3400F4114CAED4]-worker) [ ]
o.a.h.h.HttpRequestLog Jetty request log can only be enabled using Log4j
[junit4] 2> 3183107 INFO
(SUITE-StressHdfsTest-seed#[5A3400F4114CAED4]-worker) [ ] o.m.log
jetty-6.1.26
[junit4] 2> 3183127 INFO
(SUITE-StressHdfsTest-seed#[5A3400F4114CAED4]-worker) [ ] o.m.log Extract
jar:file:/x1/jenkins/.ivy2/cache/org.apache.hadoop/hadoop-hdfs/tests/hadoop-hdfs-2.7.4-tests.jar!/webapps/hdfs
to ./temp/Jetty_localhost_37778_hdfs____kmdzby/webapp
[junit4] 2> 3183562 INFO
(SUITE-StressHdfsTest-seed#[5A3400F4114CAED4]-worker) [ ] o.m.log Started
HttpServer2$SelectChannelConnectorWithSafeStartup@localhost:37778
[junit4] 2> 3183630 WARN
(SUITE-StressHdfsTest-seed#[5A3400F4114CAED4]-worker) [ ]
o.a.h.h.HttpRequestLog Jetty request log can only be enabled using Log4j
[junit4] 2> 3183632 INFO
(SUITE-StressHdfsTest-seed#[5A3400F4114CAED4]-worker) [ ] o.m.log
jetty-6.1.26
[junit4] 2> 3183641 INFO
(SUITE-StressHdfsTest-seed#[5A3400F4114CAED4]-worker) [ ] o.m.log Extract
jar:file:/x1/jenkins/.ivy2/cache/org.apache.hadoop/hadoop-hdfs/tests/hadoop-hdfs-2.7.4-tests.jar!/webapps/datanode
to ./temp/Jetty_localhost_57892_datanode____jw1r6e/webapp
[junit4] 2> 3184058 INFO
(SUITE-StressHdfsTest-seed#[5A3400F4114CAED4]-worker) [ ] o.m.log Started
HttpServer2$SelectChannelConnectorWithSafeStartup@localhost:57892
[junit4] 2> 3184097 WARN
(SUITE-StressHdfsTest-seed#[5A3400F4114CAED4]-worker) [ ]
o.a.h.h.HttpRequestLog Jetty request log can only be enabled using Log4j
[junit4] 2> 3184098 INFO
(SUITE-StressHdfsTest-seed#[5A3400F4114CAED4]-worker) [ ] o.m.log
jetty-6.1.26
[junit4] 2> 3184110 INFO
(SUITE-StressHdfsTest-seed#[5A3400F4114CAED4]-worker) [ ] o.m.log Extract
jar:file:/x1/jenkins/.ivy2/cache/org.apache.hadoop/hadoop-hdfs/tests/hadoop-hdfs-2.7.4-tests.jar!/webapps/datanode
to ./temp/Jetty_localhost_44033_datanode____ar9ddj/webapp
[junit4] 2> 3184141 ERROR (DataNode:
[[[DISK]file:/x1/jenkins/jenkins-slave/workspace/Lucene-Solr-NightlyTests-master/checkout/solr/build/solr-core/test/J0/temp/solr.cloud.hdfs.StressHdfsTest_5A3400F4114CAED4-001/tempDir-001/hdfsBaseDir/data/data1/,
[DISK]file:/x1/jenkins/jenkins-slave/workspace/Lucene-Solr-NightlyTests-master/checkout/solr/build/solr-core/test/J0/temp/solr.cloud.hdfs.StressHdfsTest_5A3400F4114CAED4-001/tempDir-001/hdfsBaseDir/data/data2/]]
heartbeating to localhost/127.0.0.1:52191) [ ]
o.a.h.h.s.d.DirectoryScanner
dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value below 1
ms/sec. Assuming default value of 1000
[junit4] 2> 3184151 INFO (Block report processor) [ ] BlockStateChange
BLOCK* processReport 0x5191de7b05d829: from storage
DS-ee411a95-0395-42e9-8683-a13d875dc331 node
DatanodeRegistration(127.0.0.1:39124,
datanodeUuid=0e007af8-6b33-4492-883b-70511d24bca3, infoPort=42337,
infoSecurePort=0, ipcPort=44503,
storageInfo=lv=-56;cid=testClusterID;nsid=1644883274;c=0), blocks: 0,
hasStaleStorage: true, processing time: 0 msecs
[junit4] 2> 3184151 INFO (Block report processor) [ ] BlockStateChange
BLOCK* processReport 0x5191de7b05d829: from storage
DS-faf1db4b-7f8e-42ed-9df3-9939a6b8c1a4 node
DatanodeRegistration(127.0.0.1:39124,
datanodeUuid=0e007af8-6b33-4492-883b-70511d24bca3, infoPort=42337,
infoSecurePort=0, ipcPort=44503,
storageInfo=lv=-56;cid=testClusterID;nsid=1644883274;c=0), blocks: 0,
hasStaleStorage: false, processing time: 0 msecs
[junit4] 2> 3184554 INFO
(SUITE-StressHdfsTest-seed#[5A3400F4114CAED4]-worker) [ ] o.m.log Started
HttpServer2$SelectChannelConnectorWithSafeStartup@localhost:44033
[junit4] 2> 3185006 ERROR (DataNode:
[[[DISK]file:/x1/jenkins/jenkins-slave/workspace/Lucene-Solr-NightlyTests-master/checkout/solr/build/solr-core/test/J0/temp/solr.cloud.hdfs.StressHdfsTest_5A3400F4114CAED4-001/tempDir-001/hdfsBaseDir/data/data3/,
[DISK]file:/x1/jenkins/jenkins-slave/workspace/Lucene-Solr-NightlyTests-master/checkout/solr/build/solr-core/test/J0/temp/solr.cloud.hdfs.StressHdfsTest_5A3400F4114CAED4-001/tempDir-001/hdfsBaseDir/data/data4/]]
heartbeating to localhost/127.0.0.1:52191) [ ]
o.a.h.h.s.d.DirectoryScanner
dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value below 1
ms/sec. Assuming default value of 1000
[junit4] 2> 3185012 INFO (Block report processor) [ ] BlockStateChange
BLOCK* processReport 0x5191deae5e6f2b: from storage
DS-f351dada-f296-4537-8f9d-185b1b50bd01 node
DatanodeRegistration(127.0.0.1:57980,
datanodeUuid=320ab2bc-7e80-4b0e-a634-141c8a2e4e26, infoPort=40396,
infoSecurePort=0, ipcPort=59675,
storageInfo=lv=-56;cid=testClusterID;nsid=1644883274;c=0), blocks: 0,
hasStaleStorage: true, processing time: 0 msecs
[junit4] 2> 3185013 INFO (Block report processor) [ ] BlockStateChange
BLOCK* processReport 0x5191deae5e6f2b: from storage
DS-0456f817-18e7-4482-8aeb-4db146ebe3ec node
DatanodeRegistration(127.0.0.1:57980,
datanodeUuid=320ab2bc-7e80-4b0e-a634-141c8a2e4e26, infoPort=40396,
infoSecurePort=0, ipcPort=59675,
storageInfo=lv=-56;cid=testClusterID;nsid=1644883274;c=0), blocks: 0,
hasStaleStorage: false, processing time: 0 msecs
[junit4] 2> 3185436 INFO
(TEST-StressHdfsTest.test-seed#[5A3400F4114CAED4]) [ ] o.a.s.c.ZkTestServer
STARTING ZK TEST SERVER
[junit4] 2> 3185436 INFO (Thread-112683) [ ] o.a.s.c.ZkTestServer
client port:0.0.0.0/0.0.0.0:0
[junit4] 2> 3185436 INFO (Thread-112683) [ ] o.a.s.c.ZkTestServer
Starting server
[junit4] 2> 3185437 ERROR (Thread-112683) [ ] o.a.z.s.ZooKeeperServer
ZKShutdownHandler is not registered, so ZooKeeper server won't take any action
on ERROR or SHUTDOWN server state changes
[junit4] 2> 3185536 INFO
(TEST-StressHdfsTest.test-seed#[5A3400F4114CAED4]) [ ] o.a.s.c.ZkTestServer
start zk server on port:60623
[junit4] 2> 3185538 INFO (zkConnectionManagerCallback-1951-thread-1) [
] o.a.s.c.c.ConnectionManager zkClient has connected
[junit4] 2> 3185541 INFO (zkConnectionManagerCallback-1953-thread-1) [
] o.a.s.c.c.ConnectionManager zkClient has connected
[junit4] 2> 3185544 INFO
(TEST-StressHdfsTest.test-seed#[5A3400F4114CAED4]) [ ]
o.a.s.c.AbstractZkTestCase put
/x1/jenkins/jenkins-slave/workspace/Lucene-Solr-NightlyTests-master/checkout/solr/core/src/test-files/solr/collection1/conf/solrconfig-tlog.xml
to /configs/conf1/solrconfig.xml
[junit4] 2> 3185546 INFO
(TEST-StressHdfsTest.test-seed#[5A3400F4114CAED4]) [ ]
o.a.s.c.AbstractZkTestCase put
/x1/jenkins/jenkins-slave/workspace/Lucene-Solr-NightlyTests-master/checkout/solr/core/src/test-files/solr/collection1/conf/schema.xml
to /configs/conf1/schema.xml
[junit4] 2> 3185547 INFO
(TEST-StressHdfsTest.test-seed#[5A3400F4114CAED4]) [ ]
o.a.s.c.AbstractZkTestCase put
/x1/jenkins/jenkins-slave/workspace/Lucene-Solr-NightlyTests-master/checkout/solr/core/src/test-files/solr/collection1/conf/solrconfig.snippet.randomindexconfig.xml
to /configs/conf1/solrconfig.snippet.randomindexconfig.xml
[junit4] 2> 3185548 INFO
(TEST-StressHdfsTest.test-seed#[5A3400F4114CAED4]) [ ]
o.a.s.c.AbstractZkTestCase put
/x1/jenkins/jenkins-slave/workspace/Lucene-Solr-NightlyTests-master/checkout/solr/core/src/test-files/solr/collection1/conf/stopwords.txt
to /configs/conf1/stopwords.txt
[junit4] 2> 3185549 INFO
(TEST-StressHdfsTest.test-seed#[5A3400F4114CAED4]) [ ]
o.a.s.c.AbstractZkTestCase put
/x1/jenkins/jenkins-slave/workspace/Lucene-Solr-NightlyTests-master/checkout/solr/core/src/test-files/solr/collection1/conf/protwords.txt
to /configs/conf1/protwords.txt
[junit4] 2> 3185550 INFO
(TEST-StressHdfsTest.test-seed#[5A3400F4114CAED4]) [ ]
o.a.s.c.AbstractZkTestCase put
/x1/jenkins/jenkins-slave/workspace/Lucene-Solr-NightlyTests-master/checkout/solr/core/src/test-files/solr/collection1/conf/currency.xml
to /configs/conf1/currency.xml
[junit4] 2> 3185550 INFO
(TEST-StressHdfsTest.test-seed#[5A3400F4114CAED4]) [ ]
o.a.s.c.AbstractZkTestCase put
/x1/jenkins/jenkins-slave/workspace/Lucene-Solr-NightlyTests-master/checkout/solr/core/src/test-files/solr/collection1/conf/enumsConfig.xml
to /configs/conf1/enumsConfig.xml
[junit4] 2> 3185551 INFO
(TEST-StressHdfsTest.test-seed#[5A3400F4114CAED4]) [ ]
o.a.s.c.AbstractZkTestCase put
/x1/jenkins/jenkins-slave/workspace/Lucene-Solr-NightlyTests-master/checkout/solr/core/src/test-files/solr/collection1/conf/open-exchange-rates.json
to /configs/conf1/open-exchange-rates.json
[junit4] 2> 3185552 INFO
(TEST-StressHdfsTest.test-seed#[5A3400F4114CAED4]) [ ]
o.a.s.c.AbstractZkTestCase put
/x1/jenkins/jenkins-slave/workspace/Lucene-Solr-NightlyTests-master/checkout/solr/core/src/test-files/solr/collection1/conf/mapping-ISOLatin1Accent.txt
to /configs/conf1/mapping-ISOLatin1Accent.txt
[junit4] 2> 3185553 INFO
(TEST-StressHdfsTest.test-seed#[5A3400F4114CAED4]) [ ]
o.a.s.c.AbstractZkTestCase put
/x1/jenkins/jenkins-slave/workspace/Lucene-Solr-NightlyTests-master/checkout/solr/core/src/test-files/solr/collection1/conf/old_synonyms.txt
to /configs/conf1/old_synonyms.txt
[junit4] 2> 3185554 INFO
(TEST-StressHdfsTest.test-seed#[5A3400F4114CAED4]) [ ]
o.a.s.c.AbstractZkTestCase put
/x1/jenkins/jenkins-slave/workspace/Lucene-Solr-NightlyTests-master/checkout/solr/core/src/test-files/solr/collection1/conf/synonyms.txt
to /configs/conf1/synonyms.txt
[junit4] 2> 3185555 INFO
(TEST-StressHdfsTest.test-seed#[5A3400F4114CAED4]) [ ]
o.a.s.c.AbstractFullDistribZkTestBase Will use TLOG replicas unless explicitly
asked otherwise
[junit4] 2> 3185645 INFO
(TEST-StressHdfsTest.test-seed#[5A3400F4114CAED4]) [ ] o.e.j.s.Server
jetty-9.4.8.v20171121, build timestamp: 2017-11-21T14:27:37-07:00, git hash:
82b8fb23f757335bb3329d540ce37a2a2615f0a8
[junit4] 2> 3185645 INFO
(TEST-StressHdfsTest.test-seed#[5A3400F4114CAED4]) [ ] o.e.j.s.session
DefaultSessionIdManager workerName=node0
[junit4] 2> 3185645 INFO
(TEST-StressHdfsTest.test-seed#[5A3400F4114CAED4]) [ ] o.e.j.s.session No
SessionScavenger set, using defaults
[junit4] 2> 3185645 INFO
(TEST-StressHdfsTest.test-seed#[5A3400F4114CAED4]) [ ] o.e.j.s.session
Scavenging every 600000ms
[junit4] 2> 3185646 INFO
(TEST-StressHdfsTest.test-seed#[5A3400F4114CAED4]) [ ]
o.e.j.s.h.ContextHandler Started
o.e.j.s.ServletContextHandler@4dc8fd96{/,null,AVAILABLE}
[junit4] 2> 3185646 INFO
(TEST-StressHdfsTest.test-seed#[5A3400F4114CAED4]) [ ]
o.e.j.s.AbstractConnector Started
ServerConnector@30b34d76{HTTP/1.1,[http/1.1]}{127.0.0.1:48789}
[junit4] 2> 3185646 INFO
(TEST-StressHdfsTest.test-seed#[5A3400F4114CAED4]) [ ] o.e.j.s.Server
Started @3185687ms
[junit4] 2> 3185646 INFO
(TEST-StressHdfsTest.test-seed#[5A3400F4114CAED4]) [ ]
o.a.s.c.s.e.JettySolrRunner Jetty properties:
{solr.data.dir=hdfs://localhost:52191/hdfs__localhost_52191__x1_jenkins_jenkins-slave_workspace_Lucene-Solr-NightlyTests-master_checkout_solr_build_solr-core_test_J0_temp_solr.cloud.hdfs.StressHdfsTest_5A3400F4114CAED4-001_tempDir-002_control_data,
replicaType=NRT, hostContext=/, hostPort=48789,
coreRootDirectory=/x1/jenkins/jenkins-slave/workspace/Lucene-Solr-NightlyTests-master/checkout/solr/build/solr-core/test/J0/temp/solr.cloud.hdfs.StressHdfsTest_5A3400F4114CAED4-001/control-001/cores}
[junit4] 2> 3185646 ERROR
(TEST-StressHdfsTest.test-seed#[5A3400F4114CAED4]) [ ]
o.a.s.u.StartupLoggingUtils Missing Java Option solr.log.dir. Logging may be
missing or incomplete.
[junit4] 2> 3185646 INFO
(TEST-StressHdfsTest.test-seed#[5A3400F4114CAED4]) [ ]
o.a.s.s.SolrDispatchFilter Using logger factory
org.apache.logging.slf4j.Log4jLoggerFactory
[junit4] 2> 3185646 INFO
(TEST-StressHdfsTest.test-seed#[5A3400F4114CAED4]) [ ]
o.a.s.s.SolrDispatchFilter ___ _ Welcome to Apache Solr™ version
8.0.0
[junit4] 2> 3185646 INFO
(TEST-StressHdfsTest.test-seed#[5A3400F4114CAED4]) [ ]
o.a.s.s.SolrDispatchFilter / __| ___| |_ _ Starting in cloud mode on port null
[junit4] 2> 3185646 INFO
(TEST-StressHdfsTest.test-seed#[5A3400F4114CAED4]) [ ]
o.a.s.s.SolrDispatchFilter \__ \/ _ \ | '_| Install dir: null
[junit4] 2> 3185647 INFO
(TEST-StressHdfsTest.test-seed#[5A3400F4114CAED4]) [ ]
o.a.s.s.SolrDispatchFilter |___/\___/_|_| Start time:
2018-04-02T22:25:57.677Z
[junit4] 2> 3185648 INFO (zkConnectionManagerCallback-1955-thread-1) [
] o.a.s.c.c.ConnectionManager zkClient has connected
[junit4] 2> 3185649 INFO
(TEST-StressHdfsTest.test-seed#[5A3400F4114CAED4]) [ ]
o.a.s.s.SolrDispatchFilter Loading solr.xml from SolrHome (not found in
ZooKeeper)
[junit4] 2> 3185649 INFO
(TEST-StressHdfsTest.test-seed#[5A3400F4114CAED4]) [ ] o.a.s.c.SolrXmlConfig
Loading container configuration from
/x1/jenkins/jenkins-slave/workspace/Lucene-Solr-NightlyTests-master/checkout/solr/build/solr-core/test/J0/temp/solr.cloud.hdfs.StressHdfsTest_5A3400F4114CAED4-001/control-001/solr.xml
[junit4] 2> 3185652 INFO
(TEST-StressHdfsTest.test-seed#[5A3400F4114CAED4]) [ ] o.a.s.c.SolrXmlConfig
Configuration parameter autoReplicaFailoverWorkLoopDelay is ignored
[junit4] 2> 3185652 INFO
(TEST-StressHdfsTest.test-seed#[5A3400F4114CAED4]) [ ] o.a.s.c.SolrXmlConfig
Configuration parameter autoReplicaFailoverBadNodeExpiration is ignored
[junit4] 2> 3185653 INFO
(TEST-StressHdfsTest.test-seed#[5A3400F4114CAED4]) [ ] o.a.s.c.SolrXmlConfig
MBean server found: com.sun.jmx.mbeanserver.JmxMBeanServer@22e258d0, but no JMX
reporters were configured - adding default JMX reporter.
[junit4] 2> 3185655 INFO
(TEST-StressHdfsTest.test-seed#[5A3400F4114CAED4]) [ ] o.a.s.c.ZkContainer
Zookeeper client=127.0.0.1:60623/solr
[junit4] 2> 3185656 INFO (zkConnectionManagerCallback-1959-thread-1) [
] o.a.s.c.c.ConnectionManager zkClient has connected
[junit4] 2> 3185658 INFO (zkConnectionManagerCallback-1961-thread-1) [
] o.a.s.c.c.ConnectionManager zkClient has connected
[junit4] 2> 3185724 INFO
(TEST-StressHdfsTest.test-seed#[5A3400F4114CAED4]) [n:127.0.0.1:48789_ ]
o.a.s.c.Overseer Overseer (id=null) closing
[junit4] 2> 3185724 INFO
(TEST-StressHdfsTest.test-seed#[5A3400F4114CAED4]) [n:127.0.0.1:48789_ ]
o.a.s.c.OverseerElectionContext I am going to be the leader 127.0.0.1:48789_
[junit4] 2> 3185725 INFO
(TEST-StressHdfsTest.test-seed#[5A3400F4114CAED4]) [n:127.0.0.1:48789_ ]
o.a.s.c.Overseer Overseer (id=73562291366002692-127.0.0.1:48789_-n_0000000000)
starting
[junit4] 2> 3185730 INFO (zkConnectionManagerCallback-1966-thread-1) [
] o.a.s.c.c.ConnectionManager zkClient has connected
[junit4] 2> 3185731 INFO
(TEST-StressHdfsTest.test-seed#[5A3400F4114CAED4]) [n:127.0.0.1:48789_ ]
o.a.s.c.s.i.ZkClientClusterStateProvider Cluster at 127.0.0.1:60623/solr ready
[junit4] 2> 3185733 INFO
(TEST-StressHdfsTest.test-seed#[5A3400F4114CAED4]) [n:127.0.0.1:48789_ ]
o.a.s.c.ZkController Register node as live in
ZooKeeper:/live_nodes/127.0.0.1:48789_
[junit4] 2> 3185734 INFO (zkCallback-1960-thread-1) [ ]
o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (0) -> (1)
[junit4] 2> 3185737 INFO (zkCallback-1965-thread-1) [ ]
o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (0) -> (1)
[junit4] 2> 3185863 INFO
(TEST-StressHdfsTest.test-seed#[5A3400F4114CAED4]) [n:127.0.0.1:48789_ ]
o.a.s.m.r.SolrJmxReporter JMX monitoring for 'solr.node' (registry 'solr.node')
enabled at server: com.sun.jmx.mbeanserver.JmxMBeanServer@22e258d0
[junit4] 2> 3185870 INFO
(TEST-StressHdfsTest.test-seed#[5A3400F4114CAED4]) [n:127.0.0.1:48789_ ]
o.a.s.m.r.SolrJmxReporter JMX monitoring for 'solr.jvm' (registry 'solr.jvm')
enabled at server: com.sun.jmx.mbeanserver.JmxMBeanServer@22e258d0
[junit4] 2> 3185870 INFO
(TEST-StressHdfsTest.test-seed#[5A3400F4114CAED4]) [n:127.0.0.1:48789_ ]
o.a.s.m.r.SolrJmxReporter JMX monitoring for 'solr.jetty' (registry
'solr.jetty') enabled at server: com.sun.jmx.mbeanserver.JmxMBeanServer@22e258d0
[junit4] 2> 3185871 INFO
(TEST-StressHdfsTest.test-seed#[5A3400F4114CAED4]) [n:127.0.0.1:48789_ ]
o.a.s.c.CorePropertiesLocator Found 0 core definitions underneath
/x1/jenkins/jenkins-slave/workspace/Lucene-Solr-NightlyTests-master/checkout/solr/build/solr-core/test/J0/temp/solr.cloud.hdfs.StressHdfsTest_5A3400F4114CAED4-001/control-001/cores
[junit4] 2> 3185887 INFO (zkConnectionManagerCallback-1970-thread-1) [
] o.a.s.c.c.ConnectionManager zkClient has connected
[junit4] 2> 3185888 INFO
(TEST-StressHdfsTest.test-seed#[5A3400F4114CAED4]) [ ]
o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (0) -> (1)
[junit4] 2> 3185888 INFO
(TEST-StressHdfsTest.test-seed#[5A3400F4114CAED4]) [ ]
o.a.s.c.s.i.ZkClientClusterStateProvider Cluster at 127.0.0.1:60623/solr ready
[junit4] 2> 3185891 INFO (qtp1257677904-119861) [n:127.0.0.1:48789_ ]
o.a.s.h.a.CollectionsHandler Invoked Collection Action :create with params
replicationFactor=1&collection.configName=conf1&name=control_collection&nrtReplicas=1&action=CREATE&numShards=1&createNodeSet=127.0.0.1:48789_&wt=javabin&version=2
and sendToOCPQueue=true
[junit4] 2> 3185893 INFO (OverseerThreadFactory-3459-thread-1) [ ]
o.a.s.c.a.c.CreateCollectionCmd Create collection control_collection
[junit4] 2> 3185999 INFO (qtp1257677904-119865) [n:127.0.0.1:48789_ ]
o.a.s.h.a.CoreAdminOperation core create command
qt=/admin/cores&collection.configName=conf1&newCollection=true&name=control_collection_shard1_replica_n1&action=CREATE&numShards=1&collection=control_collection&shard=shard1&wt=javabin&version=2&replicaType=NRT
[junit4] 2> 3185999 INFO (qtp1257677904-119865) [n:127.0.0.1:48789_ ]
o.a.s.c.TransientSolrCoreCacheDefault Allocating transient cache for 4
transient cores
[junit4] 2> 3186107 INFO (zkCallback-1960-thread-1) [ ]
o.a.s.c.c.ZkStateReader A cluster state change: [WatchedEvent
state:SyncConnected type:NodeDataChanged
path:/collections/control_collection/state.json] for collection
[control_collection] has occurred - updating... (live nodes size: [1])
[junit4] 2> 3187021 INFO (qtp1257677904-119865) [n:127.0.0.1:48789_
c:control_collection s:shard1 x:control_collection_shard1_replica_n1]
o.a.s.c.SolrConfig Using Lucene MatchVersion: 8.0.0
[junit4] 2> 3187033 INFO (qtp1257677904-119865) [n:127.0.0.1:48789_
c:control_collection s:shard1 x:control_collection_shard1_replica_n1]
o.a.s.s.IndexSchema [control_collection_shard1_replica_n1] Schema name=test
[junit4] 2> 3187136 INFO (qtp1257677904-119865) [n:127.0.0.1:48789_
c:control_collection s:shard1 x:control_collection_shard1_replica_n1]
o.a.s.s.IndexSchema Loaded schema test/1.0 with uniqueid field id
[junit4] 2> 3187146 INFO (qtp1257677904-119865) [n:127.0.0.1:48789_
c:control_collection s:shard1 x:control_collection_shard1_replica_n1]
o.a.s.c.CoreContainer Creating SolrCore 'control_collection_shard1_replica_n1'
using configuration from collection control_collection, trusted=true
[junit4] 2> 3187147 INFO (qtp1257677904-119865) [n:127.0.0.1:48789_
c:control_collection s:shard1 x:control_collection_shard1_replica_n1]
o.a.s.m.r.SolrJmxReporter JMX monitoring for
'solr.core.control_collection.shard1.replica_n1' (registry
'solr.core.control_collection.shard1.replica_n1') enabled at server:
com.sun.jmx.mbeanserver.JmxMBeanServer@22e258d0
[junit4] 2> 3187147 INFO (qtp1257677904-119865) [n:127.0.0.1:48789_
c:control_collection s:shard1 x:control_collection_shard1_replica_n1]
o.a.s.c.HdfsDirectoryFactory
solr.hdfs.home=hdfs://localhost:52191/solr_hdfs_home
[junit4] 2> 3187147 INFO (qtp1257677904-119865) [n:127.0.0.1:48789_
c:control_collection s:shard1 x:control_collection_shard1_replica_n1]
o.a.s.c.HdfsDirectoryFactory Solr Kerberos Authentication disabled
[junit4] 2> 3187147 INFO (qtp1257677904-119865) [n:127.0.0.1:48789_
c:control_collection s:shard1 x:control_collection_shard1_replica_n1]
o.a.s.c.SolrCore solr.RecoveryStrategy.Builder
[junit4] 2> 3187147 INFO (qtp1257677904-119865) [n:127.0.0.1:48789_
c:control_collection s:shard1 x:control_collection_shard1_replica_n1]
o.a.s.c.SolrCore [[control_collection_shard1_replica_n1] ] Opening new SolrCore
at
[/x1/jenkins/jenkins-slave/workspace/Lucene-Solr-NightlyTests-master/checkout/solr/build/solr-core/test/J0/temp/solr.cloud.hdfs.StressHdfsTest_5A3400F4114CAED4-001/control-001/cores/control_collection_shard1_replica_n1],
dataDir=[hdfs://localhost:52191/solr_hdfs_home/control_collection/core_node2/data/]
[junit4] 2> 3187148 INFO (qtp1257677904-119865) [n:127.0.0.1:48789_
c:control_collection s:shard1 x:control_collection_shard1_replica_n1]
o.a.s.c.HdfsDirectoryFactory creating directory factory for path
hdfs://localhost:52191/solr_hdfs_home/control_collection/core_node2/data/snapshot_metadata
[junit4] 2> 3187156 INFO (qtp1257677904-119865) [n:127.0.0.1:48789_
c:control_collection s:shard1 x:control_collection_shard1_replica_n1]
o.a.s.c.HdfsDirectoryFactory Number of slabs of block cache [1] with direct
memory allocation set to [true]
[junit4] 2> 3187156 INFO (qtp1257677904-119865) [n:127.0.0.1:48789_
c:control_collection s:shard1 x:control_collection_shard1_replica_n1]
o.a.s.c.HdfsDirectoryFactory Block cache target memory usage, slab size of
[8388608] will allocate [1] slabs and use ~[8388608] bytes
[junit4] 2> 3187156 INFO (qtp1257677904-119865) [n:127.0.0.1:48789_
c:control_collection s:shard1 x:control_collection_shard1_replica_n1]
o.a.s.c.HdfsDirectoryFactory Creating new single instance HDFS BlockCache
[junit4] 2> 3187165 INFO (qtp1257677904-119865) [n:127.0.0.1:48789_
c:control_collection s:shard1 x:control_collection_shard1_replica_n1]
o.a.s.s.b.BlockDirectory Block cache on write is disabled
[junit4] 2> 3187166 INFO (qtp1257677904-119865) [n:127.0.0.1:48789_
c:control_collection s:shard1 x:control_collection_shard1_replica_n1]
o.a.s.c.HdfsDirectoryFactory creating directory factory for path
hdfs://localhost:52191/solr_hdfs_home/control_collection/core_node2/data
[junit4] 2> 3187180 INFO (qtp1257677904-119865) [n:127.0.0.1:48789_
c:control_collection s:shard1 x:control_collection_shard1_replica_n1]
o.a.s.c.HdfsDirectoryFactory creating directory factory for path
hdfs://localhost:52191/solr_hdfs_home/control_collection/core_node2/data/index
[junit4] 2> 3187193 INFO (qtp1257677904-119865) [n:127.0.0.1:48789_
c:control_collection s:shard1 x:control_collection_shard1_replica_n1]
o.a.s.c.HdfsDirectoryFactory Number of slabs of block cache [1] with direct
memory allocation set to [true]
[junit4] 2> 3187193 INFO (qtp1257677904-119865) [n:127.0.0.1:48789_
c:control_collection s:shard1 x:control_collection_shard1_replica_n1]
o.a.s.c.HdfsDirectoryFactory Block cache target memory usage, slab size of
[8388608] will allocate [1] slabs and use ~[8388608] bytes
[junit4] 2> 3187193 INFO (qtp1257677904-119865) [n:127.0.0.1:48789_
c:control_collection s:shard1 x:control_collection_shard1_replica_n1]
o.a.s.c.HdfsDirectoryFactory Creating new single instance HDFS BlockCache
[junit4] 2> 3187199 INFO (qtp1257677904-119865) [n:127.0.0.1:48789_
c:control_collection s:shard1 x:control_collection_shard1_replica_n1]
o.a.s.s.b.BlockDirectory Block cache on write is disabled
[junit4] 2> 3187200 INFO (qtp1257677904-119865) [n:127.0.0.1:48789_
c:control_collection s:shard1 x:control_collection_shard1_replica_n1]
o.a.s.u.RandomMergePolicy RandomMergePolicy wrapping class
org.apache.lucene.index.LogByteSizeMergePolicy: [LogByteSizeMergePolicy:
minMergeSize=1677721, mergeFactor=50, maxMergeSize=2147483648,
maxMergeSizeForForcedMerge=9223372036854775807, calibrateSizeByDeletes=false,
maxMergeDocs=2147483647, maxCFSSegmentSizeMB=8.796093022207999E12,
noCFSRatio=1.0]
[junit4] 2> 3187220 INFO (Block report processor) [ ] BlockStateChange
BLOCK* addStoredBlock: blockMap updated: 127.0.0.1:57980 is added to
blk_1073741825_1001{UCState=UNDER_CONSTRUCTION, truncateBlock=null,
primaryNodeIndex=-1,
replicas=[ReplicaUC[[DISK]DS-faf1db4b-7f8e-42ed-9df3-9939a6b8c1a4:NORMAL:127.0.0.1:39124|RBW],
ReplicaUC[[DISK]DS-f351dada-f296-4537-8f9d-185b1b50bd01:NORMAL:127.0.0.1:57980|FINALIZED]]}
size 0
[junit4] 2> 3187224 INFO (Block report processor) [ ] BlockStateChange
BLOCK* addStoredBlock: blockMap updated: 127.0.0.1:39124 is added to
blk_1073741825_1001{UCState=UNDER_CONSTRUCTION, truncateBlock=null,
primaryNodeIndex=-1,
replicas=[ReplicaUC[[DISK]DS-f351dada-f296-4537-8f9d-185b1b50bd01:NORMAL:127.0.0.1:57980|FINALIZED],
ReplicaUC[[DISK]DS-ee411a95-0395-42e9-8683-a13d875dc331:NORMAL:127.0.0.1:39124|FINALIZED]]}
size 0
[junit4] 2> 3187230 WARN (qtp1257677904-119865) [n:127.0.0.1:48789_
c:control_collection s:shard1 x:control_collection_shard1_replica_n1]
o.a.s.c.RequestHandlers INVALID paramSet a in requestHandler {type =
requestHandler,name = /dump,class = DumpRequestHandler,attributes =
{initParams=a, name=/dump, class=DumpRequestHandler},args =
{defaults={a=A,b=B}}}
[junit4] 2> 3187264 INFO (qtp1257677904-119865) [n:127.0.0.1:48789_
c:control_collection s:shard1 x:control_collection_shard1_replica_n1]
o.a.s.u.UpdateHandler Using UpdateLog implementation:
org.apache.solr.update.HdfsUpdateLog
[junit4] 2> 3187264 INFO (qtp1257677904-119865) [n:127.0.0.1:48789_
c:control_collection s:shard1 x:control_collection_shard1_replica_n1]
o.a.s.u.UpdateLog Initializing UpdateLog: dataDir= defaultSyncLevel=FLUSH
numRecordsToKeep=100 maxNumLogsToKeep=10 numVersionBuckets=65536
[junit4] 2> 3187264 INFO (qtp1257677904-119865) [n:127.0.0.1:48789_
c:control_collection s:shard1 x:control_collection_shard1_replica_n1]
o.a.s.u.HdfsUpdateLog Initializing HdfsUpdateLog: tlogDfsReplication=2
[junit4] 2> 3187273 INFO (qtp1257677904-119865) [n:127.0.0.1:48789_
c:control_collection s:shard1 x:control_collection_shard1_replica_n1]
o.a.s.u.CommitTracker Hard AutoCommit: disabled
[junit4] 2> 3187273 INFO (qtp1257677904-119865) [n:127.0.0.1:48789_
c:control_collection s:shard1 x:control_collection_shard1_replica_n1]
o.a.s.u.CommitTracker Soft AutoCommit: disabled
[junit4] 2> 3187274 INFO (qtp1257677904-119865) [n:127.0.0.1:48789_
c:control_collection s:shard1 x:control_collection_shard1_replica_n1]
o.a.s.u.RandomMergePolicy RandomMergePolicy wrapping class
org.apache.lucene.index.LogDocMergePolicy: [LogDocMergePolicy:
minMergeSize=1000, mergeFactor=7, maxMergeSize=9223372036854775807,
maxMergeSizeForForcedMerge=9223372036854775807, calibrateSizeByDeletes=false,
maxMergeDocs=2147483647, maxCFSSegmentSizeMB=8.796093022207999E12,
noCFSRatio=0.0]
[junit4] 2> 3187278 INFO (qtp1257677904-119865) [n:127.0.0.1:48789_
c:control_collection s:shard1 x:control_collection_shard1_replica_n1]
o.a.s.s.SolrIndexSearcher Opening
[Searcher@62964097[control_collection_shard1_replica_n1] main]
[junit4] 2> 3187279 INFO (qtp1257677904-119865) [n:127.0.0.1:48789_
c:control_collection s:shard1 x:control_collection_shard1_replica_n1]
o.a.s.r.ManagedResourceStorage Configured ZooKeeperStorageIO with znodeBase:
/configs/conf1
[junit4] 2> 3187280 INFO (qtp1257677904-119865) [n:127.0.0.1:48789_
c:control_collection s:shard1 x:control_collection_shard1_replica_n1]
o.a.s.r.ManagedResourceStorage Loaded null at path _rest_managed.json using
ZooKeeperStorageIO:path=/configs/conf1
[junit4] 2> 3187280 INFO (qtp1257677904-119865) [n:127.0.0.1:48789_
c:control_collection s:shard1 x:control_collection_shard1_replica_n1]
o.a.s.h.ReplicationHandler Commits will be reserved for 10000ms.
[junit4] 2> 3187281 INFO
(searcherExecutor-3462-thread-1-processing-n:127.0.0.1:48789_
x:control_collection_shard1_replica_n1 c:control_collection s:shard1)
[n:127.0.0.1:48789_ c:control_collection s:shard1
x:control_collection_shard1_replica_n1] o.a.s.c.SolrCore
[control_collection_shard1_replica_n1] Registered new searcher
Searcher@62964097[control_collection_shard1_replica_n1]
main{ExitableDirectoryReader(UninvertingDirectoryReader())}
[junit4] 2> 3187281 INFO (qtp1257677904-119865) [n:127.0.0.1:48789_
c:control_collection s:shard1 x:control_collection_shard1_replica_n1]
o.a.s.u.UpdateLog Could not find max version in index or recent updates, using
new clock 1596675021142491136
[junit4] 2> 3187285 INFO (qtp1257677904-119865) [n:127.0.0.1:48789_
c:control_collection s:shard1 x:control_collection_shard1_replica_n1]
o.a.s.c.ZkShardTerms Successful update of terms at
/collections/control_collection/terms/shard1 to Terms{values={core_node2=0},
version=0}
[junit4] 2> 3187287 INFO (qtp1257677904-119865) [n:127.0.0.1:48789_
c:control_collection s:shard1 x:control_collection_shard1_replica_n1]
o.a.s.c.ShardLeaderElectionContext Enough replicas found to continue.
[junit4] 2> 3187287 INFO (qtp1257677904-119865) [n:127.0.0.1:48789_
c:control_collection s:shard1 x:control_collection_shard1_replica_n1]
o.a.s.c.ShardLeaderElectionContext I may be the new leader - try and sync
[junit4] 2> 3187287 INFO (qtp1257677904-119865) [n:127.0.0.1:48789_
c:control_collection s:shard1 x:control_collection_shard1_replica_n1]
o.a.s.c.SyncStrategy Sync replicas to
http://127.0.0.1:48789/control_collection_shard1_replica_n1/
[junit4] 2> 3187287 INFO (qtp1257677904-119865) [n:127.0.0.1:48789_
c:control_collection s:shard1 x:control_collection_shard1_replica_n1]
o.a.s.c.SyncStrategy Sync Success - now sync replicas to me
[junit4] 2> 3187287 INFO (qtp1257677904-119865) [n:127.0.0.1:48789_
c:control_collection s:shard1 x:control_collection_shard1_replica_n1]
o.a.s.c.SyncStrategy
http://127.0.0.1:48789/control_collection_shard1_replica_n1/ has no replicas
[junit4] 2> 3187287 INFO (qtp1257677904-119865) [n:127.0.0.1:48789_
c:control_collection s:shard1 x:control_collection_shard1_replica_n1]
o.a.s.c.ShardLeaderElectionContext Found all replicas participating in
election, clear LIR
[junit4] 2> 3187289 INFO (qtp1257677904-119865) [n:127.0.0.1:48789_
c:control_collection s:shard1 x:control_collection_shard1_replica_n1]
o.a.s.c.ShardLeaderElectionContext I am the new leader:
http://127.0.0.1:48789/control_collection_shard1_replica_n1/ shard1
[junit4] 2> 3187391 INFO (zkCallback-1960-thread-2) [ ]
o.a.s.c.c.ZkStateReader A cluster state change: [WatchedEvent
state:SyncConnected type:NodeDataChanged
path:/collections/control_collection/state.json] for collection
[control_collection] has occurred - updating... (live nodes size: [1])
[junit4] 2> 3187440 INFO (qtp1257677904-119865) [n:127.0.0.1:48789_
c:control_collection s:shard1 x:control_collection_shard1_replica_n1]
o.a.s.c.ZkController I am the leader, no recovery necessary
[junit4] 2> 3187441 INFO (qtp1257677904-119865) [n:127.0.0.1:48789_
c:control_collection s:shard1 x:control_collection_shard1_replica_n1]
o.a.s.s.HttpSolrCall [admin] webapp=null path=/admin/cores
params={qt=/admin/cores&collection.configName=conf1&newCollection=true&name=control_collection_shard1_replica_n1&action=CREATE&numShards=1&collection=control_collection&shard=shard1&wt=javabin&version=2&replicaType=NRT}
status=0 QTime=1442
[junit4] 2> 3187444 INFO (qtp1257677904-119861) [n:127.0.0.1:48789_ ]
o.a.s.h.a.CollectionsHandler Wait for new collection to be active for at most
30 seconds. Check all shard replicas
[junit4] 2> 3187542 INFO (zkCallback-1960-thread-2) [ ]
o.a.s.c.c.ZkStateReader A cluster state change: [WatchedEvent
state:SyncConnected type:NodeDataChanged
path:/collections/control_collection/state.json] for collection
[control_collection] has occurred - updating... (live nodes size: [1])
[junit4] 2> 3187894 INFO
(OverseerCollectionConfigSetProcessor-73562291366002692-127.0.0.1:48789_-n_0000000000)
[ ] o.a.s.c.OverseerTaskQueue Response ZK path:
/overseer/collection-queue-work/qnr-0000000000 doesn't exist. Requestor may
have disconnected from ZooKeeper
[junit4] 2> 3188444 INFO (qtp1257677904-119861) [n:127.0.0.1:48789_ ]
o.a.s.s.HttpSolrCall [admin] webapp=null path=/admin/collections
params={replicationFactor=1&collection.configName=conf1&name=control_collection&nrtReplicas=1&action=CREATE&numShards=1&createNodeSet=127.0.0.1:48789_&wt=javabin&version=2}
status=0 QTime=2553
[junit4] 2> 3188447 INFO (zkConnectionManagerCallback-1974-thread-1) [
] o.a.s.c.c.ConnectionManager zkClient has connected
[junit4] 2> 3188448 INFO
(TEST-StressHdfsTest.test-seed#[5A3400F4114CAED4]) [ ]
o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (0) -> (1)
[junit4] 2> 3188449 INFO
(TEST-StressHdfsTest.test-seed#[5A3400F4114CAED4]) [ ]
o.a.s.c.s.i.ZkClientClusterStateProvider Cluster at 127.0.0.1:60623/solr ready
[junit4] 2> 3188449 INFO
(TEST-StressHdfsTest.test-seed#[5A3400F4114CAED4]) [ ] o.a.s.c.ChaosMonkey
monkey: init - expire sessions:false cause connection loss:false
[junit4] 2> 3188449 INFO (qtp1257677904-119865) [n:127.0.0.1:48789_ ]
o.a.s.h.a.CollectionsHandler Invoked Collection Action :create with params
replicationFactor=1&collection.configName=conf1&name=collection1&nrtReplicas=1&action=CREATE&numShards=1&createNodeSet=&stateFormat=2&wt=javabin&version=2
and sendToOCPQueue=true
[junit4] 2> 3188451 INFO (OverseerThreadFactory-3459-thread-2) [ ]
o.a.s.c.a.c.CreateCollectionCmd Create collection collection1
[junit4] 2> 3188451 WARN (OverseerThreadFactory-3459-thread-2) [ ]
o.a.s.c.a.c.CreateCollectionCmd It is unusual to create a collection
(collection1) without cores.
[junit4] 2> 3188656 INFO (qtp1257677904-119865) [n:127.0.0.1:48789_ ]
o.a.s.h.a.CollectionsHandler Wait for new collection to be active for at most
30 seconds. Check all shard replicas
[junit4] 2> 3188657 INFO (qtp1257677904-119865) [n:127.0.0.1:48789_ ]
o.a.s.s.HttpSolrCall [admin] webapp=null path=/admin/collections
params={replicationFactor=1&collection.configName=conf1&name=collection1&nrtReplicas=1&action=CREATE&numShards=1&createNodeSet=&stateFormat=2&wt=javabin&version=2}
status=0 QTime=207
[junit4] 2> 3188746 INFO
(TEST-StressHdfsTest.test-seed#[5A3400F4114CAED4]) [ ]
o.a.s.c.AbstractFullDistribZkTestBase create jetty 1 in directory
/x1/jenkins/jenkins-slave/workspace/Lucene-Solr-NightlyTests-master/checkout/solr/build/solr-core/test/J0/temp/solr.cloud.hdfs.StressHdfsTest_5A3400F4114CAED4-001/shard-1-001
of type TLOG
[junit4] 2> 3188746 INFO
(TEST-StressHdfsTest.test-seed#[5A3400F4114CAED4]) [ ] o.e.j.s.Server
jetty-9.4.8.v20171121, build timestamp: 2017-11-21T14:27:37-07:00, git hash:
82b8fb23f757335bb3329d540ce37a2a2615f0a8
[junit4] 2> 3188747 INFO
(TEST-StressHdfsTest.test-seed#[5A3400F4114CAED4]) [ ] o.e.j.s.session
DefaultSessionIdManager workerName=node0
[junit4] 2> 3188747 INFO
(TEST-StressHdfsTest.test-seed#[5A3400F4114CAED4]) [ ] o.e.j.s.session No
SessionScavenger set, using defaults
[junit4] 2> 3188747 INFO
(TEST-StressHdfsTest.test-seed#[5A3400F4114CAED4]) [ ] o.e.j.s.session
Scavenging every 600000ms
[junit4] 2> 3188748 INFO
(TEST-StressHdfsTest.test-seed#[5A3400F4114CAED4]) [ ]
o.e.j.s.h.ContextHandler Started
o.e.j.s.ServletContextHandler@28798a47{/,null,AVAILABLE}
[junit4] 2> 3188748 INFO
(TEST-StressHdfsTest.test-seed#[5A3400F4114CAED4]) [ ]
o.e.j.s.AbstractConnector Started
ServerConnector@4bc36fe7{HTTP/1.1,[http/1.1]}{127.0.0.1:60204}
[junit4] 2> 3188748 INFO
(TEST-StressHdfsTest.test-seed#[5A3400F4114CAED4]) [ ] o.e.j.s.Server
Started @3188789ms
[junit4] 2> 3188748 INFO
(TEST-StressHdfsTest.test-seed#[5A3400F4114CAED4]) [ ]
o.a.s.c.s.e.JettySolrRunner Jetty properties:
{solr.data.dir=hdfs://localhost:52191/hdfs__localhost_52191__x1_jenkins_jenkins-slave_workspace_Lucene-Solr-NightlyTests-master_checkout_solr_build_solr-core_test_J0_temp_solr.cloud.hdfs.StressHdfsTest_5A3400F4114CAED4-001_tempDir-002_jetty1,
replicaType=TLOG, solrconfig=solrconfig.xml, hostContext=/, hostPort=60204,
coreRootDirectory=/x1/jenkins/jenkins-slave/workspace/Lucene-Solr-NightlyTests-master/checkout/solr/build/solr-core/test/J0/temp/solr.cloud.hdfs.StressHdfsTest_5A3400F4114CAED4-001/shard-1-001/cores}
[junit4] 2> 3188748 ERROR
(TEST-StressHdfsTest.test-seed#[5A3400F4114CAED4]) [ ]
o.a.s.u.StartupLoggingUtils Missing Java Option solr.log.dir. Logging may be
missing or incomplete.
[junit4] 2> 3188748 INFO
(TEST-StressHdfsTest.test-seed#[5A3400F4114CAED4]) [ ]
o.a.s.s.SolrDispatchFilter Using logger factory
org.apache.logging.slf4j.Log4jLoggerFactory
[junit4] 2> 3188748 INFO
(TEST-StressHdfsTest.test-seed#[5A3400F4114CAED4]) [ ]
o.a.s.s.SolrDispatchFilter ___ _ Welcome to Apache Solr™ version
8.0.0
[junit4] 2> 3188748 INFO
(TEST-StressHdfsTest.test-seed#[5A3400F4114CAED4]) [ ]
o.a.s.s.SolrDispatchFilter / __| ___| |_ _ Starting in cloud mode on port null
[junit4] 2> 3188748 INFO
(TEST-StressHdfsTest.test-seed#[5A3400F4114CAED4]) [ ]
o.a.s.s.SolrDispatchFilter \__ \/ _ \ | '_| Install dir: null
[junit4] 2> 3188748 INFO
(TEST-StressHdfsTest.test-seed#[5A3400F4114CAED4]) [ ]
o.a.s.s.SolrDispatchFilter |___/\___/_|_| Start time:
2018-04-02T22:26:00.778Z
[junit4] 2> 3188750 INFO (zkConnectionManagerCallback-1976-thread-1) [
] o.a.s.c.c.ConnectionManager zkClient has connected
[junit4] 2> 3188751 INFO
(TEST-StressHdfsTest.test-seed#[5A3400F4114CAED4]) [ ]
o.a.s.s.SolrDispatchFilter Loading solr.xml from SolrHome (not found in
ZooKeeper)
[junit4] 2> 3188751 INFO
(TEST-StressHdfsTest.test-seed#[5A3400F4114CAED4]) [ ] o.a.s.c.SolrXmlConfig
Loading container configuration from
/x1/jenkins/jenkins-slave/workspace/Lucene-Solr-NightlyTests-master/checkout/solr/build/solr-core/test/J0/temp/solr.cloud.hdfs.StressHdfsTest_5A3400F4114CAED4-001/shard-1-001/solr.xml
[junit4] 2> 3188754 INFO
(TEST-StressHdfsTest.test-seed#[5A3400F4114CAED4]) [ ] o.a.s.c.SolrXmlConfig
Configuration parameter autoReplicaFailoverWorkLoopDelay is ignored
[junit4] 2> 3188754 INFO
(TEST-StressHdfsTest.test-seed#[5A3400F4114CAED4]) [ ] o.a.s.c.SolrXmlConfig
Configuration parameter autoReplicaFailoverBadNodeExpiration is ignored
[junit4] 2> 3188755 INFO
(TEST-StressHdfsTest.test-seed#[5A3400F4114CAED4]) [ ] o.a.s.c.SolrXmlConfig
MBean server found: com.sun.jmx.mbeanserver.JmxMBeanServer@22e258d0, but no JMX
reporters were configured - adding default JMX reporter.
[junit4] 2> 3188758 INFO
(TEST-StressHdfsTest.test-seed#[5A3400F4114CAED4]) [ ] o.a.s.c.ZkContainer
Zookeeper client=127.0.0.1:60623/solr
[junit4] 2> 3188759 INFO (zkConnectionManagerCallback-1980-thread-1) [
] o.a.s.c.c.ConnectionManager zkClient has connected
[junit4] 2> 3188761 INFO (zkConnectionManagerCallback-1982-thread-1) [
] o.a.s.c.c.ConnectionManager zkClient has connected
[junit4] 2> 3188765 INFO
(TEST-StressHdfsTest.test-seed#[5A3400F4114CAED4]) [n:127.0.0.1:60204_ ]
o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (0) -> (1)
[junit4] 2> 3188767 INFO
(TEST-StressHdfsTest.test-seed#[5A3400F4114CAED4]) [n:127.0.0.1:60204_ ]
o.a.s.c.Overseer Overseer (id=null) closing
[junit4] 2> 3188769 INFO
(TEST-StressHdfsTest.test-seed#[5A3400F4114CAED4]) [n:127.0.0.1:60204_ ]
o.a.s.c.TransientSolrCoreCacheDefault Allocating transient cache for 4
transient cores
[junit4] 2> 3188769 INFO
(TEST-StressHdfsTest.test-seed#[5A3400F4114CAED4]) [n:127.0.0.1:60204_ ]
o.a.s.c.ZkController Register node as live in
ZooKeeper:/live_nodes/127.0.0.1:60204_
[junit4] 2> 3188770 INFO (zkCallback-1960-thread-2) [ ]
o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (1) -> (2)
[junit4] 2> 3188773 INFO (zkCallback-1973-thread-1) [ ]
o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (1) -> (2)
[junit4] 2> 3188775 INFO (zkCallback-1965-thread-1) [ ]
o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (1) -> (2)
[junit4] 2> 3188785 INFO (zkCallback-1981-thread-1) [ ]
o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (1) -> (2)
[junit4] 2> 3188839 INFO
(TEST-StressHdfsTest.test-seed#[5A3400F4114CAED4]) [n:127.0.0.1:60204_ ]
o.a.s.m.r.SolrJmxReporter JMX monitoring for 'solr.node' (registry 'solr.node')
enabled at server: com.sun.jmx.mbeanserver.JmxMBeanServer@22e258d0
[junit4] 2> 3188847 INFO
(TEST-StressHdfsTest.test-seed#[5A3400F4114CAED4]) [n:127.0.0.1:60204_ ]
o.a.s.m.r.SolrJmxReporter JMX monitoring for 'solr.jvm' (registry 'solr.jvm')
enabled at server: com.sun.jmx.mbeanserver.JmxMBeanServer@22e258d0
[junit4] 2> 3188847 INFO
(TEST-StressHdfsTest.test-seed#[5A3400F4114CAED4]) [n:127.0.0.1:60204_ ]
o.a.s.m.r.SolrJmxReporter JMX monitoring for 'solr.jetty' (registry
'solr.jetty') enabled at server: com.sun.jmx.mbeanserver.JmxMBeanServer@22e258d0
[junit4] 2> 3188848 INFO
(TEST-StressHdfsTest.test-seed#[5A3400F4114CAED4]) [n:127.0.0.1:60204_ ]
o.a.s.c.CorePropertiesLocator Found 0 core definitions underneath
/x1/jenkins/jenkins-slave/workspace/Lucene-Solr-NightlyTests-master/checkout/solr/build/solr-core/test/J0/temp/solr.cloud.hdfs.StressHdfsTest_5A3400F4114CAED4-001/shard-1-001/cores
[junit4] 2> 3188851 INFO (zkConnectionManagerCallback-1987-thread-1) [
] o.a.s.c.c.ConnectionManager zkClient has connected
[junit4] 2> 3188852 INFO
(TEST-StressHdfsTest.test-seed#[5A3400F4114CAED4]) [n:127.0.0.1:60204_ ]
o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (0) -> (2)
[junit4] 2> 3188852 INFO
(TEST-StressHdfsTest.test-seed#[5A3400F4114CAED4]) [n:127.0.0.1:60204_ ]
o.a.s.c.s.i.ZkClientClusterStateProvider Cluster at 127.0.0.1:60623/solr ready
[junit4] 2> 3188873 INFO (qtp1257677904-119863) [n:127.0.0.1:48789_ ]
o.a.s.h.a.CollectionsHandler Invoked Collection Action :addreplica with params
node=127.0.0.1:60204_&action=ADDREPLICA&collection=collection1&shard=shard1&type=TLOG&wt=javabin&version=2
and sendToOCPQueue=true
[junit4] 2> 3188875 INFO
(OverseerCollectionConfigSetProcessor-73562291366002692-127.0.0.1:48789_-n_0000000000)
[ ] o.a.s.c.OverseerTaskQueue Response ZK path:
/overseer/collection-queue-work/qnr-0000000002 doesn't exist. Requestor may
have disconnected from ZooKeeper
[junit4] 2> 3188876 INFO (OverseerThreadFactory-3459-thread-3) [ ]
o.a.s.c.a.c.AddReplicaCmd Node Identified 127.0.0.1:60204_ for creating new
replica
[junit4] 2> 3188878 INFO (qtp569252489-119923) [n:127.0.0.1:60204_ ]
o.a.s.h.a.CoreAdminOperation core create command
qt=/admin/cores&collection.configName=conf1&name=collection1_shard1_replica_t21&action=CREATE&collection=collection1&shard=shard1&wt=javabin&version=2&replicaType=TLOG
[junit4] 2> 3188985 INFO (zkCallback-1981-thread-1) [ ]
o.a.s.c.c.ZkStateReader A cluster state change: [WatchedEvent
state:SyncConnected type:NodeDataChanged
path:/collections/collection1/state.json] for collection [collection1] has
occurred - updating... (live nodes size: [2])
[junit4] 2> 3189892 INFO (qtp569252489-119923) [n:127.0.0.1:60204_
c:collection1 s:shard1 x:collection1_shard1_replica_t21] o.a.s.c.SolrConfig
Using Lucene MatchVersion: 8.0.0
[junit4] 2> 3189904 INFO (qtp569252489-119923) [n:127.0.0.1:60204_
c:collection1 s:shard1 x:collection1_shard1_replica_t21] o.a.s.s.IndexSchema
[collection1_shard1_replica_t21] Schema name=test
[junit4] 2> 3190010 INFO (qtp569252489-119923) [n:127.0.0.1:60204_
c:collection1 s:shard1 x:collection1_shard1_replica_t21] o.a.s.s.IndexSchema
Loaded schema test/1.0 with uniqueid field id
[junit4] 2> 3190020 INFO (qtp569252489-119923) [n:127.0.0.1:60204_
c:collection1 s:shard1 x:collection1_shard1_replica_t21] o.a.s.c.CoreContainer
Creating SolrCore 'collection1_shard1_replica_t21' using configuration from
collection collection1, trusted=true
[junit4] 2> 3190020 INFO (qtp569252489-119923) [n:127.0.0.1:60204_
c:collection1 s:shard1 x:collection1_shard1_replica_t21]
o.a.s.m.r.SolrJmxReporter JMX monitoring for
'solr.core.collection1.shard1.replica_t21' (registry
'solr.core.collection1.shard1.replica_t21') enabled at server:
com.sun.jmx.mbeanserver.JmxMBeanServer@22e258d0
[junit4] 2> 3190020 INFO (qtp569252489-119923) [n:127.0.0.1:60204_
c:collection1 s:shard1 x:collection1_shard1_replica_t21]
o.a.s.c.HdfsDirectoryFactory
solr.hdfs.home=hdfs://localhost:52191/solr_hdfs_home
[junit4] 2> 3190020 INFO (qtp569252489-119923) [n:127.0.0.1:60204_
c:collection1 s:shard1 x:collection1_shard1_replica_t21]
o.a.s.c.HdfsDirectoryFactory Solr Kerberos Authentication disabled
[junit4] 2> 3190020 INFO (qtp569252489-119923) [n:127.0.0.1:60204_
c:collection1 s:shard1 x:collection1_shard1_replica_t21] o.a.s.c.SolrCore
solr.RecoveryStrategy.Builder
[junit4] 2> 3190020 INFO (qtp569252489-119923) [n:127.0.0.1:60204_
c:collection1 s:shard1 x:collection1_shard1_replica_t21] o.a.s.c.SolrCore
[[collection1_shard1_replica_t21] ] Opening new SolrCore at
[/x1/jenkins/jenkins-slave/workspace/Lucene-Solr-NightlyTests-master/checkout/solr/build/solr-core/test/J0/temp/solr.cloud.hdfs.StressHdfsTest_5A3400F4114CAED4-001/shard-1-001/cores/collection1_shard1_replica_t21],
dataDir=[hdfs://localhost:52191/solr_hdfs_home/collection1/core_node22/data/]
[junit4] 2> 3190021 INFO (qtp569252489-119923) [n:127.0.0.1:60204_
c:collection1 s:shard1 x:collection1_shard1_replica_t21]
o.a.s.c.HdfsDirectoryFactory creating directory factory for path
hdfs://localhost:52191/solr_hdfs_home/collection1/core_node22/data/snapshot_metadata
[junit4] 2> 3190028 INFO (qtp569252489-119923) [n:127.0.0.1:60204_
c:collection1 s:shard1 x:collection1_shard1_replica_t21]
o.a.s.c.HdfsDirectoryFactory Number of slabs of block cache [1] with direct
memory allocation set to [true]
[junit4] 2> 3190028 INFO (qtp569252489-119923) [n:127.0.0.1:60204_
c:collection1 s:shard1 x:collection1_shard1_replica_t21]
o.a.s.c.HdfsDirectoryFactory Block cache target memory usage, slab size of
[8388608] will allocate [1] slabs and use ~[8388608] bytes
[junit4] 2> 3190028 INFO (qtp569252489-119923) [n:127.0.0.1:60204_
c:collection1 s:shard1 x:collection1_shard1_replica_t21]
o.a.s.c.HdfsDirectoryFactory Creating new single instance HDFS BlockCache
[junit4] 2> 3190034 INFO (qtp569252489-119923) [n:127.0.0.1:60204_
c:collection1 s:shard1 x:collection1_shard1_replica_t21]
o.a.s.s.b.BlockDirectory Block cache on write is disabled
[junit4] 2> 3190034 INFO (qtp569252489-119923) [n:127.0.0.1:60204_
c:collection1 s:shard1 x:collection1_shard1_replica_t21]
o.a.s.c.HdfsDirectoryFactory creating directory factory for path
hdfs://localhost:52191/solr_hdfs_home/collection1/core_node22/data
[junit4] 2> 3190053 INFO (qtp569252489-119923) [n:127.0.0.1:60204_
c:collection1 s:shard1 x:collection1_shard1_replica_t21]
o.a.s.c.HdfsDirectoryFactory creating directory factory for path
hdfs://localhost:52191/solr_hdfs_home/collection1/core_node22/data/index
[junit4] 2> 3190064 INFO (qtp569252489-119923) [n:127.0.0.1:60204_
c:collection1 s:shard1 x:collection1_shard1_replica_t21]
o.a.s.c.HdfsDirectoryFactory Number of slabs of block cache [1] with direct
memory allocation set to [true]
[junit4] 2> 3190064 INFO (qtp569252489-119923) [n:127.0.0.1:60204_
c:collection1 s:shard1 x:collection1_shard1_replica_t21]
o.a.s.c.HdfsDirectoryFactory Block cache target memory usage, slab size of
[8388608] will allocate [1] slabs and use ~[8388608] bytes
[junit4] 2> 3190065 INFO (qtp569252489-119923) [n:127.0.0.1:60204_
c:collection1 s:shard1 x:collection1_shard1_replica_t21]
o.a.s.c.HdfsDirectoryFactory Creating new single instance HDFS BlockCache
[junit4] 2> 3190072 INFO (qtp569252489-119923) [n:127.0.0.1:60204_
c:collection1 s:shard1 x:collection1_shard1_replica_t21]
o.a.s.s.b.BlockDirectory Block cache on write is disabled
[junit4] 2> 3190073 INFO (qtp569252489-119923) [n:127.0.0.1:60204_
c:collection1 s:shard1 x:collection1_shard1_replica_t21]
o.a.s.u.RandomMergePolicy RandomMergePolicy wrapping class
org.apache.lucene.index.LogByteSizeMergePolicy: [LogByteSizeMergePolicy:
minMergeSize=1677721, mergeFactor=50, maxMergeSize=2147483648,
maxMergeSizeForForcedMerge=9223372036854775807, calibrateSizeByDeletes=false,
maxMergeDocs=2147483647, maxCFSSegmentSizeMB=8.796093022207999E12,
noCFSRatio=1.0]
[junit4] 2> 3190102 INFO (Block report processor) [ ] BlockStateChange
BLOCK* addStoredBlock: blockMap updated: 127.0.0.1:57980 is added to
blk_1073741826_1002{UCState=UNDER_CONSTRUCTION, truncateBlock=null,
primaryNodeIndex=-1,
replicas=[ReplicaUC[[DISK]DS-ee411a95-0395-42e9-8683-a13d875dc331:NORMAL:127.0.0.1:39124|RBW],
ReplicaUC[[DISK]DS-0456f817-18e7-4482-8aeb-4db146ebe3ec:NORMAL:127.0.0.1:57980|FINALIZED]]}
size 0
[junit4] 2> 3190103 INFO (Block report processor) [ ] BlockStateChange
BLOCK* addStoredBlock: blockMap updated: 127.0.0.1:39124 is added to
blk_1073741826_1002{UCState=UNDER_CONSTRUCTION, truncateBlock=null,
primaryNodeIndex=-1,
replicas=[ReplicaUC[[DISK]DS-0456f817-18e7-4482-8aeb-4db146ebe3ec:NORMAL:127.0.0.1:57980|FINALIZED],
ReplicaUC[[DISK]DS-faf1db4b-7f8e-42ed-9df3-9939a6b8c1a4:NORMAL:127.0.0.1:39124|FINALIZED]]}
size 0
[junit4] 2> 3190106 WARN (qtp569252489-119923) [n:127.0.0.1:60204_
c:collection1 s:shard1 x:collection1_shard1_replica_t21]
o.a.s.c.RequestHandlers INVALID paramSet a in requestHandler {type =
requestHandler,name = /dump,class = DumpRequestHandler,attributes =
{initParams=a, name=/dump, class=DumpRequestHandler},args =
{defaults={a=A,b=B}}}
[junit4] 2> 3190177 INFO (qtp569252489-119923) [n:127.0.0.1:60204_
c:collection1 s:shard1 x:collection1_shard1_replica_t21] o.a.s.u.UpdateHandler
Using UpdateLog implementation: org.apache.solr.update.HdfsUpdateLog
[junit4] 2> 3190177 INFO (qtp569252489-119923) [n:127.0.0.1:60204_
c:collection1 s:shard1 x:collection1_shard1_replica_t21] o.a.s.u.UpdateLog
Initializing UpdateLog: dataDir= defaultSyncLevel=FLUSH numRecordsToKeep=100
maxNumLogsToKeep=10 numVersionBuckets=65536
[junit4] 2> 3190177 INFO (qtp569252489-119923) [n:127.0.0.1:60204_
c:collection1 s:shard1 x:collection1_shard1_replica_t21] o.a.s.u.HdfsUpdateLog
Initializing HdfsUpdateLog: tlogDfsReplication=2
[junit4] 2> 3190186 INFO (qtp569252489-119923) [n:127.0.0.1:60204_
c:collection1 s:shard1 x:collection1_shard1_replica_t21] o.a.s.u.CommitTracker
Hard AutoCommit: disabled
[junit4] 2> 3190186 INFO (qtp569252489-119923) [n:127.0.0.1:60204_
c:collection1 s:shard1 x:collection1_shard1_replica_t21] o.a.s.u.CommitTracker
Soft AutoCommit: disabled
[junit4] 2> 3190189 INFO (qtp569252489-119923) [n:127.0.0.1:60204_
c:collection1 s:shard1 x:collection1_shard1_replica_t21]
o.a.s.u.RandomMergePolicy RandomMergePolicy wrapping class
org.apache.lucene.index.LogDocMergePolicy: [LogDocMergePolicy:
minMergeSize=1000, mergeFactor=7, maxMergeSize=9223372036854775807,
maxMergeSizeForForcedMerge=9223372036854775807, calibrateSizeByDeletes=false,
maxMergeDocs=2147483647, maxCFSSegmentSizeMB=8.796093022207999E12,
noCFSRatio=0.0]
[junit4] 2> 3190197 INFO (qtp569252489-119923) [n:127.0.0.1:60204_
c:collection1 s:shard1 x:collection1_shard1_replica_t21]
o.a.s.s.SolrIndexSearcher Opening
[Searcher@b86a5bd[collection1_shard1_replica_t21] main]
[junit4] 2> 3190198 INFO (qtp569252489-119923) [n:127.0.0.1:60204_
c:collection1 s:shard1 x:collection1_shard1_replica_t21]
o.a.s.r.ManagedResourceStorage Configured ZooKeeperStorageIO with znodeBase:
/configs/conf1
[junit4] 2> 3190198 INFO (qtp569252489-119923) [n:127.0.0.1:60204_
c:collection1 s:shard1 x:collection1_shard1_replica_t21]
o.a.s.r.ManagedResourceStorage Loaded null at path _rest_managed.json using
ZooKeeperStorageIO:path=/configs/conf1
[junit4] 2> 3190198 INFO (qtp569252489-119923) [n:127.0.0.1:60204_
c:collection1 s:shard1 x:collection1_shard1_replica_t21]
o.a.s.h.ReplicationHandler Commits will be reserved for 10000ms.
[junit4] 2> 3190199 INFO
(searcherExecutor-3473-thread-1-processing-n:127.0.0.1:60204_
x:collection1_shard1_replica_t21 c:collection1 s:shard1) [n:127.0.0.1:60204_
c:collection1 s:shard1 x:collection1_shard1_replica_t21] o.a.s.c.SolrCore
[collection1_shard1_replica_t21] Registered new searcher
Searcher@b86a5bd[collection1_shard1_replica_t21]
main{ExitableDirectoryReader(UninvertingDirectoryReader())}
[junit4] 2> 3190199 INFO (qtp569252489-119923) [n:127.0.0.1:60204_
c:collection1 s:shard1 x:collection1_shard1_replica_t21] o.a.s.u.UpdateLog
Could not find max version in index or recent updates, using new clock
1596675024202235904
[junit4] 2> 3190203 INFO (qtp569252489-119923) [n:127.0.0.1:60204_
c:collection1 s:shard1 x:collection1_shard1_replica_t21] o.a.s.c.ZkShardTerms
Successful update of terms at /collections/collection1/terms/shard1 to
Terms{values={core_node22=0}, version=0}
[junit4] 2> 3190205 INFO (qtp569252489-119923) [n:127.0.0.1:60204_
c:collection1 s:shard1 x:collection1_shard1_replica_t21]
o.a.s.c.ShardLeaderElectionContext Enough replicas found to continue.
[junit4] 2> 3190205 INFO (qtp569252489-119923) [n:127.0.0.1:60204_
c:collection1 s:shard1 x:collection1_shard1_replica_t21]
o.a.s.c.ShardLeaderElectionContext I may be the new leader - try and sync
[junit4] 2> 3190205 INFO (qtp569252489-119923) [n:127.0.0.1:60204_
c:collection1 s:shard1 x:collection1_shard1_replica_t21] o.a.s.c.SyncStrategy
Sync replicas to http://127.0.0.1:60204/collection1_shard1_replica_t21/
[junit4] 2> 3190205 INFO (qtp569252489-119923) [n:127.0.0.1:60204_
c:collection1 s:shard1 x:collection1_shard1_replica_t21] o.a.s.c.SyncStrategy
Sync Success - now sync replicas to me
[junit4] 2> 3190205 INFO (qtp569252489-119923) [n:127.0.0.1:60204_
c:collection1 s:shard1 x:collection1_shard1_replica_t21] o.a.s.c.SyncStrategy
http://127.0.0.1:60204/collection1_shard1_replica_t21/ has no replicas
[junit4] 2> 3190205 INFO (qtp569252489-119923) [n:127.0.0.1:60204_
c:collection1 s:shard1 x:collection1_shard1_replica_t21]
o.a.s.c.ShardLeaderElectionContext Found all replicas participating in
election, clear LIR
[junit4] 2> 3190206 INFO (qtp569252489-119923) [n:127.0.0.1:60204_
c:collection1 s:shard1 x:collection1_shard1_replica_t21] o.a.s.c.ZkController
collection1_shard1_replica_t21 stopping background replication from leader
[junit4] 2> 3190210 INFO (qtp569252489-119923) [n:127.0.0.1:60204_
c:collection1 s:shard1 x:collection1_shard1_replica_t21]
o.a.s.c.ShardLeaderElectionContext I am the new leader:
http://127.0.0.1:60204/collection1_shard1_replica_t21/ shard1
[junit4] 2> 3190311 INFO (zkCallback-1981-thread-1) [ ]
o.a.s.c.c.ZkStateReader A cluster state change: [WatchedEvent
state:SyncConnected type:NodeDataChanged
path:/collections/collection1/state.json] for collection [collection1] has
occurred - updating... (live nodes size: [2])
[junit4] 2> 3190360 INFO (qtp569252489-119923) [n:127.0.0.1:60204_
c:collection1 s:shard1 x:collection1_shard1_replica_t21] o.a.s.c.ZkController
I am the leader, no recovery necessary
[junit4] 2> 3190362 INFO (qtp569252489-119923) [n:127.0.0.1:60204_
c:collection1 s:shard1 x:collection1_shard1_replica_t21] o.a.s.s.HttpSolrCall
[admin] webapp=null path=/admin/cores
params={qt=/admin/cores&collection.configName=conf1&name=collection1_shard1_replica_t21&action=CREATE&collection=collection1&shard=shard1&wt=javabin&version=2&replicaType=TLOG}
status=0 QTime=1483
[junit4] 2> 3190364 INFO (qtp1257677904-119863) [n:127.0.0.1:48789_ ]
o.a.s.s.HttpSolrCall [admin] webapp=null path=/admin/collections
params={node=127.0.0.1:60204_&action=ADDREPLICA&collection=collection1&shard=shard1&type=TLOG&wt=javabin&version=2}
status=0 QTime=1491
[junit4] 2> 3190462 INFO (zkCallback-1981-thread-1) [ ]
o.a.s.c.c.ZkStateReader A cluster state change: [WatchedEvent
state:SyncConnected type:NodeDataChanged
path:/collections/collection1/state.json] for collection [collection1] has
occurred - updating... (live nodes size: [2])
[junit4] 2> 3190466 INFO
(TEST-StressHdfsTest.test-seed#[5A3400F4114CAED4]) [ ]
o.a.s.c.AbstractFullDistribZkTestBase create jetty 2 in directory
/x1/jenkins/jenkins-slave/workspace/Lucene-Solr-NightlyTests-master/checkout/solr/build/solr-core/test/J0/temp/solr.cloud.hdfs.StressHdfsTest_5A3400F4114CAED4-001/shard-2-001
of type TLOG
[junit4] 2> 3190466 INFO
(TEST-StressHdfsTest.test-seed#[5A3400F4114CAED4]) [ ] o.e.j.s.Server
jetty-9.4.8.v20171121, build timestamp: 2017-11-21T14:27:37-07:00, git hash:
82b8fb23f757335bb3329d540ce37a2a2615f0a8
[junit4] 2> 3190467 INFO
(TEST-StressHdfsTest.test-seed#[5A3400F4114CAED4]) [ ] o.e.j.s.session
DefaultSessionIdManager workerName=node0
[junit4] 2> 3190467 INFO
(TEST-StressHdfsTest.test-seed#[5A3400F4114CAED4]) [ ] o.e.j.s.session No
SessionScavenger set, using defaults
[junit4] 2> 3190467 INFO
(TEST-StressHdfsTest.test-seed#[5A3400F4114CAED4]) [ ] o.e.j.s.session
Scavenging every 600000ms
[junit4] 2> 3190468 INFO
(TEST-StressHdfsTest.test-seed#[5A3400F4114CAED4]) [ ]
o.e.j.s.h.ContextHandler Started
o.e.j.s.ServletContextHandler@30c84fb7{/,null,AVAILABLE}
[junit4] 2> 3190468 INFO
(TEST-StressHdfsTest.test-seed#[5A3400F4114CAED4]) [ ]
o.e.j.s.AbstractConnector Started
ServerConnector@6a053a43{HTTP/1.1,[http/1.1]}{127.0.0.1:54936}
[junit4] 2> 3190468 INFO
(TEST-StressHdfsTest.test-seed#[5A3400F4114CAED4]) [ ] o.e.j.s.Server
Started @3190509ms
[junit4] 2> 3190468 INFO
(TEST-StressHdfsTest.test-seed#[5A3400F4114CAED4]) [ ]
o.a.s.c.s.e.JettySolrRunner Jetty properties:
{solr.data.dir=hdfs://localhost:52191/hdfs__localhost_52191__x1_jenkins_jenkins-slave_workspace_Lucene-Solr-NightlyTests-master_checkout_solr_build_solr-core_test_J0_temp_solr.cloud.hdfs.StressHdfsTest_5A3400F4114CAED4-001_tempDir-002_jetty2,
replicaType=TLOG, solrconfig=solrconfig.xml, hostContext=/, hostPort=54936,
coreRootDirectory=/x1/jenkins/jenkins-slave/workspace/Lucene-Solr-NightlyTests-master/checkout/solr/build/solr-core/test/J0/temp/solr.cloud.hdfs.StressHdfsTest_5A3400F4114CAED4-001/shard-2-001/cores}
[junit4] 2> 3190468 ERROR
(TEST-StressHdfsTest.test-seed#[5A3400F4114CAED4]) [ ]
o.a.s.u.StartupLoggingUtils Missing Java Option solr.log.dir. Logging may be
missing or incomplete.
[junit4] 2> 3190473 INFO
(TEST-StressHdfsTest.test-seed#[5A3400F4114CAED4]) [ ]
o.a.s.s.SolrDispatchFilter Using logger factory
org.apache.logging.slf4j.Log4jLoggerFactory
[junit4] 2> 3190473 INFO
(TEST-StressHdfsTest.test-seed#[5A3400F4114CAED4]) [ ]
o.a.s.s.SolrDispatchFilter ___ _ Welcome to Apache Solr™ version
8.0.0
[junit4] 2> 3190473 INFO
(TEST-StressHdfsTest.test-seed#[5A3400F4114CAED4]) [ ]
o.a.s.s.SolrDispatchFilter / __| ___| |_ _ Starting in cloud mode on port null
[junit4] 2> 3190473 INFO
(TEST-StressHdfsTest.test-seed#[5A3400F4114CAED4]) [ ]
o.a.s.s.SolrDispatchFilter \__ \/ _ \ | '_| Install dir: null
[junit4] 2> 3190473 INFO
(TEST-StressHdfsTest.test-seed#[5A3400F4114CAED4]) [ ]
o.a.s.s.SolrDispatchFilter |___/\___/_|_| Start time:
2018-04-02T22:26:02.503Z
[junit4] 2> 3190475 INFO (zkConnectionManagerCallback-1989-thread-1) [
] o.a.s.c.c.ConnectionManager zkClient has connected
[junit4] 2> 3190476 INFO
(TEST-StressHdfsTest.test-seed#[5A3400F4114CAED4]) [ ]
o.a.s.s.SolrDispatchFilter Loading solr.xml from SolrHome (not found in
ZooKeeper)
[junit4] 2> 3190476 INFO
(TEST-StressHdfsTest.test-seed#[5A3400F4114CAED4]) [ ] o.a.s.c.SolrXmlConfig
Loading container configuration from
/x1/jenkins/jenkins-slave/workspace/Lucene-Solr-NightlyTests-master/checkout/solr/build/solr-core/test/J0/temp/solr.cloud.hdfs.StressHdfsTest_5A3400F4114CAED4-001/shard-2-001/solr.xml
[junit4] 2> 3190479 INFO
(TEST-StressHdfsTest.test-seed#[5A3400F4114CAED4]) [ ] o.a.s.c.SolrXmlConfig
Configuration parameter autoReplicaFailoverWorkLoopDelay is ignored
[junit4] 2> 3190479 INFO
(TEST-StressHdfsTest.test-seed#[5A3400F4114CAED4]) [ ] o.a.s.c.SolrXmlConfig
Configuration parameter autoReplicaFailoverBadNodeExpiration is ignored
[junit4] 2> 3190480 INFO
(TEST-StressHdfsTest.test-seed#[5A3400F4114CAED4]) [ ] o.a.s.c.SolrXmlConfig
MBean server found: com.sun.jmx.mbeanserver.JmxMBeanServer@22e258d0, but no JMX
reporters were configured - adding default JMX reporter.
[junit4] 2> 3190486 INFO
(TEST-StressHdfsTest.test-seed#[5A3400F4114CAED4]) [ ] o.a.s.c.ZkContainer
Zookeeper client=127.0.0.1:60623/solr
[junit4] 2> 3190487 INFO (zkConnectionManagerCallback-1993-thread-1) [
] o.a.s.c.c.ConnectionManager zkClient has connected
[junit4] 2> 3190488 INFO (zkConnectionManagerCallback-1995-thread-1) [
] o.a.s.c.c.ConnectionManager zkClient has connected
[junit4] 2> 3190492 INFO
(TEST-StressHdfsTest.test-seed#[5A3400F4114CAED4]) [n:127.0.0.1:54936_ ]
o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (0) -> (2)
[junit4] 2> 3190494 INFO
(TEST-StressHdfsTest.test-seed#[5A3400F4114CAED4]) [n:127.0.0.1:54936_ ]
o.a.s.c.Overseer Overseer (id=null) closing
[junit4] 2> 3190495 INFO
(TEST-StressHdfsTest.test-seed#[5A3400F4114CAED4]) [n:127.0.0.1:54936_ ]
o.a.s.c.TransientSolrCoreCacheDefault Allocating transient cache for 4
transient cores
[junit4] 2> 3190495 INFO
(TEST-StressHdfsTest.test-seed#[5A3400F4114CAED4]) [n:127.0.0.1:54936_ ]
o.a.s.c.ZkController Register node as live in
ZooKeeper:/live_nodes/127.0.0.1:54936_
[junit4] 2> 3190496 INFO (zkCallback-1965-thread-1) [ ]
o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (2) -> (3)
[junit4] 2> 3190496 INFO (zkCallback-1960-thread-2) [ ]
o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (2) -> (3)
[junit4] 2> 3190496 INFO (zkCallback-1973-thread-1) [ ]
o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (2) -> (3)
[junit4] 2> 3190497 INFO (zkCallback-1986-thread-1) [ ]
o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (2) -> (3)
[junit4] 2> 3190497 INFO (zkCallback-1981-thread-1) [ ]
o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (2) -> (3)
[junit4] 2> 3190498 INFO (zkCallback-1994-thread-1) [ ]
o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (2) -> (3)
[junit4] 2> 3190588 INFO
(TEST-StressHdfsTest.test-seed#[5A3400F4114CAED4]) [n:127.0.0.1:54936_ ]
o.a.s.m.r.SolrJmxReporter JMX monitoring for 'solr.node' (registry 'solr.node')
enabled at server: com.sun.jmx.mbeanserver.JmxMBeanServer@22e258d0
[junit4] 2> 3190596 INFO
(TEST-StressHdfsTest.test-seed#[5A3400F4114CAED4]) [n:127.0.0.1:54936_ ]
o.a.s.m.r.SolrJmxReporter JMX monitoring for 'solr.jvm' (registry 'solr.jvm')
enabled at server: com.sun.jmx.mbeanserver.JmxMBeanServer@22e258d0
[junit4] 2> 3190596 INFO
(TEST-StressHdfsTest.test-seed#[5A3400F4114CAED4]) [n:127.0.0.1:54936_ ]
o.a.s.m.r.SolrJmxReporter JMX monitoring for 'solr.jetty' (registry
'solr.jetty') enabled at server: com.sun.jmx.mbeanserver.JmxMBeanServer@22e258d0
[junit4] 2> 3190598 INFO
(TEST-StressHdfsTest.test-seed#[5A3400F4114CAED4]) [n:127.0.0.1:54936_ ]
o.a.s.c.CorePropertiesLocator Found 0 core definitions underneath
/x1/jenkins/jenkins-slave/workspace/Lucene-Solr-NightlyTests-master/checkout/solr/build/solr-core/test/J0/temp/solr.cloud.hdfs.StressHdfsTest_5A3400F4114CAED4-001/shard-2-001/cores
[junit4] 2> 3190601 INFO (zkConnectionManagerCallback-2000-thread-1) [
] o.a.s.c.c.ConnectionManager zkClient has connected
[junit4] 2> 3190602 INFO
(TEST-StressHdfsTest.test-seed#[5A3400F4114CAED4]) [n:127.0.0.1:54936_ ]
o.a.s.c.c.ZkStateReader Updated live nodes from ZooKeeper... (0) -> (3)
[junit4] 2> 3190603 INFO
(TEST-StressHdfsTest.test-seed#[5A3400F4114CAED4]) [n:127.0.0.1:54936_ ]
o.a.s.c.s.i.ZkClientClusterStateProvider Cluster at 127.0.0.1:60623/solr ready
[junit4] 2> 3190632 INFO (qtp1257677904-119865) [n:127.0.0.1:48789_ ]
o.a.s.h.a.CollectionsHandler Invoked Collection Action :addreplica with params
node=127.0.0.1:54936_&action=ADDREPLICA&collection=collection1&shard=shard1&type=TLOG&wt=javabin&version=2
and sendToOCPQueue=true
[junit4] 2> 3190633 INFO
(OverseerCollectionConfigSetProcessor-73562291366002692-127.0.0.1:48789_-n_0000000000)
[ ] o.a.s.c.OverseerTaskQueue Response ZK path:
/overseer/collection-queue-work/qnr-0000000004 doesn't exist. Requestor may
have disconnected from ZooKeeper
[junit4] 2> 3190634 INFO (OverseerThreadFactory-3459-thread-4) [ ]
o.a.s.c.a.c.AddReplicaCmd Node Identified 127.0.0.1:54936_ for creating new
replica
[junit4] 2> 3190636 INFO (qtp1673250412-119966) [n:127.0.0.1:54936_ ]
o.a.s.h.a.CoreAdminOperation core create command
qt=/admin/cores&collection.configName=conf1&name=collection1_shard1_replica_t23&action=CREATE&collection=collection1&shard=shard1&wt=javabin&version=2&replicaType=TLOG
[junit4] 2> 3190741 INFO (zkCallback-1981-thread-1) [ ]
o.a.s.c.c.ZkStateReader A cluster state change: [WatchedEvent
state:SyncConnected type:NodeDataChanged
path:/collections/collection1/state.json] for collection [collection1] has
occurred - updating... (live nodes size: [3])
[junit4] 2> 3190741 INFO (zkCallback-1994-thread-1) [ ]
o.a.s.c.c.ZkStateReader A cluster state change: [WatchedEvent
state:SyncConnected type:NodeDataChanged
path:/collections/collection1/state.json] for collection [collection1] has
occurred - updating... (live nodes size: [3])
[junit4] 2> 3191650 INFO (qtp1673250412-119966) [n:127.0.0.1:54936_
c:collection1 s:shard1 x:collection1_shard1_replica_t23] o.a.s.c.SolrConfig
Using Lucene MatchVersion: 8.0.0
[junit4] 2> 3191662 INFO (qtp1673250412-119966) [n:127.0.0.1:54936_
c:collection1 s:shard1 x:collection1_shard1_replica_t23] o.a.s.s.IndexSchema
[collection1_shard1_replica_t23] Schema name=test
[junit4] 2> 3191766 INFO (qtp1673250412-119966) [n:127.0.0.1:54936_
c:collection1 s:shard1 x:collection1_shard1_replica_t23] o.a.s.s.IndexSchema
Loaded schema test/1.0 with uniqueid field id
[junit4] 2> 3191777 INFO (qtp1673250412-119966) [n:127.0.0.1:54936_
c:collection1 s:shard1 x:collection1_shard1_replica_t23] o.a.s.c.CoreContainer
Creating SolrCore 'collection1_shard1_replica_t23' using configuration from
collection collection1, trusted=true
[junit4] 2> 3191778 INFO (qtp1673250412-119966) [n:127.0.0.1:54936_
c:collection1 s:shard1 x:collection1_shard1_replica_t23]
o.a.s.m.r.SolrJmxReporter JMX monitoring for
'solr.core.collection1.shard1.replica_t23' (registry
'solr.core.collection1.shard1.replica_t23') enabled at server:
com.sun.jmx.mbeanserver.JmxMBeanServer@22e258d0
[junit4] 2> 3191778 INFO (qtp1673250412-119966) [n:127.0.0.1:54936_
c:collection1 s:shard1 x:collection1_shard1_replica_t23]
o.a.s.c.HdfsDirectoryFactory
solr.hdfs.home=hdfs://localhost:52191/solr_hdfs_home
[junit4] 2> 3191778 INFO (qtp1673250412-119966) [n:127.0.0.1:54936_
c:collection1 s:shard1 x:collection1_shard1_replica_t23]
o.a.s.c.HdfsDirectoryFactory Solr Kerberos Authentication disabled
[junit4] 2> 3191778 INFO (qtp1673250412-119966) [n:127.0.0.1:54936_
c:collection1 s:shard1 x:collection1_shard1_replica_t23] o.a.s.c.SolrCore
solr.RecoveryStrategy.Builder
[junit4] 2> 3191778 INFO (qtp1673250412-119966) [n:127.0.0.1:54936_
c:collection1 s:shard1 x:collection1_shard1_replica_t23] o.a.s.c.SolrCore
[[collection1_shard1_replica_t23] ] Opening new SolrCore at
[/x1/jenkins/jenkins-slave/workspace/Lucene-Solr-NightlyTests-master/checkout/solr/build/solr-core/test/J0/temp/solr.cloud.hdfs.StressHdfsTest_5A3400F4114CAED4-001/shard-2-001/cores/collection1_shard1_replica_t23],
dataDir=[hdfs://localhost:52191/solr_hdfs_home/collection1/core_node24/data/]
[junit4] 2> 3191779 INFO (qtp1673250412-119966) [n:127.0.0.1:54936_
c:collection1 s:shard1 x:collection1_shard1_replica_t23]
o.a.s.c.HdfsDirectoryFactory creating directory factory for path
hdfs://localhost:52191/solr_hdfs_home/collection1/core_node24/data/snapshot_metadata
[junit4] 2> 3191785 INFO (qtp1673250412-119966) [n:127.0.0.1:54936_
c:collection1 s:shard1 x:collection1_shard1_replica_t23]
o.a.s.c.HdfsDirectoryFactory Number of slabs of block cache [1] with direct
memory allocation set to [true]
[junit4] 2> 3191785 INFO (qtp1673250412-119966) [n:127.0.0.1:54936_
c:collection1 s:shard1 x:collection1_shard1_replica_t23]
o.a.s.c.HdfsDirectoryFactory Block cache target memory usage, slab size of
[8388608] will allocate [1] slabs and use ~[8388608] bytes
[junit4] 2> 3191785 INFO (qtp1673250412-119966) [n:127.0.0.1:54936_
c:collection1 s:shard1 x:collection1_shard1_replica_t23]
o.a.s.c.HdfsDirectoryFactory Creating new single instance HDFS BlockCache
[junit4] 2> 3191801 INFO (qtp1673250412-119966) [n:127.0.0.1:54936_
c:collection1 s:shard1 x:collection1_shard1_replica_t23]
o.a.s.s.b.BlockDirectory Block cache on write is disabled
[junit4] 2> 3191801 INFO (qtp1673250412-119966) [n:127.0.0.1:54936_
c:collection1 s:shard1 x:collection1_shard1_replica_t23]
o.a.s.c.HdfsDirectoryFactory creating directory factory for path
hdfs://localhost:52191/solr_hdfs_home/collection1/core_node24/data
[junit4] 2> 3191815 INFO (qtp1673250412-119966) [n:127.0.0.1:54936_
c:collection1 s:shard1 x:collection1_shard1_replica_t23]
o.a.s.c.HdfsDirectoryFactory creating directory factory for path
hdfs://localhost:52191/solr_hdfs_home/collection1/core_node24/data/index
[junit4] 2> 3191820 INFO (qtp1673250412-119966) [n:127.0.0.1:54936_
c:collection1 s:shard1 x:collection1_shard1_replica_t23]
o.a.s.c.HdfsDirectoryFactory Number of slabs of block cache [1] with direct
memory allocation set to [true]
[junit4] 2> 3191820 INFO (qtp1673250412-119966) [n:127.0.0.1:54936_
c:collection1 s:shard1 x:collection1_shard1_replica_t23]
o.a.s.c.HdfsDirectoryFactory Block cache target memory usage, slab size of
[8388608] will allocate [1] slabs and use ~[8388608] bytes
[junit4] 2> 3191820 INFO (qtp1673250412-119966) [n:127.0.0.1:54936_
c:collection1 s:shard1 x:collection1_shard1_replica_t23]
o.a.s.c.HdfsDirectoryFactory Creating new single instance HDFS BlockCache
[junit4] 2> 3191863 INFO (qtp1673250412-119966) [n:127.0.0.1:54936_
c:collection1 s:shard1 x:collection1_shard1_replica_t23]
o.a.s.s.b.BlockDirectory Block cache on write is disabled
[junit4] 2> 3191863 INFO (qtp1673250412-119966) [n:127.0.0.1:54936_
c:collection1 s:shard1 x:collection1_shard1_replica_t23]
o.a.s.u.RandomMergePolicy RandomMergePolicy wrapping class
org.apache.lucene.index.LogByteSizeMergePolicy: [LogByteSizeMergePolicy:
minMergeSize=1677721, mergeFactor=50, maxMergeSize=2147483648,
maxMergeSizeForForcedMerge=9223372036854775807, calibrateSizeByDeletes=false,
maxMergeDocs=2147483647, maxCFSSegmentSizeMB=8.796093022207999E12,
noCFSRatio=1.0]
[junit4] 2> 3191870 INFO (Block report processor) [ ] BlockStateChange
BLOCK* addStoredBlock: blockMap updated: 127.0.0.1:39124 is added to
blk_1073741827_1003{UCState=UNDER_CONSTRUCTION, truncateBlock=null,
primaryNodeIndex=-1,
replicas=[ReplicaUC[[DISK]DS-f351dada-f296-4537-8f9d-185b1b50bd01:NORMAL:127.0.0.1:57980|RBW],
ReplicaUC[[DISK]DS-ee411a95-0395-42e9-8683-a13d875dc331:NORMAL:127.0.0.1:39124|RBW]]}
size 0
[junit4] 2> 3191871 INFO (Block report processor) [ ] BlockStateChange
BLOCK* addStoredBlock: blockMap updated: 127.0.0.1:57980 is added to
blk_1073741827_1003{UCState=UNDER_CONSTRUCTION, truncateBlock=null,
primaryNodeIndex=-1,
replicas=[ReplicaUC[[DISK]DS-f351dada-f296-4537-8f9d-185b1b50bd01:NORMAL:127.0.0.1:57980|RBW],
ReplicaUC[[DISK]DS-ee411a95-0395-42e9-8683-a13d875dc331:NORMAL:127.0.0.1:39124|RBW]]}
size 0
[junit4] 2> 3191874 WARN (qtp1673250412-119966) [n:127.0.0.1:54936_
c:collection1 s:shard1 x:collection1_shard1_replica_t23]
o.a.s.c.RequestHandlers INVALID paramSet a in requestHandler {type =
requestHandler,name = /dump,class = DumpRequestHandler,attributes =
{initParams=a, name=/dump, class=DumpRequestHandler},args =
{defaults={a=A,b=B}}}
[junit4] 2> 3191935 INFO (qtp1673250412-119966) [n:127.0.0.1:54936_
c:collection1 s:shard1 x:collection1_shard1_replica_t23] o.a.s.u.UpdateHandler
Using UpdateLog implementation: org.apache.solr.update.HdfsUpdateLog
[junit4] 2> 3191935 INFO (qtp1673250412-119966) [n:127.0.0.1:54936_
c:collection1 s:shard1 x:collection1_shard1_replica_t23] o.a.s.u.UpdateLog
Initializing UpdateLog: dataDir= defaultSyncLevel=FLUSH numRecordsToKeep=100
maxNumLogsToKeep=10 numVersionBuckets=65536
[junit4] 2> 3191935 INFO (qtp1673250412-119966) [n:127.0.0.1:54936_
c:collection1 s:shard1 x:collection1_shard1_replica_t23] o.a.s.u.HdfsUpdateLog
Initializing HdfsUpdateLog: tlogDfsReplication=2
[junit4] 2> 3191948 INFO (qtp1673250412-119966) [n:127.0.0.1:54936_
c:collection1 s:shard1 x:collection1_shard1_replica_t23] o.a.s.u.CommitTracker
Hard AutoCommit: disabled
[junit4] 2> 3191948 INFO (qtp1673250412-119966) [n:127.0.0.1:54936_
c:collection1 s:shard1 x:collection1_shard1_replica_t23] o.a.s.u.CommitTracker
Soft AutoCommit: disabled
[junit4] 2> 3191950 INFO (qtp1673250412-119966) [n:127.0.0.1:54936_
c:collection1 s:shard1 x:collection1_shard1_replica_t23]
o.a.s.u.RandomMergePolicy RandomMergePolicy wrapping class
org.apache.lucene.index.LogDocMergePolicy: [LogDocMergePolicy:
minMergeSize=1000, mergeFactor=7, maxMergeSize=9223372036854775807,
maxMergeSizeForForcedMerge=9223372036854775807, calibrateSizeByDeletes=false,
maxMergeDocs=2147483647, maxCFSSegmentSizeMB=8.796093022207999E12,
noCFSRatio=0.0]
[junit4] 2> 3191954 INFO (qtp1673250412-119966) [n:127.0.0.1:54936_
c:collection1 s:shard1 x:collection1_shard1_replica_t23]
o.a.s.s.SolrIndexSearcher Opening
[Searcher@52bf152a[collection1_shard1_replica_t23] main]
[junit4] 2> 3191955 INFO (qtp1673250412-119966) [n:127.0.0.1:54936_
c:collection1 s:shard1 x:collection1_shard1_replica_t23]
o.a.s.r.ManagedResourceStorage Configured ZooKeeperStorageIO with znodeBase:
/configs/conf1
[junit4] 2> 3191956 INFO (qtp1673250412-119966) [n:127.0.0.1:54936_
c:collection1 s:shard1 x:collection1_shard1_replica_t23]
o.a.s.r.ManagedResourceStorage Loaded null at path _rest_managed.json using
ZooKeeperStorageIO:path=/configs/conf1
[junit4] 2> 3191956 INFO (qtp1673250412-119966) [n:127.0.0.1:54936_
c:collection1 s:shard1 x:collection1_shard1_replica_t23]
o.a.s.h.ReplicationHandler Commits will be reserved for 10000ms.
[junit4] 2> 3191957 INFO (qtp1673250412-119966) [n:127.0.0.1:54936_
c:collection1 s:shard1 x:collection1_shard1_replica_t23] o.a.s.u.UpdateLog
Could not find max version in index or recent updates, using new clock
1596675026045632512
[junit4] 2> 3191958 INFO
(searcherExecutor-3484-thread-1-processing-n:127.0.0.1:54936_
x:collection1_shard1_replica_t23 c:collection1 s:shard1) [n:127.0.0.1:54936_
c:collection1 s:shard1 x:collection1_shard1_replica_t23] o.a.s.c.SolrCore
[collection1_shard1_replica_t23] Registered new searcher
Searcher@52bf152a[collection1_shard1_replica_t23]
main{ExitableDirectoryReader(UninvertingDirectoryReader())}
[junit4] 2> 3191960 INFO (qtp1673250412-119966) [n:127.0.0.1:54936_
c:collection1 s:shard1 x:collection1_shard1_replica_t23] o.a.s.c.ZkShardTerms
Successful update of terms at /collections/collection1/terms/shard1 to
Terms{values={core_node24=0, core_node22=0}, version=1}
[junit4] 2> 3191961 INFO (qtp1673250412-119966) [n:127.0.0.1:54936_
c:collection1 s:shard1 x:collection1_shard1_replica_t23] o.a.s.c.ZkController
Core needs to recover:collection1_shard1_replica_t23
[junit4] 2> 3191961 INFO
(updateExecutor-1990-thread-1-processing-n:127.0.0.1:54936_
x:collection1_shard1_replica_t23 c:collection1 s:shard1) [n:127.0.0.1:54936_
c:collection1 s:shard1 r:core_node24 x:collection1_shard1_replica_t23]
o.a.s.u.DefaultSolrCoreState Running recovery
[junit4] 2> 3191962 INFO (qtp1673250412-119966) [n:127.0.0.1:54936_
c:collection1 s:shard1 x:collection1_shard1_replica_t23] o.a.s.s.HttpSolrCall
[admin] webapp=null path=/admin/cores
params={qt=/admin/cores&collection.configName=conf1&name=collection1_shard1_replica_t23&action=CREATE&collection=collection1&shard=shard1&wt=javabin&version=2&replicaType=TLOG}
status=0 QTime=1325
[junit4] 2> 3191962 INFO
(recoveryExecutor-1991-thread-1-processing-n:127.0.0.1:54936_
x:collection1_shard1_replica_t23 c:collection1 s:shard1 r:core_node24)
[n:127.0.0.1:54936_ c:collection1 s:shard1 r:core_node24
x:collection1_shard1_replica_t23] o.a.s.c.RecoveryStrategy Starting recovery
process. recoveringAfterStartup=true
[junit4] 2> 3191963 INFO
(recoveryExecutor-1991-thread-1-processing-n:127.0.0.1:54936_
x:collection1_shard1_replica_t23 c:collection1 s:shard1 r:core_node24)
[n:127.0.0.1:54936_ c:collection1 s:shard1 r:core_node24
x:collection1_shard1_replica_t23] o.a.s.c.RecoveryStrategy ######
startupVersions=[[]]
[junit4] 2> 3191963 INFO
(recoveryExecutor-1991-thread-1-processing-n:127.0.0.1:54936_
x:collection1_shard1_replica_t23 c:collection1 s:shard1 r:core_node24)
[n:127.0.0.1:54936_ c:collection1 s:shard1 r:core_node24
x:collection1_shard1_replica_t23] o.a.s.c.ZkController
collection1_shard1_replica_t23 stopping background replication from leader
[junit4] 2> 3191967 INFO (qtp569252489-119927) [n:127.0.0.1:60204_
c:collection1 s:shard1 r:core_node22 x:collection1_shard1_replica_t21]
o.a.s.c.S.Request [collection1_shard1_replica_t21] webapp= path=/admin/ping
params={wt=javabin&version=2} hits=0 status=0 QTime=0
[junit4] 2> 3191967 INFO (qtp569252489-119927) [n:127.0.0.1:60204_
c:collection1 s:shard1 r:core_node22 x:collection1_shard1_replica_t21]
o.a.s.c.S.Request [collection1_shard1_replica_t21] webapp= path=/admin/ping
params={wt=javabin&version=2} status=0 QTime=0
[junit4] 2> 3191968 INFO (qtp1257677904-119865) [n:127.0.0.1:48789_ ]
o.a.s.s.HttpSolrCall [admin] webapp=null path=/admin/collections
params={node=127.0.0.1:54936_&action=ADDREPLICA&collection=collection1&shard=shard1&type=TLOG&wt=javabin&version=2}
status=0 QTime=1336
[junit4] 2> 3191970 INFO
(recoveryExecutor-1991-thread-1-processing-n:127.0.0.1:54936_
x:collection1_shard1_replica_t23 c:collection1 s:shard1 r:core_node24)
[n:127.0.0.1:54936_ c:collection1 s:shard1 r:core_node24
x:collection1_shard1_replica_t23] o.a.s.c.RecoveryStrategy Begin buffering
updates. core=[collection1_shard1_replica_t23]
[junit4] 2> 3191970 INFO
(recoveryExecutor-1991-thread-1-processing-n:127.0.0.1:54936_
x:collection1_shard1_replica_t23 c:collection1 s:shard1 r:core_node24)
[n:127.0.0.1:54936_ c:collection1 s:shard1 r:core_node24
x:collection1_shard1_replica_t23] o.a.s.u.UpdateLog Starting to buffer updates.
HDFSUpdateLog{state=ACTIVE, tlog=null}
[junit4] 2> 3191970 INFO
(recoveryExecutor-1991-thread-1-processing-n:127.0.0.1:54936_
x:collection1_shard1_replica_t23 c:collection1 s:shard1 r:core_node24)
[n:127.0.0.1:54936_ c:collectio
[...truncated too long message...]
est params are: codec=Asserting(Lucene70): {field=PostingsFormat(name=Direct),
foo=PostingsFormat(name=Direct), id=PostingsFormat(name=Memory)}, docValues:{},
maxPointsInLeafNode=1936, maxMBSortInHeap=6.998831990999572,
sim=Asserting(org.apache.lucene.search.similarities.AssertingSimilarity@1f45de5e),
locale=ar-YE, timezone=America/Sitka
[junit4] 2> NOTE: Linux 3.13.0-88-generic amd64/Oracle Corporation
1.8.0_152 (64-bit)/cpus=4,threads=1,free=376373592,total=529006592
[junit4] 2> NOTE: All tests run in this JVM: [HdfsDirectoryTest,
TestAuthenticationFramework, TestUseDocValuesAsStored2, TestFieldCache,
CollectionTooManyReplicasTest, TestCloudInspectUtil, SystemInfoHandlerTest,
SecurityConfHandlerTest, DistributedIntervalFacetingTest, TestCustomStream,
SpellCheckCollatorWithCollapseTest, SyncSliceTest, TestCloudManagedSchema,
XsltUpdateRequestHandlerTest, TestAddFieldRealTimeGet,
TestCursorMarkWithoutUniqueKey, TestManagedResource, CoreAdminOperationTest,
TestNRTOpen, CloudExitableDirectoryReaderTest, TestReload, RequiredFieldsTest,
TestTolerantUpdateProcessorRandomCloud, TestCloudPseudoReturnFields,
PKIAuthenticationIntegrationTest, DirectSolrConnectionTest, MoveReplicaTest,
RemoteQueryErrorTest, TestElisionMultitermQuery, TestPostingsSolrHighlighter,
TestEmbeddedSolrServerSchemaAPI, OverseerTest, ExecutePlanActionTest,
TestFileDictionaryLookup, SampleTest, TestSerializedLuceneMatchVersion,
TestSurroundQueryParser, SoftAutoCommitTest, HDFSCollectionsAPITest,
TestSchemalessBufferedUpdates, TestSolrCloudSnapshots,
TestMultiValuedNumericRangeQuery, TestSearcherReuse, PrimitiveFieldTypeTest,
JsonLoaderTest, ActionThrottleTest, CdcrBidirectionalTest,
TestRebalanceLeaders, AutoAddReplicasPlanActionTest, TestQueryUtils,
TestPartialUpdateDeduplication, TestReversedWildcardFilterFactory,
ScriptEngineTest, SimplePostToolTest, DeleteReplicaTest, CdcrUpdateLogTest,
DOMUtilTest, TestReplicationHandlerBackup,
SuggestComponentContextFilterQueryTest, TestHalfAndHalfDocValues,
CSVRequestHandlerTest, TestSystemIdResolver, TestClassicSimilarityFactory,
TestLegacyFieldReuse, TestXmlQParser, TestJsonFacetRefinement,
CdcrReplicationHandlerTest, TestSubQueryTransformer, QueryEqualityTest,
TestStressVersions, TestGeoJSONResponseWriter, SuggesterFSTTest,
TestNestedDocsSort, TestHdfsUpdateLog, TestLRUCache, DistributedQueueTest,
SolrSlf4jReporterTest, TestApiFramework, TestMacros, AlternateDirectoryTest,
TermsComponentTest, TestTrackingShardHandlerFactory, TestQueryWrapperFilter,
TestNodeAddedTrigger, ResponseBuilderTest, BitVectorTest, V2StandaloneTest,
OverseerModifyCollectionTest, PreAnalyzedUpdateProcessorTest, TestJsonRequest,
TestCharFilters, SolrCoreTest, TestPullReplicaErrorHandling,
CurrencyRangeFacetCloudTest, TestSimDistributedQueue, ZkControllerTest,
TriLevelCompositeIdRoutingTest, HdfsUnloadDistributedZkTest,
WrapperMergePolicyFactoryTest, TestSolrConfigHandlerConcurrent,
TestCollectionsAPIViaSolrCloudCluster, MetricsConfigTest,
TestClusterStateProvider, TestStandardQParsers, BlockCacheTest,
TestImplicitCoreProperties, TestRuleBasedAuthorizationPlugin,
SolrIndexMetricsTest, TestFilteredDocIdSet, InfoHandlerTest,
ConfigureRecoveryStrategyTest, DistributedExpandComponentTest,
TestFunctionQuery, QueryElevationComponentTest, TestBackupRepositoryFactory,
PeerSyncTest, ZkShardTermsTest, MergeStrategyTest, TestExpandComponent,
SolrMetricReporterTest, DistributedFacetPivotLongTailTest,
SignatureUpdateProcessorFactoryTest, DocumentBuilderTest,
TestBM25SimilarityFactory, TestOnReconnectListenerSupport,
ConcurrentDeleteAndCreateCollectionTest, TestLegacyFieldCache,
TestXIncludeConfig, TestSkipOverseerOperations, CollectionsAPISolrJTest,
TestZkChroot, TestSegmentSorting, ChaosMonkeySafeLeaderTest,
UnloadDistributedZkTest, BasicZkTest, FullSolrCloudDistribCmdsTest,
TestReplicationHandler, ZkSolrClientTest, ShardRoutingCustomTest,
TestDistributedSearch, TestDistributedGrouping, TestRecovery,
TestHashPartitioner, TermVectorComponentDistributedTest, TestJoin,
DistributedTermsComponentTest, TestSolr4Spatial, StatsComponentTest,
TestGroupingSearch, SolrCmdDistributorTest, BadIndexSchemaTest, TestFiltering,
BasicFunctionalityTest, HighlighterTest,
DistributedQueryElevationComponentTest, AnalysisAfterCoreReloadTest,
TestExtendedDismaxParser, SpellCheckCollatorTest, TestFoldingMultitermQuery,
DocValuesTest, SuggesterTest, TestStressLucene, TestTrie, SuggesterWFSTTest,
TestCSVLoader, PolyFieldTest, NoCacheHeaderTest,
SchemaVersionSpecificBehaviorTest, SolrCoreCheckLockOnStartupTest,
FieldMutatingUpdateProcessorTest, TestAtomicUpdateErrorCases,
TestWordDelimiterFilterFactory, DirectUpdateHandlerOptimizeTest,
SortByFunctionTest, DistanceFunctionTest, TestSolrDeletionPolicy1,
SolrInfoBeanTest, IndexBasedSpellCheckerTest, LukeRequestHandlerTest,
TestWriterPerf, DirectSolrSpellCheckerTest, TestValueSourceCache,
PathHierarchyTokenizerFactoryTest, TermVectorComponentTest,
MoreLikeThisHandlerTest, TestSolrQueryParser, FastVectorHighlighterTest,
LoggingHandlerTest, IndexSchemaTest, TestJmxIntegration,
UpdateRequestProcessorFactoryTest, MBeansHandlerTest, QueryParsingTest,
UniqFieldsUpdateProcessorFactoryTest, BinaryUpdateRequestHandlerTest,
PingRequestHandlerTest, TestComponentsName, SearchHandlerTest,
HighlighterConfigTest, SOLR749Test, UpdateParamsTest, TestSolrIndexConfig,
CopyFieldTest, SolrIndexConfigTest, BadComponentTest, TestStressRecovery,
TestMergePolicyConfig, MultiTermTest, TestDocSet, TestSearchPerf,
NumericFieldsTest, MinimalSchemaTest, TestConfig, TestFuzzyAnalyzedSuggestions,
ExternalFileFieldSortTest, TestSolrCoreProperties, TestLuceneMatchVersion,
TestSweetSpotSimilarityFactory, TestPerFieldSimilarity,
TestLMJelinekMercerSimilarityFactory, TestFastWriter, ResourceLoaderTest,
TestFastOutputStream, OpenExchangeRatesOrgProviderTest, PluginInfoTest,
TestFastLRUCache, DateMathParserTest, PreAnalyzedFieldTest, PrimUtilsTest,
TestSuggestSpellingConverter, SpellingQueryConverterTest, ClusterStateTest,
TestSolrJ, ZkNodePropsTest, SliceStateTest, FileUtilsTest, CircularListTest,
DistributedMLTComponentTest, TestRTGBase, CursorPagingTest,
TestDistributedMissingSort, TestSimpleTrackingShardHandler, TokenizerChainTest,
TestLuceneIndexBackCompat, TestEmbeddedSolrServerAdminHandler,
TestEmbeddedSolrServerConstructors, AliasIntegrationTest,
AssignBackwardCompatibilityTest, AsyncCallRequestStatusResponseTest,
ChaosMonkeyNothingIsSafeWithPullReplicasTest,
ChaosMonkeySafeLeaderWithPullReplicasTest, CollectionPropsTest,
ConfigSetsAPITest, ConnectionManagerTest, DeleteInactiveReplicaTest,
DeleteLastCustomShardedReplicaTest, DeleteNodeTest, DeleteShardTest,
DeleteStatusTest, DistribCursorPagingTest,
DistribDocExpirationUpdateProcessorTest, DistribJoinFromCollectionTest,
DocValuesNotIndexedTest, ForceLeaderTest, HealthCheckHandlerTest,
HttpPartitionOnCommitTest, HttpPartitionTest, LIROnShardRestartTest,
ReplaceNodeNoTargetTest, ReplaceNodeTest, ReplicationFactorTest,
RollingRestartTest, TestTlogReplica, CollectionsAPIDistributedZkTest,
NodeAddedTriggerIntegrationTest, NodeAddedTriggerTest,
ScheduledMaintenanceTriggerTest, ScheduledTriggerIntegrationTest,
ScheduledTriggerTest, TriggerIntegrationTest, HdfsRecoverLeaseTest,
HdfsWriteToMultipleCollectionsTest, TestConfigReload, TestSQLHandler,
TestCoreAdminApis, CustomHighlightComponentTest,
DistributedFacetPivotWhiteBoxTest, DistributedSuggestComponentTest,
TestUnifiedSolrHighlighter, TestLegacyNumericUtils, TestLegacyTerms,
TestNumericRangeQuery32, TestNumericTokenStream, JvmMetricsTest,
SolrGraphiteReporterTest, SolrJmxReporterCloudTest, SolrCloudReportersTest,
SolrShardReporterTest, JSONWriterTest, TestBinaryResponseWriter,
TestExportWriter, TestGraphMLResponseWriter, TestPushWriter,
TestRawTransformer, TestRetrieveFieldsOptimizer,
TestSubQueryTransformerDistrib, TestBulkSchemaAPI,
TestCopyFieldCollectionResource, TestDynamicFieldResource,
TestFieldTypeCollectionResource, TestSchemaResource,
TestSchemaSimilarityResource, TestManagedStopFilterFactory,
TestManagedSynonymFilterFactory, BooleanFieldTest, ChangedSchemaMergeTest,
DateRangeFieldTest, EnumFieldTest, ManagedSchemaRoundRobinCloudTest,
SpatialRPTFieldTypeTest, TestCloudSchemaless, TestComplexPhraseQParserPlugin,
TestInitQParser, TestMultiWordSynonyms,
TestOverriddenPrefixQueryForCustomFieldType, TestPayloadCheckQParserPlugin,
TestRandomCollapseQParserPlugin, TestOrdValues, BJQParserTest,
TestCloudNestedDocsSort, TestScoreJoinQPScore, SimpleMLTQParserTest,
TestPerFieldSimilarityWithDefaultOverride, TestDefaultStatsCache,
TestLRUStatsCache, TestDelegationWithHadoopAuth, TestZkAclsWithHadoopAuth,
HttpSolrCallGetCoreTest, BufferStoreTest, TestDocTermOrds]
[junit4] Completed [769/795 (2!)] on J1 in 58.74s, 10 tests, 1 error <<<
FAILURES!
[...truncated 80 lines...]
[junit4] JVM J1: stdout was not empty, see:
/x1/jenkins/jenkins-slave/workspace/Lucene-Solr-NightlyTests-master/checkout/solr/build/solr-core/test/temp/junit4-J1-20180402_213251_935616979699388231223.sysout
[junit4] >>> JVM J1 emitted unexpected output (verbatim) ----
[junit4] java.lang.OutOfMemoryError: GC overhead limit exceeded
[junit4] Dumping heap to
/x1/jenkins/jenkins-slave/workspace/Lucene-Solr-NightlyTests-master/checkout/heapdumps/java_pid9147.hprof
...
[junit4] Heap dump file created [595147189 bytes in 1.584 secs]
[junit4] <<< JVM J1: EOF ----
[...truncated 9290 lines...]
BUILD FAILED
/x1/jenkins/jenkins-slave/workspace/Lucene-Solr-NightlyTests-master/checkout/build.xml:651:
The following error occurred while executing this line:
/x1/jenkins/jenkins-slave/workspace/Lucene-Solr-NightlyTests-master/checkout/build.xml:585:
Some of the tests produced a heap dump, but did not fail. Maybe a suppressed
OutOfMemoryError? Dumps created:
* java_pid9147.hprof
Total time: 244 minutes 35 seconds
Build step 'Invoke Ant' marked build as failure
Archiving artifacts
Recording test results
Email was triggered for: Failure - Any
Sending email for trigger: Failure - Any