[JENKINS] Lucene-Solr-BadApples-7.x-Linux (64bit/jdk-10) - Build # 19 - Unstable!
Build: https://jenkins.thetaphi.de/job/Lucene-Solr-BadApples-7.x-Linux/19/ Java: 64bit/jdk-10 -XX:-UseCompressedOops -XX:+UseConcMarkSweepGC 1 tests failed. FAILED: org.apache.solr.cloud.MoveReplicaHDFSTest.testFailedMove Error Message: No live SolrServers available to handle this request:[https://127.0.0.1:36799/solr/MoveReplicaHDFSTest_failed_coll_true, https://127.0.0.1:46389/solr/MoveReplicaHDFSTest_failed_coll_true] Stack Trace: org.apache.solr.client.solrj.SolrServerException: No live SolrServers available to handle this request:[https://127.0.0.1:36799/solr/MoveReplicaHDFSTest_failed_coll_true, https://127.0.0.1:46389/solr/MoveReplicaHDFSTest_failed_coll_true] at __randomizedtesting.SeedInfo.seed([D5531D6CCF2CCBD5:7F9ECE9E78FF1E05]:0) at org.apache.solr.client.solrj.impl.LBHttpSolrClient.request(LBHttpSolrClient.java:462) at org.apache.solr.client.solrj.impl.CloudSolrClient.sendRequest(CloudSolrClient.java:1106) at org.apache.solr.client.solrj.impl.CloudSolrClient.requestWithRetryOnStaleState(CloudSolrClient.java:886) at org.apache.solr.client.solrj.impl.CloudSolrClient.requestWithRetryOnStaleState(CloudSolrClient.java:993) at org.apache.solr.client.solrj.impl.CloudSolrClient.request(CloudSolrClient.java:819) at org.apache.solr.client.solrj.SolrRequest.process(SolrRequest.java:194) at org.apache.solr.client.solrj.SolrClient.query(SolrClient.java:942) at org.apache.solr.cloud.MoveReplicaTest.testFailedMove(MoveReplicaTest.java:308) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:564) at com.carrotsearch.randomizedtesting.RandomizedRunner.invoke(RandomizedRunner.java:1737) at com.carrotsearch.randomizedtesting.RandomizedRunner$8.evaluate(RandomizedRunner.java:934) at 
com.carrotsearch.randomizedtesting.RandomizedRunner$9.evaluate(RandomizedRunner.java:970) at com.carrotsearch.randomizedtesting.RandomizedRunner$10.evaluate(RandomizedRunner.java:984) at com.carrotsearch.randomizedtesting.rules.SystemPropertiesRestoreRule$1.evaluate(SystemPropertiesRestoreRule.java:57) at org.apache.lucene.util.TestRuleSetupTeardownChained$1.evaluate(TestRuleSetupTeardownChained.java:49) at org.apache.lucene.util.AbstractBeforeAfterRule$1.evaluate(AbstractBeforeAfterRule.java:45) at org.apache.lucene.util.TestRuleThreadAndTestName$1.evaluate(TestRuleThreadAndTestName.java:48) at org.apache.lucene.util.TestRuleIgnoreAfterMaxFailures$1.evaluate(TestRuleIgnoreAfterMaxFailures.java:64) at org.apache.lucene.util.TestRuleMarkFailure$1.evaluate(TestRuleMarkFailure.java:47) at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36) at com.carrotsearch.randomizedtesting.ThreadLeakControl$StatementRunner.run(ThreadLeakControl.java:368) at com.carrotsearch.randomizedtesting.ThreadLeakControl.forkTimeoutingTask(ThreadLeakControl.java:817) at com.carrotsearch.randomizedtesting.ThreadLeakControl$3.evaluate(ThreadLeakControl.java:468) at com.carrotsearch.randomizedtesting.RandomizedRunner.runSingleTest(RandomizedRunner.java:943) at com.carrotsearch.randomizedtesting.RandomizedRunner$5.evaluate(RandomizedRunner.java:829) at com.carrotsearch.randomizedtesting.RandomizedRunner$6.evaluate(RandomizedRunner.java:879) at com.carrotsearch.randomizedtesting.RandomizedRunner$7.evaluate(RandomizedRunner.java:890) at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36) at com.carrotsearch.randomizedtesting.rules.SystemPropertiesRestoreRule$1.evaluate(SystemPropertiesRestoreRule.java:57) at org.apache.lucene.util.AbstractBeforeAfterRule$1.evaluate(AbstractBeforeAfterRule.java:45) at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36) at 
org.apache.lucene.util.TestRuleStoreClassName$1.evaluate(TestRuleStoreClassName.java:41) at com.carrotsearch.randomizedtesting.rules.NoShadowingOrOverridesOnMethodsRule$1.evaluate(NoShadowingOrOverridesOnMethodsRule.java:40) at com.carrotsearch.randomizedtesting.rules.NoShadowingOrOverridesOnMethodsRule$1.evaluate(NoShadowingOrOverridesOnMethodsRule.java:40) at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36) at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36) at
[JENKINS] Lucene-Solr-7.x-MacOSX (64bit/jdk-9) - Build # 564 - Still Unstable!
Build: https://jenkins.thetaphi.de/job/Lucene-Solr-7.x-MacOSX/564/ Java: 64bit/jdk-9 -XX:+UseCompressedOops -XX:+UseG1GC 1 tests failed. FAILED: org.apache.solr.update.processor.TimeRoutedAliasUpdateProcessorTest.test Error Message: expected:<4> but was:<3> Stack Trace: java.lang.AssertionError: expected:<4> but was:<3> at __randomizedtesting.SeedInfo.seed([A17AE08A339277C0:292EDF509D6E1A38]:0) at org.junit.Assert.fail(Assert.java:93) at org.junit.Assert.failNotEquals(Assert.java:647) at org.junit.Assert.assertEquals(Assert.java:128) at org.junit.Assert.assertEquals(Assert.java:472) at org.junit.Assert.assertEquals(Assert.java:456) at org.apache.solr.update.processor.TimeRoutedAliasUpdateProcessorTest.assertInvariants(TimeRoutedAliasUpdateProcessorTest.java:343) at org.apache.solr.update.processor.TimeRoutedAliasUpdateProcessorTest.test(TimeRoutedAliasUpdateProcessorTest.java:169) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:564) at com.carrotsearch.randomizedtesting.RandomizedRunner.invoke(RandomizedRunner.java:1737) at com.carrotsearch.randomizedtesting.RandomizedRunner$8.evaluate(RandomizedRunner.java:934) at com.carrotsearch.randomizedtesting.RandomizedRunner$9.evaluate(RandomizedRunner.java:970) at com.carrotsearch.randomizedtesting.RandomizedRunner$10.evaluate(RandomizedRunner.java:984) at com.carrotsearch.randomizedtesting.rules.SystemPropertiesRestoreRule$1.evaluate(SystemPropertiesRestoreRule.java:57) at org.apache.lucene.util.TestRuleSetupTeardownChained$1.evaluate(TestRuleSetupTeardownChained.java:49) at org.apache.lucene.util.AbstractBeforeAfterRule$1.evaluate(AbstractBeforeAfterRule.java:45) at 
org.apache.lucene.util.TestRuleThreadAndTestName$1.evaluate(TestRuleThreadAndTestName.java:48) at org.apache.lucene.util.TestRuleIgnoreAfterMaxFailures$1.evaluate(TestRuleIgnoreAfterMaxFailures.java:64) at org.apache.lucene.util.TestRuleMarkFailure$1.evaluate(TestRuleMarkFailure.java:47) at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36) at com.carrotsearch.randomizedtesting.ThreadLeakControl$StatementRunner.run(ThreadLeakControl.java:368) at com.carrotsearch.randomizedtesting.ThreadLeakControl.forkTimeoutingTask(ThreadLeakControl.java:817) at com.carrotsearch.randomizedtesting.ThreadLeakControl$3.evaluate(ThreadLeakControl.java:468) at com.carrotsearch.randomizedtesting.RandomizedRunner.runSingleTest(RandomizedRunner.java:943) at com.carrotsearch.randomizedtesting.RandomizedRunner$5.evaluate(RandomizedRunner.java:829) at com.carrotsearch.randomizedtesting.RandomizedRunner$6.evaluate(RandomizedRunner.java:879) at com.carrotsearch.randomizedtesting.RandomizedRunner$7.evaluate(RandomizedRunner.java:890) at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36) at com.carrotsearch.randomizedtesting.rules.SystemPropertiesRestoreRule$1.evaluate(SystemPropertiesRestoreRule.java:57) at org.apache.lucene.util.AbstractBeforeAfterRule$1.evaluate(AbstractBeforeAfterRule.java:45) at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36) at org.apache.lucene.util.TestRuleStoreClassName$1.evaluate(TestRuleStoreClassName.java:41) at com.carrotsearch.randomizedtesting.rules.NoShadowingOrOverridesOnMethodsRule$1.evaluate(NoShadowingOrOverridesOnMethodsRule.java:40) at com.carrotsearch.randomizedtesting.rules.NoShadowingOrOverridesOnMethodsRule$1.evaluate(NoShadowingOrOverridesOnMethodsRule.java:40) at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36) at 
com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36) at org.apache.lucene.util.TestRuleAssertionsRequired$1.evaluate(TestRuleAssertionsRequired.java:53) at org.apache.lucene.util.TestRuleMarkFailure$1.evaluate(TestRuleMarkFailure.java:47) at org.apache.lucene.util.TestRuleIgnoreAfterMaxFailures$1.evaluate(TestRuleIgnoreAfterMaxFailures.java:64) at org.apache.lucene.util.TestRuleIgnoreTestSuites$1.evaluate(TestRuleIgnoreTestSuites.java:54) at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36) at com.carrotsearch.randomizedtesting.ThreadLeakControl$StatementRunner.run(ThreadLeakControl.java:368) at
[JENKINS] Lucene-Solr-repro - Build # 452 - Unstable
Build: https://builds.apache.org/job/Lucene-Solr-repro/452/ [...truncated 28 lines...] [repro] Jenkins log URL: https://builds.apache.org/job/Lucene-Solr-SmokeRelease-master/998/consoleText [repro] Revision: 0f53adbee49015aa01e8f66945f82e88a9172c7c [repro] Ant options: -DsmokeTestRelease.java9=/home/jenkins/tools/java/latest1.9 [repro] Repro line: ant test -Dtestcase=TestTriggerIntegration -Dtests.method=testEventQueue -Dtests.seed=CEA01ED2B23B4612 -Dtests.multiplier=2 -Dtests.locale=fi -Dtests.timezone=Europe/Copenhagen -Dtests.asserts=true -Dtests.file.encoding=ISO-8859-1 [repro] git rev-parse --abbrev-ref HEAD [repro] git rev-parse HEAD [repro] Initial local git branch/revision: 2ae488aae2a0601148dcd1b6aa794489a0572349 [repro] git fetch [...truncated 2 lines...] [repro] git checkout 0f53adbee49015aa01e8f66945f82e88a9172c7c [...truncated 2 lines...] [repro] git merge --ff-only [...truncated 1 lines...] [repro] ant clean [...truncated 6 lines...] [repro] Test suites by module: [repro]solr/core [repro] TestTriggerIntegration [repro] ant compile-test [...truncated 3297 lines...] [repro] ant test-nocompile -Dtests.dups=5 -Dtests.maxfailures=5 -Dtests.class="*.TestTriggerIntegration" -Dtests.showOutput=onerror -DsmokeTestRelease.java9=/home/jenkins/tools/java/latest1.9 -Dtests.seed=CEA01ED2B23B4612 -Dtests.multiplier=2 -Dtests.locale=fi -Dtests.timezone=Europe/Copenhagen -Dtests.asserts=true -Dtests.file.encoding=ISO-8859-1 [...truncated 1986 lines...] [repro] Setting last failure code to 256 [repro] Failures: [repro] 1/5 failed: org.apache.solr.cloud.autoscaling.sim.TestTriggerIntegration [repro] git checkout 2ae488aae2a0601148dcd1b6aa794489a0572349 [...truncated 2 lines...] [repro] Exiting with code 256 [...truncated 5 lines...] - To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org For additional commands, e-mail: dev-h...@lucene.apache.org
[JENKINS] Lucene-Solr-Tests-7.x - Build # 552 - Still Unstable
Build: https://builds.apache.org/job/Lucene-Solr-Tests-7.x/552/ 1 tests failed. FAILED: org.apache.solr.cloud.DocValuesNotIndexedTest.testGroupingDVOnly Error Message: Unexpected number of elements in the group for intGSF: 4 Stack Trace: java.lang.AssertionError: Unexpected number of elements in the group for intGSF: 4 at __randomizedtesting.SeedInfo.seed([1CD3400DBFA8F096:87682E55F2F0C2C8]:0) at org.junit.Assert.fail(Assert.java:93) at org.apache.solr.cloud.DocValuesNotIndexedTest.testGroupingDVOnly(DocValuesNotIndexedTest.java:379) at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62) at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.lang.reflect.Method.invoke(Method.java:498) at com.carrotsearch.randomizedtesting.RandomizedRunner.invoke(RandomizedRunner.java:1737) at com.carrotsearch.randomizedtesting.RandomizedRunner$8.evaluate(RandomizedRunner.java:934) at com.carrotsearch.randomizedtesting.RandomizedRunner$9.evaluate(RandomizedRunner.java:970) at com.carrotsearch.randomizedtesting.RandomizedRunner$10.evaluate(RandomizedRunner.java:984) at com.carrotsearch.randomizedtesting.rules.SystemPropertiesRestoreRule$1.evaluate(SystemPropertiesRestoreRule.java:57) at org.apache.lucene.util.TestRuleSetupTeardownChained$1.evaluate(TestRuleSetupTeardownChained.java:49) at org.apache.lucene.util.AbstractBeforeAfterRule$1.evaluate(AbstractBeforeAfterRule.java:45) at org.apache.lucene.util.TestRuleThreadAndTestName$1.evaluate(TestRuleThreadAndTestName.java:48) at org.apache.lucene.util.TestRuleIgnoreAfterMaxFailures$1.evaluate(TestRuleIgnoreAfterMaxFailures.java:64) at org.apache.lucene.util.TestRuleMarkFailure$1.evaluate(TestRuleMarkFailure.java:47) at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36) at 
com.carrotsearch.randomizedtesting.ThreadLeakControl$StatementRunner.run(ThreadLeakControl.java:368) at com.carrotsearch.randomizedtesting.ThreadLeakControl.forkTimeoutingTask(ThreadLeakControl.java:817) at com.carrotsearch.randomizedtesting.ThreadLeakControl$3.evaluate(ThreadLeakControl.java:468) at com.carrotsearch.randomizedtesting.RandomizedRunner.runSingleTest(RandomizedRunner.java:943) at com.carrotsearch.randomizedtesting.RandomizedRunner$5.evaluate(RandomizedRunner.java:829) at com.carrotsearch.randomizedtesting.RandomizedRunner$6.evaluate(RandomizedRunner.java:879) at com.carrotsearch.randomizedtesting.RandomizedRunner$7.evaluate(RandomizedRunner.java:890) at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36) at com.carrotsearch.randomizedtesting.rules.SystemPropertiesRestoreRule$1.evaluate(SystemPropertiesRestoreRule.java:57) at org.apache.lucene.util.AbstractBeforeAfterRule$1.evaluate(AbstractBeforeAfterRule.java:45) at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36) at org.apache.lucene.util.TestRuleStoreClassName$1.evaluate(TestRuleStoreClassName.java:41) at com.carrotsearch.randomizedtesting.rules.NoShadowingOrOverridesOnMethodsRule$1.evaluate(NoShadowingOrOverridesOnMethodsRule.java:40) at com.carrotsearch.randomizedtesting.rules.NoShadowingOrOverridesOnMethodsRule$1.evaluate(NoShadowingOrOverridesOnMethodsRule.java:40) at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36) at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36) at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36) at org.apache.lucene.util.TestRuleAssertionsRequired$1.evaluate(TestRuleAssertionsRequired.java:53) at org.apache.lucene.util.TestRuleMarkFailure$1.evaluate(TestRuleMarkFailure.java:47) at 
org.apache.lucene.util.TestRuleIgnoreAfterMaxFailures$1.evaluate(TestRuleIgnoreAfterMaxFailures.java:64) at org.apache.lucene.util.TestRuleIgnoreTestSuites$1.evaluate(TestRuleIgnoreTestSuites.java:54) at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36) at com.carrotsearch.randomizedtesting.ThreadLeakControl$StatementRunner.run(ThreadLeakControl.java:368) at java.lang.Thread.run(Thread.java:748) Build Log: [...truncated 13954 lines...] [junit4] Suite: org.apache.solr.cloud.DocValuesNotIndexedTest [junit4] 2> Creating dataDir: /x1/jenkins/jenkins-slave/workspace/Lucene-Solr-Tests-7.x/solr/build/solr-core/test/J1/temp/solr.cloud.DocValuesNotIndexedTest_1CD3400DBFA8F096-001/init-core-data-001 [junit4] 2> 1707512 INFO
[JENKINS] Lucene-Solr-7.x-Windows (64bit/jdk-9.0.4) - Build # 533 - Still Unstable!
Build: https://jenkins.thetaphi.de/job/Lucene-Solr-7.x-Windows/533/ Java: 64bit/jdk-9.0.4 -XX:+UseCompressedOops -XX:+UseSerialGC 7 tests failed. FAILED: org.apache.solr.cloud.autoscaling.ScheduledTriggerTest.testTrigger Error Message: expected:<3> but was:<2> Stack Trace: java.lang.AssertionError: expected:<3> but was:<2> at __randomizedtesting.SeedInfo.seed([A86A8FEB7F34064:694D9E7C2E3C3349]:0) at org.junit.Assert.fail(Assert.java:93) at org.junit.Assert.failNotEquals(Assert.java:647) at org.junit.Assert.assertEquals(Assert.java:128) at org.junit.Assert.assertEquals(Assert.java:472) at org.junit.Assert.assertEquals(Assert.java:456) at org.apache.solr.cloud.autoscaling.ScheduledTriggerTest.scheduledTriggerTest(ScheduledTriggerTest.java:111) at org.apache.solr.cloud.autoscaling.ScheduledTriggerTest.testTrigger(ScheduledTriggerTest.java:64) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:564) at com.carrotsearch.randomizedtesting.RandomizedRunner.invoke(RandomizedRunner.java:1737) at com.carrotsearch.randomizedtesting.RandomizedRunner$8.evaluate(RandomizedRunner.java:934) at com.carrotsearch.randomizedtesting.RandomizedRunner$9.evaluate(RandomizedRunner.java:970) at com.carrotsearch.randomizedtesting.RandomizedRunner$10.evaluate(RandomizedRunner.java:984) at com.carrotsearch.randomizedtesting.rules.SystemPropertiesRestoreRule$1.evaluate(SystemPropertiesRestoreRule.java:57) at org.apache.lucene.util.TestRuleSetupTeardownChained$1.evaluate(TestRuleSetupTeardownChained.java:49) at org.apache.lucene.util.AbstractBeforeAfterRule$1.evaluate(AbstractBeforeAfterRule.java:45) at org.apache.lucene.util.TestRuleThreadAndTestName$1.evaluate(TestRuleThreadAndTestName.java:48) at 
org.apache.lucene.util.TestRuleIgnoreAfterMaxFailures$1.evaluate(TestRuleIgnoreAfterMaxFailures.java:64) at org.apache.lucene.util.TestRuleMarkFailure$1.evaluate(TestRuleMarkFailure.java:47) at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36) at com.carrotsearch.randomizedtesting.ThreadLeakControl$StatementRunner.run(ThreadLeakControl.java:368) at com.carrotsearch.randomizedtesting.ThreadLeakControl.forkTimeoutingTask(ThreadLeakControl.java:817) at com.carrotsearch.randomizedtesting.ThreadLeakControl$3.evaluate(ThreadLeakControl.java:468) at com.carrotsearch.randomizedtesting.RandomizedRunner.runSingleTest(RandomizedRunner.java:943) at com.carrotsearch.randomizedtesting.RandomizedRunner$5.evaluate(RandomizedRunner.java:829) at com.carrotsearch.randomizedtesting.RandomizedRunner$6.evaluate(RandomizedRunner.java:879) at com.carrotsearch.randomizedtesting.RandomizedRunner$7.evaluate(RandomizedRunner.java:890) at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36) at com.carrotsearch.randomizedtesting.rules.SystemPropertiesRestoreRule$1.evaluate(SystemPropertiesRestoreRule.java:57) at org.apache.lucene.util.AbstractBeforeAfterRule$1.evaluate(AbstractBeforeAfterRule.java:45) at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36) at org.apache.lucene.util.TestRuleStoreClassName$1.evaluate(TestRuleStoreClassName.java:41) at com.carrotsearch.randomizedtesting.rules.NoShadowingOrOverridesOnMethodsRule$1.evaluate(NoShadowingOrOverridesOnMethodsRule.java:40) at com.carrotsearch.randomizedtesting.rules.NoShadowingOrOverridesOnMethodsRule$1.evaluate(NoShadowingOrOverridesOnMethodsRule.java:40) at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36) at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36) at 
org.apache.lucene.util.TestRuleAssertionsRequired$1.evaluate(TestRuleAssertionsRequired.java:53) at org.apache.lucene.util.TestRuleMarkFailure$1.evaluate(TestRuleMarkFailure.java:47) at org.apache.lucene.util.TestRuleIgnoreAfterMaxFailures$1.evaluate(TestRuleIgnoreAfterMaxFailures.java:64) at org.apache.lucene.util.TestRuleIgnoreTestSuites$1.evaluate(TestRuleIgnoreTestSuites.java:54) at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36) at com.carrotsearch.randomizedtesting.ThreadLeakControl$StatementRunner.run(ThreadLeakControl.java:368) at java.base/java.lang.Thread.run(Thread.java:844) FAILED:
[JENKINS] Solr-Artifacts-master - Build # 3318 - Failure
Build: https://builds.apache.org/job/Solr-Artifacts-master/3318/ No tests ran. Build Log: [...truncated 116 lines...] - To unsubscribe, e-mail: dev-unsubscribe@lucene.apache.org For additional commands, e-mail: dev-help@lucene.apache.org
[JENKINS] Lucene-Solr-repro - Build # 449 - Unstable
Build: https://builds.apache.org/job/Lucene-Solr-repro/449/ [...truncated 28 lines...] [repro] Jenkins log URL: https://builds.apache.org/job/Lucene-Solr-BadApples-NightlyTests-master/6/consoleText [repro] Revision: 73d74107dcb2d836c541654e4bf99dc2e306cf75 [repro] Revision: 73d74107dcb2d836c541654e4bf99dc2e306cf75 [repro] Ant options: -Dtests.multiplier=2 -Dtests.linedocsfile=/home/jenkins/jenkins-slave/workspace/Lucene-Solr-BadApples-NightlyTests-master/test-data/enwiki.random.lines.txt [repro] Repro line: ant test -Dtestcase=HdfsBasicDistributedZk2Test -Dtests.method=test -Dtests.seed=3AD4C155A2C8DB65 -Dtests.multiplier=2 -Dtests.nightly=true -Dtests.slow=true -Dtests.badapples=true -Dtests.linedocsfile=/home/jenkins/jenkins-slave/workspace/Lucene-Solr-BadApples-NightlyTests-master/test-data/enwiki.random.lines.txt -Dtests.locale=es-CU -Dtests.timezone=America/Guadeloupe -Dtests.asserts=true -Dtests.file.encoding=US-ASCII [repro] Repro line: ant test -Dtestcase=TestReplicationHandler -Dtests.method=doTestReplicateAfterCoreReload -Dtests.seed=3AD4C155A2C8DB65 -Dtests.multiplier=2 -Dtests.nightly=true -Dtests.slow=true -Dtests.badapples=true -Dtests.linedocsfile=/home/jenkins/jenkins-slave/workspace/Lucene-Solr-BadApples-NightlyTests-master/test-data/enwiki.random.lines.txt -Dtests.locale=es-BO -Dtests.timezone=America/Curacao -Dtests.asserts=true -Dtests.file.encoding=US-ASCII [repro] Repro line: ant test -Dtestcase=TestTlogReplica -Dtests.method=testRecovery -Dtests.seed=3AD4C155A2C8DB65 -Dtests.multiplier=2 -Dtests.nightly=true -Dtests.slow=true -Dtests.badapples=true -Dtests.linedocsfile=/home/jenkins/jenkins-slave/workspace/Lucene-Solr-BadApples-NightlyTests-master/test-data/enwiki.random.lines.txt -Dtests.locale=da -Dtests.timezone=America/Cordoba -Dtests.asserts=true -Dtests.file.encoding=US-ASCII [repro] Repro line: ant test -Dtestcase=FullSolrCloudDistribCmdsTest -Dtests.method=test -Dtests.seed=3AD4C155A2C8DB65 -Dtests.multiplier=2 -Dtests.nightly=true 
-Dtests.slow=true -Dtests.badapples=true -Dtests.linedocsfile=/home/jenkins/jenkins-slave/workspace/Lucene-Solr-BadApples-NightlyTests-master/test-data/enwiki.random.lines.txt -Dtests.locale=pl -Dtests.timezone=Brazil/DeNoronha -Dtests.asserts=true -Dtests.file.encoding=US-ASCII [repro] Repro line: ant test -Dtestcase=TestDocTermOrds -Dtests.method=testTriggerUnInvertLimit -Dtests.seed=3AD4C155A2C8DB65 -Dtests.multiplier=2 -Dtests.nightly=true -Dtests.slow=true -Dtests.badapples=true -Dtests.linedocsfile=/home/jenkins/jenkins-slave/workspace/Lucene-Solr-BadApples-NightlyTests-master/test-data/enwiki.random.lines.txt -Dtests.locale=sq-AL -Dtests.timezone=SystemV/AST4ADT -Dtests.asserts=true -Dtests.file.encoding=US-ASCII [repro] Repro line: ant test -Dtestcase=HdfsChaosMonkeyNothingIsSafeTest -Dtests.method=test -Dtests.seed=3AD4C155A2C8DB65 -Dtests.multiplier=2 -Dtests.nightly=true -Dtests.slow=true -Dtests.badapples=true -Dtests.linedocsfile=/home/jenkins/jenkins-slave/workspace/Lucene-Solr-BadApples-NightlyTests-master/test-data/enwiki.random.lines.txt -Dtests.locale=en-IE -Dtests.timezone=Asia/Damascus -Dtests.asserts=true -Dtests.file.encoding=US-ASCII [repro] Repro line: ant test -Dtestcase=HdfsChaosMonkeyNothingIsSafeTest -Dtests.seed=3AD4C155A2C8DB65 -Dtests.multiplier=2 -Dtests.nightly=true -Dtests.slow=true -Dtests.badapples=true -Dtests.linedocsfile=/home/jenkins/jenkins-slave/workspace/Lucene-Solr-BadApples-NightlyTests-master/test-data/enwiki.random.lines.txt -Dtests.locale=en-IE -Dtests.timezone=Asia/Damascus -Dtests.asserts=true -Dtests.file.encoding=US-ASCII [repro] git rev-parse --abbrev-ref HEAD [repro] git rev-parse HEAD [repro] Initial local git branch/revision: 0f53adbee49015aa01e8f66945f82e88a9172c7c [repro] git fetch [...truncated 2 lines...] [repro] git checkout 73d74107dcb2d836c541654e4bf99dc2e306cf75 [...truncated 2 lines...] [repro] git merge --ff-only [...truncated 1 lines...] [repro] ant clean [...truncated 6 lines...] 
[repro] Test suites by module: [repro]solr/core [repro] TestReplicationHandler [repro] TestDocTermOrds [repro] HdfsChaosMonkeyNothingIsSafeTest [repro] TestTlogReplica [repro] HdfsBasicDistributedZk2Test [repro] FullSolrCloudDistribCmdsTest [repro] ant compile-test [...truncated 3297 lines...] [repro] ant test-nocompile -Dtests.dups=5 -Dtests.maxfailures=30 -Dtests.class="*.TestReplicationHandler|*.TestDocTermOrds|*.HdfsChaosMonkeyNothingIsSafeTest|*.TestTlogReplica|*.HdfsBasicDistributedZk2Test|*.FullSolrCloudDistribCmdsTest" -Dtests.showOutput=onerror -Dtests.multiplier=2 -Dtests.linedocsfile=/home/jenkins/jenkins-slave/workspace/Lucene-Solr-BadApples-NightlyTests-master/test-data/enwiki.random.lines.txt -Dtests.seed=3AD4C155A2C8DB65 -Dtests.multiplier=2 -Dtests.nightly=true -Dtests.slow=true -Dtests.badapples=true
[JENKINS-EA] Lucene-Solr-master-Windows (64bit/jdk-11-ea+5) - Build # 7256 - Still Unstable!
Error processing tokens: Error while parsing action 'Text/ZeroOrMore/FirstOf/Token/DelimitedToken/DelimitedToken_Action3' at input position (line 79, pos 4): )"} ^ java.lang.OutOfMemoryError: Java heap space - To unsubscribe, e-mail: dev-unsubscribe@lucene.apache.org For additional commands, e-mail: dev-help@lucene.apache.org
[jira] [Commented] (SOLR-6305) Ability to set the replication factor for index files created by HDFSDirectoryFactory
[ https://issues.apache.org/jira/browse/SOLR-6305?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel=16429550#comment-16429550 ] Shawn Heisey commented on SOLR-6305: I've never used HDFS, so I might not be contributing anything useful to this discussion. It was my understanding that if you configure HDFS to keep 3 copies, and then tell SolrCloud to use a replicationFactor of 3, that you would actually have nine copies -- SolrCloud would make its replicas just like it would on standard filesystems, and then HDFS would replicate each of the files in those indexes. Is that an incorrect understanding? > Ability to set the replication factor for index files created by > HDFSDirectoryFactory > - > > Key: SOLR-6305 > URL: https://issues.apache.org/jira/browse/SOLR-6305 > Project: Solr > Issue Type: Improvement > Components: hdfs > Environment: hadoop-2.2.0 >Reporter: Timothy Potter >Priority: Major > Attachments: > 0001-OIQ-23224-SOLR-6305-Fixed-SOLR-6305-by-reading-the-r.patch > > > HdfsFileWriter doesn't allow us to create files in HDFS with a different > replication factor than the configured DFS default because it uses: > {{FsServerDefaults fsDefaults = fileSystem.getServerDefaults(path);}} > Since we have two forms of replication going on when using > HDFSDirectoryFactory, it would be nice to be able to set the HDFS replication > factor for the Solr directories to a lower value than the default. I realize > this might reduce the chance of data locality but since Solr cores each have > their own path in HDFS, we should give operators the option to reduce it. > My original thinking was to just use Hadoop setrep to customize the > replication factor, but that's a one-time shot and doesn't affect new files > created. 
For instance, I did: > {{hadoop fs -setrep -R 1 solr49/coll1}} > My default dfs replication is set to 3 ^^ I'm setting it to 1 just as an > example > Then added some more docs to the coll1 and did: > {{hadoop fs -stat %r solr49/hdfs1/core_node1/data/index/segments_3}} > 3 <-- should be 1 > So it looks like new files don't inherit the repfact from their parent > directory. > Not sure if we need to go as far as allowing different replication factor per > collection but that should be considered if possible. > I looked at the Hadoop 2.2.0 code to see if there was a way to work through > this using the Configuration object but nothing jumped out at me ... and the > implementation for getServerDefaults(path) is just: > public FsServerDefaults getServerDefaults(Path p) throws IOException { > return getServerDefaults(); > } > Path is ignored ;-) -- This message was sent by Atlassian JIRA (v7.6.3#76005) - To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org For additional commands, e-mail: dev-h...@lucene.apache.org
[JENKINS] Lucene-Solr-SmokeRelease-7.x - Build # 194 - Still Failing
Build: https://builds.apache.org/job/Lucene-Solr-SmokeRelease-7.x/194/ No tests ran. Build Log: [...truncated 23779 lines...] [asciidoctor:convert] asciidoctor: ERROR: about-this-guide.adoc: line 1: invalid part, must have at least one section (e.g., chapter, appendix, etc.) [asciidoctor:convert] asciidoctor: ERROR: solr-glossary.adoc: line 1: invalid part, must have at least one section (e.g., chapter, appendix, etc.) [java] Processed 2178 links (1735 relative) to 2887 anchors in 227 files [echo] Validated Links & Anchors via: /home/jenkins/jenkins-slave/workspace/Lucene-Solr-SmokeRelease-7.x/solr/build/solr-ref-guide/bare-bones-html/ -dist-changes: [copy] Copying 4 files to /home/jenkins/jenkins-slave/workspace/Lucene-Solr-SmokeRelease-7.x/solr/package/changes -dist-keys: [get] Getting: http://home.apache.org/keys/group/lucene.asc [get] To: /home/jenkins/jenkins-slave/workspace/Lucene-Solr-SmokeRelease-7.x/solr/package/KEYS package: -unpack-solr-tgz: -ensure-solr-tgz-exists: [mkdir] Created dir: /home/jenkins/jenkins-slave/workspace/Lucene-Solr-SmokeRelease-7.x/solr/build/solr.tgz.unpacked [untar] Expanding: /home/jenkins/jenkins-slave/workspace/Lucene-Solr-SmokeRelease-7.x/solr/package/solr-7.4.0.tgz into /home/jenkins/jenkins-slave/workspace/Lucene-Solr-SmokeRelease-7.x/solr/build/solr.tgz.unpacked generate-maven-artifacts: resolve: resolve: ivy-availability-check: [loadresource] Do not set property disallowed.ivy.jars.list as its length is 0. -ivy-fail-disallowed-ivy-version: ivy-fail: ivy-configure: [ivy:configure] :: loading settings :: file = /home/jenkins/jenkins-slave/workspace/Lucene-Solr-SmokeRelease-7.x/lucene/top-level-ivy-settings.xml resolve: ivy-availability-check: [loadresource] Do not set property disallowed.ivy.jars.list as its length is 0. 
-ivy-fail-disallowed-ivy-version: ivy-fail: ivy-configure: [ivy:configure] :: loading settings :: file = /home/jenkins/jenkins-slave/workspace/Lucene-Solr-SmokeRelease-7.x/lucene/top-level-ivy-settings.xml resolve: resolve: ivy-availability-check: [loadresource] Do not set property disallowed.ivy.jars.list as its length is 0. -ivy-fail-disallowed-ivy-version: ivy-fail: ivy-configure: [ivy:configure] :: loading settings :: file = /home/jenkins/jenkins-slave/workspace/Lucene-Solr-SmokeRelease-7.x/lucene/top-level-ivy-settings.xml resolve: ivy-availability-check: [loadresource] Do not set property disallowed.ivy.jars.list as its length is 0. -ivy-fail-disallowed-ivy-version: ivy-fail: ivy-configure: [ivy:configure] :: loading settings :: file = /home/jenkins/jenkins-slave/workspace/Lucene-Solr-SmokeRelease-7.x/lucene/top-level-ivy-settings.xml resolve: ivy-availability-check: [loadresource] Do not set property disallowed.ivy.jars.list as its length is 0. -ivy-fail-disallowed-ivy-version: ivy-fail: ivy-configure: [ivy:configure] :: loading settings :: file = /home/jenkins/jenkins-slave/workspace/Lucene-Solr-SmokeRelease-7.x/lucene/top-level-ivy-settings.xml resolve: ivy-availability-check: [loadresource] Do not set property disallowed.ivy.jars.list as its length is 0. -ivy-fail-disallowed-ivy-version: ivy-fail: ivy-configure: [ivy:configure] :: loading settings :: file = /home/jenkins/jenkins-slave/workspace/Lucene-Solr-SmokeRelease-7.x/lucene/top-level-ivy-settings.xml resolve: ivy-availability-check: [loadresource] Do not set property disallowed.ivy.jars.list as its length is 0. -ivy-fail-disallowed-ivy-version: ivy-fail: ivy-configure: [ivy:configure] :: loading settings :: file = /home/jenkins/jenkins-slave/workspace/Lucene-Solr-SmokeRelease-7.x/lucene/top-level-ivy-settings.xml resolve: ivy-availability-check: [loadresource] Do not set property disallowed.ivy.jars.list as its length is 0. 
-ivy-fail-disallowed-ivy-version: ivy-fail: ivy-configure: [ivy:configure] :: loading settings :: file = /home/jenkins/jenkins-slave/workspace/Lucene-Solr-SmokeRelease-7.x/lucene/top-level-ivy-settings.xml resolve: ivy-availability-check: [loadresource] Do not set property disallowed.ivy.jars.list as its length is 0. -ivy-fail-disallowed-ivy-version: ivy-fail: ivy-configure: [ivy:configure] :: loading settings :: file = /home/jenkins/jenkins-slave/workspace/Lucene-Solr-SmokeRelease-7.x/lucene/top-level-ivy-settings.xml resolve: ivy-availability-check: [loadresource] Do not set property disallowed.ivy.jars.list as its length is 0. -ivy-fail-disallowed-ivy-version: ivy-fail: ivy-configure: [ivy:configure] :: loading settings :: file = /home/jenkins/jenkins-slave/workspace/Lucene-Solr-SmokeRelease-7.x/lucene/top-level-ivy-settings.xml resolve: ivy-availability-check: [loadresource] Do not set property disallowed.ivy.jars.list as its length is 0. -ivy-fail-disallowed-ivy-version: ivy-fail: ivy-configure: [ivy:configure] :: loading settings :: file = /home/jenkins/jenkins-slave/workspace/Lucene-Solr-SmokeRelease-7.x/lucene/top-level-ivy-settings.xml
[jira] [Resolved] (LUCENE-8226) Don't use MemoryCodec for nightly runs of TestIndexSorting
[ https://issues.apache.org/jira/browse/LUCENE-8226?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel ] Alan Woodward resolved LUCENE-8226. --- Resolution: Fixed Assignee: Alan Woodward Fix Version/s: 7.4 Fixed by removing the NIGHTLY section. > Don't use MemoryCodec for nightly runs of TestIndexSorting > -- > > Key: LUCENE-8226 > URL: https://issues.apache.org/jira/browse/LUCENE-8226 > Project: Lucene - Core > Issue Type: Task >Reporter: Alan Woodward >Assignee: Alan Woodward >Priority: Major > Fix For: 7.4 > > Attachments: LUCENE-8226.patch, LUCENE-8226.patch > > > Nightly runs of TestIndexSorting fail occasionally with OOM (see > [https://builds.apache.org/job/Lucene-Solr-NightlyTests-7.x/183/] for a > recent example, and it's been appearing in Erick's BadApple report too). It > looks as this is normally due to the combination of a large docset and > MemoryCodec. We should suppress MemoryCodec for these tests, on nightly runs > only if possible) -- This message was sent by Atlassian JIRA (v7.6.3#76005) - To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org For additional commands, e-mail: dev-h...@lucene.apache.org
[jira] [Resolved] (SOLR-12147) TestDocTermOrds.testTriggerUnInvertLimit should not use MemoryCodec
[ https://issues.apache.org/jira/browse/SOLR-12147?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel ] Alan Woodward resolved SOLR-12147. -- Resolution: Fixed Fix Version/s: 7.4 > TestDocTermOrds.testTriggerUnInvertLimit should not use MemoryCodec > --- > > Key: SOLR-12147 > URL: https://issues.apache.org/jira/browse/SOLR-12147 > Project: Solr > Issue Type: Task > Security Level: Public(Default Security Level. Issues are Public) >Reporter: Alan Woodward >Assignee: Alan Woodward >Priority: Major > Fix For: 7.4 > > > This can lead to OOM, for example in > [https://builds.apache.org/job/Lucene-Solr-NightlyTests-7.3/10/.|https://builds.apache.org/job/Lucene-Solr-NightlyTests-7.3/10/] > It's already a nightly-only test, and it's always going to require a large > index. -- This message was sent by Atlassian JIRA (v7.6.3#76005) - To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org For additional commands, e-mail: dev-h...@lucene.apache.org
[jira] [Commented] (SOLR-12147) TestDocTermOrds.testTriggerUnInvertLimit should not use MemoryCodec
[ https://issues.apache.org/jira/browse/SOLR-12147?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel=16429503#comment-16429503 ] ASF subversion and git services commented on SOLR-12147: Commit 2ae488aae2a0601148dcd1b6aa794489a0572349 in lucene-solr's branch refs/heads/master from [~romseygeek] [ https://git-wip-us.apache.org/repos/asf?p=lucene-solr.git;h=2ae488a ] SOLR-12147: Don't use MemoryPostingsFormat for TestDocTermOrds.testTriggerUnInvertLimit This can lead to OOM on nightly runs, as it needs to create a very large index, and the CI machines don't have huge amounts of RAM. > TestDocTermOrds.testTriggerUnInvertLimit should not use MemoryCodec > --- > > Key: SOLR-12147 > URL: https://issues.apache.org/jira/browse/SOLR-12147 > Project: Solr > Issue Type: Task > Security Level: Public(Default Security Level. Issues are Public) >Reporter: Alan Woodward >Assignee: Alan Woodward >Priority: Major > Fix For: 7.4 > > > This can lead to OOM, for example in > [https://builds.apache.org/job/Lucene-Solr-NightlyTests-7.3/10/.|https://builds.apache.org/job/Lucene-Solr-NightlyTests-7.3/10/] > It's already a nightly-only test, and it's always going to require a large > index. -- This message was sent by Atlassian JIRA (v7.6.3#76005) - To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org For additional commands, e-mail: dev-h...@lucene.apache.org
[jira] [Commented] (SOLR-12147) TestDocTermOrds.testTriggerUnInvertLimit should not use MemoryCodec
[ https://issues.apache.org/jira/browse/SOLR-12147?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel=16429501#comment-16429501 ] ASF subversion and git services commented on SOLR-12147: Commit 2ccea7b4754f10f8474e72ff22f77f6088fc9282 in lucene-solr's branch refs/heads/branch_7x from [~romseygeek] [ https://git-wip-us.apache.org/repos/asf?p=lucene-solr.git;h=2ccea7b ] SOLR-12147: Don't use MemoryPostingsFormat for TestDocTermOrds.testTriggerUnInvertLimit This can lead to OOM on nightly runs, as it needs to create a very large index, and the CI machines don't have huge amounts of RAM. > TestDocTermOrds.testTriggerUnInvertLimit should not use MemoryCodec > --- > > Key: SOLR-12147 > URL: https://issues.apache.org/jira/browse/SOLR-12147 > Project: Solr > Issue Type: Task > Security Level: Public(Default Security Level. Issues are Public) >Reporter: Alan Woodward >Assignee: Alan Woodward >Priority: Major > > This can lead to OOM, for example in > [https://builds.apache.org/job/Lucene-Solr-NightlyTests-7.3/10/.|https://builds.apache.org/job/Lucene-Solr-NightlyTests-7.3/10/] > It's already a nightly-only test, and it's always going to require a large > index. -- This message was sent by Atlassian JIRA (v7.6.3#76005) - To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org For additional commands, e-mail: dev-h...@lucene.apache.org
[jira] [Commented] (LUCENE-8226) Don't use MemoryCodec for nightly runs of TestIndexSorting
[ https://issues.apache.org/jira/browse/LUCENE-8226?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel=16429502#comment-16429502 ] ASF subversion and git services commented on LUCENE-8226: - Commit 005da875211bc271257c1fb008a8355a3c1e9f3c in lucene-solr's branch refs/heads/master from [~romseygeek] [ https://git-wip-us.apache.org/repos/asf?p=lucene-solr.git;h=005da87 ] LUCENE-8226: Don't generate unnecessarily massive indexes for index vs query sorting test > Don't use MemoryCodec for nightly runs of TestIndexSorting > -- > > Key: LUCENE-8226 > URL: https://issues.apache.org/jira/browse/LUCENE-8226 > Project: Lucene - Core > Issue Type: Task >Reporter: Alan Woodward >Priority: Major > Attachments: LUCENE-8226.patch, LUCENE-8226.patch > > > Nightly runs of TestIndexSorting fail occasionally with OOM (see > [https://builds.apache.org/job/Lucene-Solr-NightlyTests-7.x/183/] for a > recent example, and it's been appearing in Erick's BadApple report too). It > looks as this is normally due to the combination of a large docset and > MemoryCodec. We should suppress MemoryCodec for these tests, on nightly runs > only if possible) -- This message was sent by Atlassian JIRA (v7.6.3#76005) - To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org For additional commands, e-mail: dev-h...@lucene.apache.org
[jira] [Commented] (LUCENE-8226) Don't use MemoryCodec for nightly runs of TestIndexSorting
[ https://issues.apache.org/jira/browse/LUCENE-8226?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel=16429500#comment-16429500 ] ASF subversion and git services commented on LUCENE-8226: - Commit 3692314040b1f95ce11282ad2eb32a16efd59c88 in lucene-solr's branch refs/heads/branch_7x from [~romseygeek] [ https://git-wip-us.apache.org/repos/asf?p=lucene-solr.git;h=3692314 ] LUCENE-8226: Don't generate unnecessarily massive indexes for index vs query sorting test > Don't use MemoryCodec for nightly runs of TestIndexSorting > -- > > Key: LUCENE-8226 > URL: https://issues.apache.org/jira/browse/LUCENE-8226 > Project: Lucene - Core > Issue Type: Task >Reporter: Alan Woodward >Priority: Major > Attachments: LUCENE-8226.patch, LUCENE-8226.patch > > > Nightly runs of TestIndexSorting fail occasionally with OOM (see > [https://builds.apache.org/job/Lucene-Solr-NightlyTests-7.x/183/] for a > recent example, and it's been appearing in Erick's BadApple report too). It > looks as this is normally due to the combination of a large docset and > MemoryCodec. We should suppress MemoryCodec for these tests, on nightly runs > only if possible) -- This message was sent by Atlassian JIRA (v7.6.3#76005) - To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org For additional commands, e-mail: dev-h...@lucene.apache.org
[jira] [Commented] (LUCENE-8242) Rename IndexSearcher.createNormalizedWeight()
[ https://issues.apache.org/jira/browse/LUCENE-8242?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel=16429476#comment-16429476 ] Alan Woodward commented on LUCENE-8242: --- Here's a patch deprecating the method, and removing all uses of it. > Rename IndexSearcher.createNormalizedWeight() > - > > Key: LUCENE-8242 > URL: https://issues.apache.org/jira/browse/LUCENE-8242 > Project: Lucene - Core > Issue Type: Improvement >Reporter: Alan Woodward >Assignee: Alan Woodward >Priority: Major > Attachments: LUCENE-8242.patch > > > We don't have Weight normalization since LUCENE-7368, so this method name is > just plain wrong. -- This message was sent by Atlassian JIRA (v7.6.3#76005) - To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org For additional commands, e-mail: dev-h...@lucene.apache.org
[jira] [Updated] (LUCENE-8242) Rename IndexSearcher.createNormalizedWeight()
[ https://issues.apache.org/jira/browse/LUCENE-8242?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel ] Alan Woodward updated LUCENE-8242: -- Attachment: LUCENE-8242.patch > Rename IndexSearcher.createNormalizedWeight() > - > > Key: LUCENE-8242 > URL: https://issues.apache.org/jira/browse/LUCENE-8242 > Project: Lucene - Core > Issue Type: Improvement >Reporter: Alan Woodward >Assignee: Alan Woodward >Priority: Major > Attachments: LUCENE-8242.patch > > > We don't have Weight normalization since LUCENE-7368, so this method name is > just plain wrong. -- This message was sent by Atlassian JIRA (v7.6.3#76005) - To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org For additional commands, e-mail: dev-h...@lucene.apache.org
[jira] [Commented] (LUCENE-7976) Make TieredMergePolicy respect maxSegmentSizeMB and allow singleton merges of very large segments
[ https://issues.apache.org/jira/browse/LUCENE-7976?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel=16429447#comment-16429447 ] Erick Erickson commented on LUCENE-7976: Oh, and Mike (and others): Please don't go over this with too fine a comb on my behalf. I'm grateful for any time you do want to spend of course, but I don't consider this patch in good enough shape for really serious review. Your comments already may mean that I revise the approach in a major way, that's the level I'm aiming for now. > Make TieredMergePolicy respect maxSegmentSizeMB and allow singleton merges of > very large segments > - > > Key: LUCENE-7976 > URL: https://issues.apache.org/jira/browse/LUCENE-7976 > Project: Lucene - Core > Issue Type: Improvement >Reporter: Erick Erickson >Assignee: Erick Erickson >Priority: Major > Attachments: LUCENE-7976.patch, LUCENE-7976.patch > > > We're seeing situations "in the wild" where there are very large indexes (on > disk) handled quite easily in a single Lucene index. This is particularly > true as features like docValues move data into MMapDirectory space. The > current TMP algorithm allows on the order of 50% deleted documents as per a > dev list conversation with Mike McCandless (and his blog here: > https://www.elastic.co/blog/lucenes-handling-of-deleted-documents). > Especially in the current era of very large indexes in aggregate, (think many > TB) solutions like "you need to distribute your collection over more shards" > become very costly. Additionally, the tempting "optimize" button exacerbates > the issue since once you form, say, a 100G segment (by > optimizing/forceMerging) it is not eligible for merging until 97.5G of the > docs in it are deleted (current default 5G max segment size). > The proposal here would be to add a new parameter to TMP, something like > (no, that's not serious name, suggestions > welcome) which would default to 100 (or the same behavior we have now). 
> So if I set this parameter to, say, 20%, and the max segment size stays at > 5G, the following would happen when segments were selected for merging: > > any segment with > 20% deleted documents would be merged or rewritten NO > > MATTER HOW LARGE. There are two cases, > >> the segment has < 5G "live" docs. In that case it would be merged with > >> smaller segments to bring the resulting segment up to 5G. If no smaller > >> segments exist, it would just be rewritten > >> The segment has > 5G "live" docs (the result of a forceMerge or optimize). > >> It would be rewritten into a single segment removing all deleted docs no > >> matter how big it is to start. The 100G example above would be rewritten > >> to an 80G segment for instance. > Of course this would lead to potentially much more I/O which is why the > default would be the same behavior we see now. As it stands now, though, > there's no way to recover from an optimize/forceMerge except to re-index from > scratch. We routinely see 200G-300G Lucene indexes at this point "in the > wild" with 10s of shards replicated 3 or more times. And that doesn't even > include having these over HDFS. > Alternatives welcome! Something like the above seems minimally invasive. A > new merge policy is certainly an alternative. -- This message was sent by Atlassian JIRA (v7.6.3#76005) - To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org For additional commands, e-mail: dev-h...@lucene.apache.org
[jira] [Comment Edited] (LUCENE-7976) Make TieredMergePolicy respect maxSegmentSizeMB and allow singleton merges of very large segments
[ https://issues.apache.org/jira/browse/LUCENE-7976?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel=16429433#comment-16429433 ] Erick Erickson edited comment on LUCENE-7976 at 4/7/18 4:12 PM: Thanks for taking the time Mike! Going in reverse order: bq: I wonder if we could just relax TMP to allow it to consider merges with fewer than maxMergeAtOnce, and then "improve" the scoring function to give a good score to cases that would reclaim > X% deletions? Interesting. At this point in the change cycle I've got my head around most of what's going on and can think about how to do it better. We'd want to tweak things I should think so scoring could return "don't do this merge at all"? I'm thinking of the case where we have, say, 1 max-sized (or bigger) segment as a candidate with a few deletions, we wouldn't want to merge that at all, right? I'm thinking some threshold score above which we score it as "don't bother" The number of changes I'm introducing here does make me nervous, I wonder if taking a fresh look at it with an eye toward just doing the above would lead to less surgery I mean this has been working fine for years, I do worry that I'm introducing bugs... I don't mind throwing away a bunch of work if smaller changes can cure my problem. bq: I think what you mean is you want to change the forceMerge and forceMergeDeletes APIs in IndexWriter Right, that would have been the consequence. But I changed my mind on this yesterday, I don't think any Lucene API change is needed after all. What I did instead (not in the current patch) is default the Solr "update" command to pass Integer.MAX_VALUE for the max number of segments to forceMerge. That just flows into the TMP code without changing the API and lets maxMergedSegmentBytes control how many segments are created. Anyone who wants the old behavior needs to pass 1 like the default is now. bq: I think it's a bug that findForceMergeDeletes doesn't do the same thing OK, let me look this over again. 
Yesterday I started to see the differences between forceMerge and forceMergeDeletes and thought they should stay separate, but you seem to be saying the idea of combining them is worth exploring. I'll revisit this again this weekend. Wouldn't making that work require changing the findForceMergeDeletes interface? I'm perfectly willing but didn't want to do that without discussion. And it seems that then findForcedDeletesMerges and findForcedMerges would be very thin wrappers around the same code for both Or were you thinking of handling this differently? Thanks again... was (Author: erickerickson): Thanks for taking the time Mike! Going in reverse order: bq: I wonder if we could just relax TMP to allow it to consider merges with fewer than maxMergeAtOnce, and then "improve" the scoring function to give a good score to cases that would reclaim > X% deletions? Interesting. At this point in the change cycle I've got my head around most of what's going on and can think about how to do it better. We'd want to tweak things I should think so scoring could return "don't do this merge at all"? I'm thinking of the case where we have, say, 1 max-sized (or bigger) segment as a candidate with a few deletions, we wouldn't want to merge that at all, right? I'm thinking some threshold score above which we score it as "don't bother" The number of changes I'm introducing here does make me nervous, I wonder if taking a fresh look at it with an eye toward just doing the above would lead to less surgery I mean this has been working fine for years, I do worry that I'm introducing bugs... I don't mind throwing away a bunch of work if smaller changes can cure my problem. bq: I think what you mean is you want to change the forceMerge and forceMergeDeletes APIs in IndexWriter Right, that would have been the consequence. But I changed my mind on this yesterday, I don't think any Lucene API change is needed after all. 
What I did instead (not in the current patch) is default the Solr "update" command to Integer.MAX_VALUE for forceMerge. That just flows into the TMP code without changing the API and lets maxMergedSegmentBytes control how many segments are created. Anyone who wants the old behavior needs to pass 1, which is a change in behavior. bq: I think it's a bug that findForceMergeDeletes doesn't do the same thing OK, let me look this over again. Yesterday I started to see the differences between forceMerge and forceMergeDeletes and thought they should stay separate, but you seem to be saying the idea of combining them is worth exploring. I'll revisit this again this weekend. Wouldn't making that work require changing the findForceMergeDeletes interface? I'm perfectly willing but didn't want to do that without discussion. And it seems that then findForcedDeletesMerges and findForcedMerges would be very
[JENKINS] Lucene-Solr-NightlyTests-7.x - Build # 194 - Still unstable
Build: https://builds.apache.org/job/Lucene-Solr-NightlyTests-7.x/194/ 8 tests failed. FAILED: org.apache.solr.TestDistributedSearch.test Error Message: Captured an uncaught exception in thread: Thread[id=57314, name=Thread-50886, state=RUNNABLE, group=TGRP-TestDistributedSearch] Stack Trace: com.carrotsearch.randomizedtesting.UncaughtExceptionError: Captured an uncaught exception in thread: Thread[id=57314, name=Thread-50886, state=RUNNABLE, group=TGRP-TestDistributedSearch] at __randomizedtesting.SeedInfo.seed([94F000C2E7ADD4A4:1CA43F184951B95C]:0) Caused by: java.lang.AssertionError: Expected to find shardAddress in the up shard info: {error=org.apache.solr.client.solrj.SolrServerException: Time allowed to handle this request exceeded,trace=org.apache.solr.client.solrj.SolrServerException: Time allowed to handle this request exceeded at org.apache.solr.client.solrj.impl.LBHttpSolrClient.request(LBHttpSolrClient.java:460) at org.apache.solr.handler.component.HttpShardHandlerFactory.makeLoadBalancedRequest(HttpShardHandlerFactory.java:275) at org.apache.solr.handler.component.HttpShardHandler.lambda$submit$0(HttpShardHandler.java:175) at java.util.concurrent.FutureTask.run(FutureTask.java:266) at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:511) at java.util.concurrent.FutureTask.run(FutureTask.java:266) at com.codahale.metrics.InstrumentedExecutorService$InstrumentedRunnable.run(InstrumentedExecutorService.java:176) at org.apache.solr.common.util.ExecutorUtil$MDCAwareThreadPoolExecutor.lambda$execute$0(ExecutorUtil.java:192) at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149) at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624) at java.lang.Thread.run(Thread.java:748) ,time=28} at __randomizedtesting.SeedInfo.seed([94F000C2E7ADD4A4]:0) at org.junit.Assert.fail(Assert.java:93) at org.junit.Assert.assertTrue(Assert.java:43) at 
org.apache.solr.TestDistributedSearch.comparePartialResponses(TestDistributedSearch.java:1191) at org.apache.solr.TestDistributedSearch$1.run(TestDistributedSearch.java:1147) FAILED: org.apache.solr.cloud.ChaosMonkeyNothingIsSafeTest.test Error Message: Timeout occured while waiting response from server at: http://127.0.0.1:39697/collection1 Stack Trace: org.apache.solr.client.solrj.SolrServerException: Timeout occured while waiting response from server at: http://127.0.0.1:39697/collection1 at __randomizedtesting.SeedInfo.seed([94F000C2E7ADD4A4:1CA43F184951B95C]:0) at org.apache.solr.client.solrj.impl.HttpSolrClient.executeMethod(HttpSolrClient.java:654) at org.apache.solr.client.solrj.impl.HttpSolrClient.request(HttpSolrClient.java:255) at org.apache.solr.client.solrj.impl.HttpSolrClient.request(HttpSolrClient.java:244) at org.apache.solr.client.solrj.impl.LBHttpSolrClient.doRequest(LBHttpSolrClient.java:483) at org.apache.solr.client.solrj.impl.LBHttpSolrClient.request(LBHttpSolrClient.java:413) at org.apache.solr.client.solrj.impl.CloudSolrClient.sendRequest(CloudSolrClient.java:1106) at org.apache.solr.client.solrj.impl.CloudSolrClient.requestWithRetryOnStaleState(CloudSolrClient.java:886) at org.apache.solr.client.solrj.impl.CloudSolrClient.request(CloudSolrClient.java:819) at org.apache.solr.client.solrj.SolrRequest.process(SolrRequest.java:194) at org.apache.solr.client.solrj.SolrClient.commit(SolrClient.java:484) at org.apache.solr.client.solrj.SolrClient.commit(SolrClient.java:463) at org.apache.solr.cloud.AbstractFullDistribZkTestBase.commit(AbstractFullDistribZkTestBase.java:1591) at org.apache.solr.cloud.ChaosMonkeyNothingIsSafeTest.test(ChaosMonkeyNothingIsSafeTest.java:212) at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62) at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at 
java.lang.reflect.Method.invoke(Method.java:498) at com.carrotsearch.randomizedtesting.RandomizedRunner.invoke(RandomizedRunner.java:1737) at com.carrotsearch.randomizedtesting.RandomizedRunner$8.evaluate(RandomizedRunner.java:934) at com.carrotsearch.randomizedtesting.RandomizedRunner$9.evaluate(RandomizedRunner.java:970) at com.carrotsearch.randomizedtesting.RandomizedRunner$10.evaluate(RandomizedRunner.java:984) at org.apache.solr.BaseDistributedSearchTestCase$ShardsRepeatRule$ShardsFixedStatement.callStatement(BaseDistributedSearchTestCase.java:993) at org.apache.solr.BaseDistributedSearchTestCase$ShardsRepeatRule$ShardsStatement.evaluate(BaseDistributedSearchTestCase.java:968) at
[jira] [Commented] (LUCENE-7976) Make TieredMergePolicy respect maxSegmentSizeMB and allow singleton merges of very large segments
[ https://issues.apache.org/jira/browse/LUCENE-7976?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel=16429433#comment-16429433 ] Erick Erickson commented on LUCENE-7976: Thanks for taking the time Mike! Going in reverse order: bq: I wonder if we could just relax TMP to allow it to consider merges with fewer than maxMergeAtOnce, and then "improve" the scoring function to give a good score to cases that would reclaim > X% deletions? Interesting. At this point in the change cycle I've got my head around most of what's going on and can think about how to do it better. We'd want to tweak things I should think so scoring could return "don't do this merge at all"? I'm thinking of the case where we have, say, 1 max-sized (or bigger) segment as a candidate with a few deletions, we wouldn't want to merge that at all, right? I'm thinking some threshold score above which we score it as "don't bother" The number of changes I'm introducing here does make me nervous, I wonder if taking a fresh look at it with an eye toward just doing the above would lead to less surgery I mean this has been working fine for years, I do worry that I'm introducing bugs... I don't mind throwing away a bunch of work if smaller changes can cure my problem. bq: I think what you mean is you want to change the forceMerge and forceMergeDeletes APIs in IndexWriter Right, that would have been the consequence. But I changed my mind on this yesterday, I don't think any Lucene API change is needed after all. What I did instead (not in the current patch) is default the Solr "update" command to Integer.MAX_VALUE for forceMerge. That just flows into the TMP code without changing the API and lets maxMergedSegmentBytes control how many segments are created. Anyone who wants the old behavior needs to pass 1, which is a change in behavior. bq: I think it's a bug that findForceMergeDeletes doesn't do the same thing OK, let me look this over again. 
Yesterday I started to see the differences between forceMerge and forceMergeDeletes and thought they should stay separate, but you seem to be saying the idea of combining them is worth exploring. I'll revisit this again this weekend. Wouldn't making that work require changing the findForceMergeDeletes interface? I'm perfectly willing but didn't want to do that without discussion. And it seems that then findForcedDeletesMerges and findForcedMerges would be very thin wrappers around the same code for both Or were you thinking of handling this differently? Thanks again... > Make TieredMergePolicy respect maxSegmentSizeMB and allow singleton merges of > very large segments > - > > Key: LUCENE-7976 > URL: https://issues.apache.org/jira/browse/LUCENE-7976 > Project: Lucene - Core > Issue Type: Improvement >Reporter: Erick Erickson >Assignee: Erick Erickson >Priority: Major > Attachments: LUCENE-7976.patch, LUCENE-7976.patch > > > We're seeing situations "in the wild" where there are very large indexes (on > disk) handled quite easily in a single Lucene index. This is particularly > true as features like docValues move data into MMapDirectory space. The > current TMP algorithm allows on the order of 50% deleted documents as per a > dev list conversation with Mike McCandless (and his blog here: > https://www.elastic.co/blog/lucenes-handling-of-deleted-documents). > Especially in the current era of very large indexes in aggregate, (think many > TB) solutions like "you need to distribute your collection over more shards" > become very costly. Additionally, the tempting "optimize" button exacerbates > the issue since once you form, say, a 100G segment (by > optimizing/forceMerging) it is not eligible for merging until 97.5G of the > docs in it are deleted (current default 5G max segment size). 
> The proposal here would be to add a new parameter to TMP, something like > (no, that's not serious name, suggestions > welcome) which would default to 100 (or the same behavior we have now). > So if I set this parameter to, say, 20%, and the max segment size stays at > 5G, the following would happen when segments were selected for merging: > > any segment with > 20% deleted documents would be merged or rewritten NO > > MATTER HOW LARGE. There are two cases, > >> the segment has < 5G "live" docs. In that case it would be merged with > >> smaller segments to bring the resulting segment up to 5G. If no smaller > >> segments exist, it would just be rewritten > >> The segment has > 5G "live" docs (the result of a forceMerge or optimize). > >> It would be rewritten into a single segment removing all deleted docs no > >> matter how big it is to start. The 100G example above would be rewritten > >> to an 80G
[jira] [Commented] (SOLR-12200) ZkControllerTest failure. Leaking Overseer
[ https://issues.apache.org/jira/browse/SOLR-12200?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel=16429365#comment-16429365 ] Mikhail Khludnev commented on SOLR-12200: - [^tests-failures.txt.gz] beasted locally > ZkControllerTest failure. Leaking Overseer > -- > > Key: SOLR-12200 > URL: https://issues.apache.org/jira/browse/SOLR-12200 > Project: Solr > Issue Type: Bug > Security Level: Public(Default Security Level. Issues are Public) > Components: SolrCloud >Reporter: Mikhail Khludnev >Priority: Major > Attachments: tests-failures.txt.gz, zk.fail.txt.gz > > > Failure seems suspiciously the same. >[junit4] 2> 499919 INFO > (TEST-ZkControllerTest.testReadConfigName-seed#[BC856CC565039E77]) > [n:127.0.0.1:8983_solr] o.a.s.c.Overseer Overseer > (id=73578760132362243-127.0.0.1:8983_solr-n_00) closing >[junit4] 2> 499920 INFO > (OverseerStateUpdate-73578760132362243-127.0.0.1:8983_solr-n_00) [ > ] o.a.s.c.Overseer Overseer Loop exiting : 127.0.0.1:8983_solr >[junit4] 2> 499920 ERROR > (OverseerCollectionConfigSetProcessor-73578760132362243-127.0.0.1:8983_solr-n_00) > [] o.a.s.c.OverseerTaskProcessor Unable to prioritize overseer >[junit4] 2> java.lang.InterruptedException: null >[junit4] 2>at java.lang.Object.wait(Native Method) ~[?:1.8.0_152] >[junit4] 2>at java.lang.Object.wait(Object.java:502) > ~[?:1.8.0_152] >[junit4] 2>at > org.apache.zookeeper.ClientCnxn.submitRequest(ClientCnxn.java:1409) > ~[zookeeper-3.4.11.jar:3.4 > then it spins in SessionExpiredException, all tests pass but suite fails due > to leaking Overseer. -- This message was sent by Atlassian JIRA (v7.6.3#76005) - To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org For additional commands, e-mail: dev-h...@lucene.apache.org
[jira] [Updated] (SOLR-12200) ZkControllerTest failure. Leaking Overseer
[ https://issues.apache.org/jira/browse/SOLR-12200?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel ] Mikhail Khludnev updated SOLR-12200: Attachment: tests-failures.txt.gz > ZkControllerTest failure. Leaking Overseer > -- > > Key: SOLR-12200 > URL: https://issues.apache.org/jira/browse/SOLR-12200 > Project: Solr > Issue Type: Bug > Security Level: Public(Default Security Level. Issues are Public) > Components: SolrCloud >Reporter: Mikhail Khludnev >Priority: Major > Attachments: tests-failures.txt.gz, zk.fail.txt.gz > > > Failure seems suspiciously the same. >[junit4] 2> 499919 INFO > (TEST-ZkControllerTest.testReadConfigName-seed#[BC856CC565039E77]) > [n:127.0.0.1:8983_solr] o.a.s.c.Overseer Overseer > (id=73578760132362243-127.0.0.1:8983_solr-n_00) closing >[junit4] 2> 499920 INFO > (OverseerStateUpdate-73578760132362243-127.0.0.1:8983_solr-n_00) [ > ] o.a.s.c.Overseer Overseer Loop exiting : 127.0.0.1:8983_solr >[junit4] 2> 499920 ERROR > (OverseerCollectionConfigSetProcessor-73578760132362243-127.0.0.1:8983_solr-n_00) > [] o.a.s.c.OverseerTaskProcessor Unable to prioritize overseer >[junit4] 2> java.lang.InterruptedException: null >[junit4] 2>at java.lang.Object.wait(Native Method) ~[?:1.8.0_152] >[junit4] 2>at java.lang.Object.wait(Object.java:502) > ~[?:1.8.0_152] >[junit4] 2>at > org.apache.zookeeper.ClientCnxn.submitRequest(ClientCnxn.java:1409) > ~[zookeeper-3.4.11.jar:3.4 > then it spins in SessionExpiredException, all tests pass but suite fails due > to leaking Overseer. -- This message was sent by Atlassian JIRA (v7.6.3#76005) - To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org For additional commands, e-mail: dev-h...@lucene.apache.org
[jira] [Updated] (SOLR-12200) ZkControllerTest failure. Leaking Overseer
[ https://issues.apache.org/jira/browse/SOLR-12200?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel ] Mikhail Khludnev updated SOLR-12200: Summary: ZkControllerTest failure. Leaking Overseer (was: ZkControllerTest failure) > ZkControllerTest failure. Leaking Overseer > -- > > Key: SOLR-12200 > URL: https://issues.apache.org/jira/browse/SOLR-12200 > Project: Solr > Issue Type: Bug > Security Level: Public(Default Security Level. Issues are Public) > Components: SolrCloud >Reporter: Mikhail Khludnev >Priority: Major > Attachments: zk.fail.txt.gz > > > Failure seems suspiciously the same. >[junit4] 2> 499919 INFO > (TEST-ZkControllerTest.testReadConfigName-seed#[BC856CC565039E77]) > [n:127.0.0.1:8983_solr] o.a.s.c.Overseer Overseer > (id=73578760132362243-127.0.0.1:8983_solr-n_00) closing >[junit4] 2> 499920 INFO > (OverseerStateUpdate-73578760132362243-127.0.0.1:8983_solr-n_00) [ > ] o.a.s.c.Overseer Overseer Loop exiting : 127.0.0.1:8983_solr >[junit4] 2> 499920 ERROR > (OverseerCollectionConfigSetProcessor-73578760132362243-127.0.0.1:8983_solr-n_00) > [] o.a.s.c.OverseerTaskProcessor Unable to prioritize overseer >[junit4] 2> java.lang.InterruptedException: null >[junit4] 2>at java.lang.Object.wait(Native Method) ~[?:1.8.0_152] >[junit4] 2>at java.lang.Object.wait(Object.java:502) > ~[?:1.8.0_152] >[junit4] 2>at > org.apache.zookeeper.ClientCnxn.submitRequest(ClientCnxn.java:1409) > ~[zookeeper-3.4.11.jar:3.4 > then it spins in SessionExpiredException, all tests pass but suite fails due > to leaking Overseer. -- This message was sent by Atlassian JIRA (v7.6.3#76005) - To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org For additional commands, e-mail: dev-h...@lucene.apache.org
[jira] [Updated] (SOLR-12200) ZkControllerTest failure
[ https://issues.apache.org/jira/browse/SOLR-12200?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel ] Mikhail Khludnev updated SOLR-12200: Summary: ZkControllerTest failure (was: ZkControllerTest failure probably caused by #testReadConfigName) > ZkControllerTest failure > > > Key: SOLR-12200 > URL: https://issues.apache.org/jira/browse/SOLR-12200 > Project: Solr > Issue Type: Bug > Security Level: Public(Default Security Level. Issues are Public) > Components: SolrCloud >Reporter: Mikhail Khludnev >Priority: Major > Attachments: zk.fail.txt.gz > > > Failure seems suspiciously the same. >[junit4] 2> 499919 INFO > (TEST-ZkControllerTest.testReadConfigName-seed#[BC856CC565039E77]) > [n:127.0.0.1:8983_solr] o.a.s.c.Overseer Overseer > (id=73578760132362243-127.0.0.1:8983_solr-n_00) closing >[junit4] 2> 499920 INFO > (OverseerStateUpdate-73578760132362243-127.0.0.1:8983_solr-n_00) [ > ] o.a.s.c.Overseer Overseer Loop exiting : 127.0.0.1:8983_solr >[junit4] 2> 499920 ERROR > (OverseerCollectionConfigSetProcessor-73578760132362243-127.0.0.1:8983_solr-n_00) > [] o.a.s.c.OverseerTaskProcessor Unable to prioritize overseer >[junit4] 2> java.lang.InterruptedException: null >[junit4] 2>at java.lang.Object.wait(Native Method) ~[?:1.8.0_152] >[junit4] 2>at java.lang.Object.wait(Object.java:502) > ~[?:1.8.0_152] >[junit4] 2>at > org.apache.zookeeper.ClientCnxn.submitRequest(ClientCnxn.java:1409) > ~[zookeeper-3.4.11.jar:3.4 > then it spins in SessionExpiredException, all tests pass but suite fails due > to leaking Overseer. -- This message was sent by Atlassian JIRA (v7.6.3#76005) - To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org For additional commands, e-mail: dev-h...@lucene.apache.org
Re: Unsubscribe
On 7 April 2018 at 18:52, Sarthak Sugandhi wrote: > Hi Team > > I want to unsubscribe from gene...@lucene.apache.org, > dev@lucene.apache.org and java-u...@lucene.apache.org. > Please help. > Solr mailing lists follow standard subscription/unsubscription procedures used by most mailing lists for open-source software, and you would have received an email about this when subscribing. Please see http://lucene.apache.org/solr/community.html#mailing-lists-irc for how to unsubscribe from the lists. Regards, Gora
Unsubscribe
Hi Team I want to unsubscribe from gene...@lucene.apache.org, dev@lucene.apache.org and java-u...@lucene.apache.org. Please help. Thanks, Sarthak
[JENKINS] Lucene-Solr-master-Linux (64bit/jdk-10) - Build # 21772 - Unstable!
Error processing tokens: Error while parsing action 'Text/ZeroOrMore/FirstOf/Token/DelimitedToken/DelimitedToken_Action3' at input position (line 79, pos 4): )"} ^ java.lang.OutOfMemoryError: Java heap space - To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org For additional commands, e-mail: dev-h...@lucene.apache.org
[jira] [Commented] (LUCENE-8242) Rename IndexSearcher.createNormalizedWeight()
[ https://issues.apache.org/jira/browse/LUCENE-8242?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel=16429322#comment-16429322 ] Robert Muir commented on LUCENE-8242: - Please remove it, I see zero advantage in renaming it. > Rename IndexSearcher.createNormalizedWeight() > - > > Key: LUCENE-8242 > URL: https://issues.apache.org/jira/browse/LUCENE-8242 > Project: Lucene - Core > Issue Type: Improvement >Reporter: Alan Woodward >Assignee: Alan Woodward >Priority: Major > > We don't have Weight normalization since LUCENE-7368, so this method name is > just plain wrong. -- This message was sent by Atlassian JIRA (v7.6.3#76005) - To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org For additional commands, e-mail: dev-h...@lucene.apache.org
[jira] [Updated] (SOLR-12155) Solr 7.2.1 deadlock in UnInvertedField.getUnInvertedField()
[ https://issues.apache.org/jira/browse/SOLR-12155?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel ] Mikhail Khludnev updated SOLR-12155: Attachment: SOLR-12155.patch > Solr 7.2.1 deadlock in UnInvertedField.getUnInvertedField() > > > Key: SOLR-12155 > URL: https://issues.apache.org/jira/browse/SOLR-12155 > Project: Solr > Issue Type: Bug > Security Level: Public(Default Security Level. Issues are Public) >Affects Versions: 7.2.1 >Reporter: Kishor gandham >Assignee: Mikhail Khludnev >Priority: Major > Fix For: 7.4 > > Attachments: SOLR-12155.patch, SOLR-12155.patch, SOLR-12155.patch, > SOLR-12155.patch, SOLR-12155.patch, stack.txt > > > I am attaching a stack trace from our production Solr (7.2.1). Occasionally, > we are seeing SOLR becoming unresponsive. We are then forced to kill the JVM > and start solr again. > We have a lot of facet queries and our index has approximately 15 million > documents. We have recently started using json.facet queries and some of the > facet fields use DocValues. -- This message was sent by Atlassian JIRA (v7.6.3#76005) - To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org For additional commands, e-mail: dev-h...@lucene.apache.org
[jira] [Updated] (SOLR-12155) Solr 7.2.1 deadlock in UnInvertedField.getUnInvertedField()
[ https://issues.apache.org/jira/browse/SOLR-12155?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel ] Mikhail Khludnev updated SOLR-12155: Attachment: (was: SOLR-12155.patch) > Solr 7.2.1 deadlock in UnInvertedField.getUnInvertedField() > > > Key: SOLR-12155 > URL: https://issues.apache.org/jira/browse/SOLR-12155 > Project: Solr > Issue Type: Bug > Security Level: Public(Default Security Level. Issues are Public) >Affects Versions: 7.2.1 >Reporter: Kishor gandham >Assignee: Mikhail Khludnev >Priority: Major > Fix For: 7.4 > > Attachments: SOLR-12155.patch, SOLR-12155.patch, SOLR-12155.patch, > SOLR-12155.patch, stack.txt > > > I am attaching a stack trace from our production Solr (7.2.1). Occasionally, > we are seeing SOLR becoming unresponsive. We are then forced to kill the JVM > and start solr again. > We have a lot of facet queries and our index has approximately 15 million > documents. We have recently started using json.facet queries and some of the > facet fields use DocValues. -- This message was sent by Atlassian JIRA (v7.6.3#76005) - To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org For additional commands, e-mail: dev-h...@lucene.apache.org
[jira] [Commented] (SOLR-12155) Solr 7.2.1 deadlock in UnInvertedField.getUnInvertedField()
[ https://issues.apache.org/jira/browse/SOLR-12155?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel=16429321#comment-16429321 ] Mikhail Khludnev commented on SOLR-12155: - [^SOLR-12155.patch] convincing Forbidden API... > Solr 7.2.1 deadlock in UnInvertedField.getUnInvertedField() > > > Key: SOLR-12155 > URL: https://issues.apache.org/jira/browse/SOLR-12155 > Project: Solr > Issue Type: Bug > Security Level: Public(Default Security Level. Issues are Public) >Affects Versions: 7.2.1 >Reporter: Kishor gandham >Assignee: Mikhail Khludnev >Priority: Major > Fix For: 7.4 > > Attachments: SOLR-12155.patch, SOLR-12155.patch, SOLR-12155.patch, > SOLR-12155.patch, SOLR-12155.patch, stack.txt > > > I am attaching a stack trace from our production Solr (7.2.1). Occasionally, > we are seeing SOLR becoming unresponsive. We are then forced to kill the JVM > and start solr again. > We have a lot of facet queries and our index has approximately 15 million > documents. We have recently started using json.facet queries and some of the > facet fields use DocValues. -- This message was sent by Atlassian JIRA (v7.6.3#76005) - To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org For additional commands, e-mail: dev-h...@lucene.apache.org
[jira] [Updated] (SOLR-12155) Solr 7.2.1 deadlock in UnInvertedField.getUnInvertedField()
[ https://issues.apache.org/jira/browse/SOLR-12155?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel ] Mikhail Khludnev updated SOLR-12155: Attachment: SOLR-12155.patch > Solr 7.2.1 deadlock in UnInvertedField.getUnInvertedField() > > > Key: SOLR-12155 > URL: https://issues.apache.org/jira/browse/SOLR-12155 > Project: Solr > Issue Type: Bug > Security Level: Public(Default Security Level. Issues are Public) >Affects Versions: 7.2.1 >Reporter: Kishor gandham >Assignee: Mikhail Khludnev >Priority: Major > Fix For: 7.4 > > Attachments: SOLR-12155.patch, SOLR-12155.patch, SOLR-12155.patch, > SOLR-12155.patch, SOLR-12155.patch, stack.txt > > > I am attaching a stack trace from our production Solr (7.2.1). Occasionally, > we are seeing SOLR becoming unresponsive. We are then forced to kill the JVM > and start solr again. > We have a lot of facet queries and our index has approximately 15 million > documents. We have recently started using json.facet queries and some of the > facet fields use DocValues. -- This message was sent by Atlassian JIRA (v7.6.3#76005) - To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org For additional commands, e-mail: dev-h...@lucene.apache.org
[jira] [Commented] (SOLR-12201) TestReplicationHandler.doTestIndexFetchOnMasterRestart(): unexpected replication failures
[ https://issues.apache.org/jira/browse/SOLR-12201?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel=16429313#comment-16429313 ] Mikhail Khludnev commented on SOLR-12201: - reproduced locally bq. ant test -Dtestcase=TestReplicationHandler -Dtests.method=doTestIndexFetchOnMasterRestart -Dtests.seed=C1A11EE85E7B0C57 -Dtests.multiplier=2 -Dtests.slow=true -Dtests.badapples=true -Dtests.locale=en -Dtests.timezone=Europe/Isle_of_Man -Dtests.asserts=true -Dtests.file.encoding=UTF-8 {code} [junit4] 2> NOTE: reproduce with: ant test -Dtestcase=TestReplicationHandler -Dtests.method=doTestIndexFetchOnMasterRestart -Dtests.seed=C1A11EE85E7B0C57 -Dtests.multiplier=2 -Dtests.slow=true -Dtests.badapples=true -Dtests.locale=en -Dtests.timezone=Europe/Isle_of_Man -Dtests.asserts=true -Dtests.file.encoding=UTF-8 [junit4] FAILURE 145s | TestReplicationHandler.doTestIndexFetchOnMasterRestart <<< [junit4]> Throwable #1: java.lang.AssertionError: expected:<1> but was:<3> [junit4]>at __randomizedtesting.SeedInfo.seed([C1A11EE85E7B0C57:1956DA0CF5A0CE0B]:0) [junit4]>at org.apache.solr.handler.TestReplicationHandler.doTestIndexFetchOnMasterRestart(TestReplicationHandler.java:666) {code} > TestReplicationHandler.doTestIndexFetchOnMasterRestart(): unexpected > replication failures > - > > Key: SOLR-12201 > URL: https://issues.apache.org/jira/browse/SOLR-12201 > Project: Solr > Issue Type: Bug > Security Level: Public(Default Security Level. Issues are Public) >Reporter: Steve Rowe >Priority: Major > > This is a BadApple'd test, and in local beasting failed 31/100 iterations. > E.g. 
from > [https://builds.apache.org/job/Lucene-Solr-BadApples-Tests-master/24/]: > {noformat} >[junit4] 1> SHALIN: > {responseHeader={status=0,QTime=150},details={indexSize=11.2 > KB,indexPath=/home/jenkins/jenkins-slave/workspace/PreCommit-SOLR-Build/sourcedir/solr/build/solr-core/test/J1/temp/solr.handler.TestReplicationHandler_C1A11EE85E7B0C57-001/solr-instance-008/./collection1/data/index/,commits=[{indexVersion=1523043739675,generation=2,filelist=[_0.fdt, > _0.fdx, _0.fnm, _0.nvd, _0.nvm, _0.si, _0_FSTOrd50_0.doc, _0_FSTOrd50_0.tbk, > _0_FSTOrd50_0.tix, > segments_2]}],isMaster=false,isSlave=true,indexVersion=1523043739675,generation=2,slave={masterDetails={indexSize=11.27 > > KB,indexPath=/home/jenkins/jenkins-slave/workspace/PreCommit-SOLR-Build/sourcedir/solr/build/solr-core/test/J1/temp/solr.handler.TestReplicationHandler_C1A11EE85E7B0C57-001/solr-instance-007/./collection1/data/index/,commits=[{indexVersion=0,generation=1,filelist=[segments_1]}, > {indexVersion=1523043739675,generation=2,filelist=[_0.fdt, _0.fdx, _0.fnm, > _0.nvd, _0.nvm, _0.si, _0_FSTOrd50_0.doc, _0_FSTOrd50_0.tbk, > _0_FSTOrd50_0.tix, > segments_2]}],isMaster=true,isSlave=false,indexVersion=1523043739675,generation=2,master={confFiles=schema.xml,replicateAfter=[commit, > > startup],replicationEnabled=true,replicableVersion=1523043739675,replicableGeneration=2}},masterUrl=http://127.0.0.1:36880/solr/collection1,pollInterval=00:00:01,nextExecutionAt=Fri > Apr 06 20:42:21 BST 2018,indexReplicatedAt=Fri Apr 06 20:42:20 BST > 2018,indexReplicatedAtList=[Fri Apr 06 20:42:20 BST 2018, Fri Apr 06 20:42:17 > BST 2018],replicationFailedAtList=[Fri Apr 06 20:42:17 BST > 2018],timesIndexReplicated=2,lastCycleBytesDownloaded=11650,timesFailed=1,replicationFailedAt=Fri > Apr 06 20:42:17 BST 2018,previousCycleTimeInSeconds=0,currentDate=Fri Apr 06 > 20:42:21 BST 2018,isPollingDisabled=false,isReplicating=false}}} > [...] 
>[junit4] 2> NOTE: reproduce with: ant test > -Dtestcase=TestReplicationHandler > -Dtests.method=doTestIndexFetchOnMasterRestart -Dtests.seed=C1A11EE85E7B0C57 > -Dtests.multiplier=2 -Dtests.slow=true -Dtests.badapples=true > -Dtests.locale=en -Dtests.timezone=Europe/Isle_of_Man -Dtests.asserts=true > -Dtests.file.encoding=UTF-8 >[junit4] FAILURE 9.39s J1 | > TestReplicationHandler.doTestIndexFetchOnMasterRestart <<< >[junit4]> Throwable #1: java.lang.AssertionError: expected:<1> but > was:<2> >[junit4]> at > __randomizedtesting.SeedInfo.seed([C1A11EE85E7B0C57:1956DA0CF5A0CE0B]:0) >[junit4]> at > org.apache.solr.handler.TestReplicationHandler.doTestIndexFetchOnMasterRestart(TestReplicationHandler.java:666) >[junit4]> at java.lang.Thread.run(Thread.java:748) > {noformat} > The failed assertion is on line 666: > {code:java|title=TestReplicationHandler.java} > 666:assertEquals(1, > Integer.parseInt(getSlaveDetails("timesIndexReplicated"))); > 667:String timesFailed = getSlaveDetails("timesFailed"); > 668:assertEquals(0, Integer.parseInt(timesFailed != null ? timesFailed : >
[GitHub] lucene-solr issue #345: LUCENE-8229: Add Weight.matches() method
Github user romseygeek commented on the issue: https://github.com/apache/lucene-solr/pull/345 I added an assertion to QueryUtils that calls Weight.matches() for each collected document and ensures that the response isn't null (this actually caught a bug in MTQ's handling). I've also rejigged things so that you can call Matches.getMatches(field) multiple times, although the call isn't thread-safe. --- - To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org For additional commands, e-mail: dev-h...@lucene.apache.org
[GitHub] lucene-solr pull request #345: LUCENE-8229: Add Weight.matches() method
Github user romseygeek commented on a diff in the pull request: https://github.com/apache/lucene-solr/pull/345#discussion_r179913306 --- Diff: lucene/core/src/test/org/apache/lucene/search/TestMatchesIterator.java --- @@ -0,0 +1,317 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.lucene.search; + +import java.io.IOException; +import java.util.HashSet; +import java.util.Set; + +import org.apache.lucene.analysis.MockAnalyzer; +import org.apache.lucene.document.Document; +import org.apache.lucene.document.FieldType; +import org.apache.lucene.document.NumericDocValuesField; +import org.apache.lucene.document.TextField; +import org.apache.lucene.index.IndexOptions; +import org.apache.lucene.index.IndexReader; +import org.apache.lucene.index.LeafReaderContext; +import org.apache.lucene.index.RandomIndexWriter; +import org.apache.lucene.index.ReaderUtil; +import org.apache.lucene.index.Term; +import org.apache.lucene.store.Directory; +import org.apache.lucene.util.LuceneTestCase; + +public class TestMatchesIterator extends LuceneTestCase { + + protected IndexSearcher searcher; + protected Directory directory; + protected IndexReader reader; + + public static final String FIELD_WITH_OFFSETS = "field_offsets"; + public static final String FIELD_NO_OFFSETS = "field_no_offsets"; + + public static final FieldType OFFSETS = new FieldType(TextField.TYPE_STORED); + static { + OFFSETS.setIndexOptions(IndexOptions.DOCS_AND_FREQS_AND_POSITIONS_AND_OFFSETS); + } + + @Override + public void tearDown() throws Exception { +reader.close(); +directory.close(); +super.tearDown(); + } + + @Override + public void setUp() throws Exception { +super.setUp(); +directory = newDirectory(); +RandomIndexWriter writer = new RandomIndexWriter(random(), directory, +newIndexWriterConfig(new MockAnalyzer(random())).setMergePolicy(newLogMergePolicy())); +for (int i = 0; i < docFields.length; i++) { + Document doc = new Document(); + doc.add(newField(FIELD_WITH_OFFSETS, docFields[i], OFFSETS)); + doc.add(newField(FIELD_NO_OFFSETS, docFields[i], TextField.TYPE_STORED)); + doc.add(new NumericDocValuesField("id", i)); + doc.add(newField("id", Integer.toString(i), TextField.TYPE_STORED)); + writer.addDocument(doc); +} +writer.forceMerge(1); +reader = 
writer.getReader(); +writer.close(); +searcher = newSearcher(getOnlyLeafReader(reader)); + } + + protected String[] docFields = { + "w1 w2 w3 w4 w5", + "w1 w3 w2 w3 zz", + "w1 xx w2 yy w4", + "w1 w2 w1 w4 w2 w3", + "nothing matches this document" + }; + + void checkMatches(Query q, String field, int[][] expected) throws IOException { +Weight w = searcher.createNormalizedWeight(q, ScoreMode.COMPLETE_NO_SCORES); +for (int i = 0; i < expected.length; i++) { + LeafReaderContext ctx = searcher.leafContexts.get(ReaderUtil.subIndex(expected[i][0], searcher.leafContexts)); + int doc = expected[i][0] - ctx.docBase; + Matches matches = w.matches(ctx, doc); + if (matches == null) { +assertEquals(expected[i].length, 1); +continue; + } + MatchesIterator it = matches.getMatches(field); + checkFieldMatches(it, expected[i]); +} + } + + void checkFieldMatches(MatchesIterator it, int[] expected) throws IOException { +int pos = 1; +while (it.next()) { + //System.out.println(expected[i][pos] + "->" + expected[i][pos + 1] + "[" + expected[i][pos + 2] + "->" + expected[i][pos + 3] + "]"); + assertEquals(expected[pos], it.startPosition()); + assertEquals(expected[pos + 1], it.endPosition()); + assertEquals(expected[pos + 2], it.startOffset()); +
[GitHub] lucene-solr pull request #345: LUCENE-8229: Add Weight.matches() method
Github user romseygeek commented on a diff in the pull request: https://github.com/apache/lucene-solr/pull/345#discussion_r179913304 --- Diff: lucene/core/src/java/org/apache/lucene/search/Weight.java --- @@ -69,6 +69,21 @@ protected Weight(Query query) { */ public abstract void extractTerms(Set terms); + /** + * Returns {@link Matches} for a specific document, or {@code null} if the document + * does not match the parent query --- End diff -- ++ --- - To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org For additional commands, e-mail: dev-h...@lucene.apache.org
[JENKINS] Lucene-Solr-7.x-Linux (64bit/jdk-10) - Build # 1662 - Unstable!
Build: https://jenkins.thetaphi.de/job/Lucene-Solr-7.x-Linux/1662/ Java: 64bit/jdk-10 -XX:-UseCompressedOops -XX:+UseG1GC 1 tests failed. FAILED: org.apache.solr.cloud.autoscaling.ScheduledTriggerTest.testTrigger Error Message: expected:<3> but was:<2> Stack Trace: java.lang.AssertionError: expected:<3> but was:<2> at __randomizedtesting.SeedInfo.seed([5A0AAEDDC3E5DB7:666B9C6F45F12E9A]:0) at org.junit.Assert.fail(Assert.java:93) at org.junit.Assert.failNotEquals(Assert.java:647) at org.junit.Assert.assertEquals(Assert.java:128) at org.junit.Assert.assertEquals(Assert.java:472) at org.junit.Assert.assertEquals(Assert.java:456) at org.apache.solr.cloud.autoscaling.ScheduledTriggerTest.scheduledTriggerTest(ScheduledTriggerTest.java:111) at org.apache.solr.cloud.autoscaling.ScheduledTriggerTest.testTrigger(ScheduledTriggerTest.java:64) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:564) at com.carrotsearch.randomizedtesting.RandomizedRunner.invoke(RandomizedRunner.java:1737) at com.carrotsearch.randomizedtesting.RandomizedRunner$8.evaluate(RandomizedRunner.java:934) at com.carrotsearch.randomizedtesting.RandomizedRunner$9.evaluate(RandomizedRunner.java:970) at com.carrotsearch.randomizedtesting.RandomizedRunner$10.evaluate(RandomizedRunner.java:984) at com.carrotsearch.randomizedtesting.rules.SystemPropertiesRestoreRule$1.evaluate(SystemPropertiesRestoreRule.java:57) at org.apache.lucene.util.TestRuleSetupTeardownChained$1.evaluate(TestRuleSetupTeardownChained.java:49) at org.apache.lucene.util.AbstractBeforeAfterRule$1.evaluate(AbstractBeforeAfterRule.java:45) at org.apache.lucene.util.TestRuleThreadAndTestName$1.evaluate(TestRuleThreadAndTestName.java:48) at 
org.apache.lucene.util.TestRuleIgnoreAfterMaxFailures$1.evaluate(TestRuleIgnoreAfterMaxFailures.java:64) at org.apache.lucene.util.TestRuleMarkFailure$1.evaluate(TestRuleMarkFailure.java:47) at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36) at com.carrotsearch.randomizedtesting.ThreadLeakControl$StatementRunner.run(ThreadLeakControl.java:368) at com.carrotsearch.randomizedtesting.ThreadLeakControl.forkTimeoutingTask(ThreadLeakControl.java:817) at com.carrotsearch.randomizedtesting.ThreadLeakControl$3.evaluate(ThreadLeakControl.java:468) at com.carrotsearch.randomizedtesting.RandomizedRunner.runSingleTest(RandomizedRunner.java:943) at com.carrotsearch.randomizedtesting.RandomizedRunner$5.evaluate(RandomizedRunner.java:829) at com.carrotsearch.randomizedtesting.RandomizedRunner$6.evaluate(RandomizedRunner.java:879) at com.carrotsearch.randomizedtesting.RandomizedRunner$7.evaluate(RandomizedRunner.java:890) at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36) at com.carrotsearch.randomizedtesting.rules.SystemPropertiesRestoreRule$1.evaluate(SystemPropertiesRestoreRule.java:57) at org.apache.lucene.util.AbstractBeforeAfterRule$1.evaluate(AbstractBeforeAfterRule.java:45) at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36) at org.apache.lucene.util.TestRuleStoreClassName$1.evaluate(TestRuleStoreClassName.java:41) at com.carrotsearch.randomizedtesting.rules.NoShadowingOrOverridesOnMethodsRule$1.evaluate(NoShadowingOrOverridesOnMethodsRule.java:40) at com.carrotsearch.randomizedtesting.rules.NoShadowingOrOverridesOnMethodsRule$1.evaluate(NoShadowingOrOverridesOnMethodsRule.java:40) at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36) at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36) at 
org.apache.lucene.util.TestRuleAssertionsRequired$1.evaluate(TestRuleAssertionsRequired.java:53) at org.apache.lucene.util.TestRuleMarkFailure$1.evaluate(TestRuleMarkFailure.java:47) at org.apache.lucene.util.TestRuleIgnoreAfterMaxFailures$1.evaluate(TestRuleIgnoreAfterMaxFailures.java:64) at org.apache.lucene.util.TestRuleIgnoreTestSuites$1.evaluate(TestRuleIgnoreTestSuites.java:54) at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36) at com.carrotsearch.randomizedtesting.ThreadLeakControl$StatementRunner.run(ThreadLeakControl.java:368) at java.base/java.lang.Thread.run(Thread.java:844) Build Log: [...truncated 13064
[JENKINS] Lucene-Solr-BadApples-Tests-master - Build # 31 - Still Unstable
Build: https://builds.apache.org/job/Lucene-Solr-BadApples-Tests-master/31/ 2 tests failed. FAILED: org.apache.solr.cloud.TestAuthenticationFramework.testBasics Error Message: Error from server at https://127.0.0.1:57804/solr/testcollection_shard1_replica_n3: Expected mime type application/octet-stream but got text/html.Error 404 Can not find: /solr/testcollection_shard1_replica_n3/update HTTP ERROR 404 Problem accessing /solr/testcollection_shard1_replica_n3/update. Reason: Can not find: /solr/testcollection_shard1_replica_n3/updatehttp://eclipse.org/jetty;>Powered by Jetty:// 9.4.8.v20171121 Stack Trace: org.apache.solr.client.solrj.impl.CloudSolrClient$RouteException: Error from server at https://127.0.0.1:57804/solr/testcollection_shard1_replica_n3: Expected mime type application/octet-stream but got text/html. Error 404 Can not find: /solr/testcollection_shard1_replica_n3/update HTTP ERROR 404 Problem accessing /solr/testcollection_shard1_replica_n3/update. Reason: Can not find: /solr/testcollection_shard1_replica_n3/updatehttp://eclipse.org/jetty;>Powered by Jetty:// 9.4.8.v20171121 at __randomizedtesting.SeedInfo.seed([A1878ED8B1D0B67F:9C5F20F4893EE80F]:0) at org.apache.solr.client.solrj.impl.CloudSolrClient.directUpdate(CloudSolrClient.java:551) at org.apache.solr.client.solrj.impl.CloudSolrClient.sendRequest(CloudSolrClient.java:1015) at org.apache.solr.client.solrj.impl.CloudSolrClient.requestWithRetryOnStaleState(CloudSolrClient.java:886) at org.apache.solr.client.solrj.impl.CloudSolrClient.requestWithRetryOnStaleState(CloudSolrClient.java:948) at org.apache.solr.client.solrj.impl.CloudSolrClient.requestWithRetryOnStaleState(CloudSolrClient.java:948) at org.apache.solr.client.solrj.impl.CloudSolrClient.requestWithRetryOnStaleState(CloudSolrClient.java:948) at org.apache.solr.client.solrj.impl.CloudSolrClient.requestWithRetryOnStaleState(CloudSolrClient.java:948) at 
org.apache.solr.client.solrj.impl.CloudSolrClient.requestWithRetryOnStaleState(CloudSolrClient.java:948) at org.apache.solr.client.solrj.impl.CloudSolrClient.request(CloudSolrClient.java:819) at org.apache.solr.client.solrj.SolrRequest.process(SolrRequest.java:194) at org.apache.solr.client.solrj.request.UpdateRequest.commit(UpdateRequest.java:233) at org.apache.solr.cloud.TestAuthenticationFramework.collectionCreateSearchDeleteTwice(TestAuthenticationFramework.java:127) at org.apache.solr.cloud.TestAuthenticationFramework.testBasics(TestAuthenticationFramework.java:75) at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62) at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.lang.reflect.Method.invoke(Method.java:498) at com.carrotsearch.randomizedtesting.RandomizedRunner.invoke(RandomizedRunner.java:1737) at com.carrotsearch.randomizedtesting.RandomizedRunner$8.evaluate(RandomizedRunner.java:934) at com.carrotsearch.randomizedtesting.RandomizedRunner$9.evaluate(RandomizedRunner.java:970) at com.carrotsearch.randomizedtesting.RandomizedRunner$10.evaluate(RandomizedRunner.java:984) at com.carrotsearch.randomizedtesting.rules.SystemPropertiesRestoreRule$1.evaluate(SystemPropertiesRestoreRule.java:57) at org.apache.lucene.util.TestRuleSetupTeardownChained$1.evaluate(TestRuleSetupTeardownChained.java:49) at org.apache.lucene.util.AbstractBeforeAfterRule$1.evaluate(AbstractBeforeAfterRule.java:45) at org.apache.lucene.util.TestRuleThreadAndTestName$1.evaluate(TestRuleThreadAndTestName.java:48) at org.apache.lucene.util.TestRuleIgnoreAfterMaxFailures$1.evaluate(TestRuleIgnoreAfterMaxFailures.java:64) at org.apache.lucene.util.TestRuleMarkFailure$1.evaluate(TestRuleMarkFailure.java:47) at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36) at 
com.carrotsearch.randomizedtesting.ThreadLeakControl$StatementRunner.run(ThreadLeakControl.java:368) at com.carrotsearch.randomizedtesting.ThreadLeakControl.forkTimeoutingTask(ThreadLeakControl.java:817) at com.carrotsearch.randomizedtesting.ThreadLeakControl$3.evaluate(ThreadLeakControl.java:468) at com.carrotsearch.randomizedtesting.RandomizedRunner.runSingleTest(RandomizedRunner.java:943) at com.carrotsearch.randomizedtesting.RandomizedRunner$5.evaluate(RandomizedRunner.java:829) at com.carrotsearch.randomizedtesting.RandomizedRunner$6.evaluate(RandomizedRunner.java:879) at com.carrotsearch.randomizedtesting.RandomizedRunner$7.evaluate(RandomizedRunner.java:890) at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36)