[JENKINS] Lucene-Solr-BadApples-NightlyTests-master - Build # 19 - Failure
Build: https://builds.apache.org/job/Lucene-Solr-BadApples-NightlyTests-master/19/ 15 tests failed. FAILED: org.apache.lucene.index.TestIndexingSequenceNumbers.testStressConcurrentCommit Error Message: this IndexWriter is closed Stack Trace: org.apache.lucene.store.AlreadyClosedException: this IndexWriter is closed at org.apache.lucene.index.IndexWriter.ensureOpen(IndexWriter.java:671) at org.apache.lucene.index.IndexWriter.ensureOpen(IndexWriter.java:685) at org.apache.lucene.index.IndexWriter.commit(IndexWriter.java:3412) at org.apache.lucene.index.TestIndexingSequenceNumbers.testStressConcurrentCommit(TestIndexingSequenceNumbers.java:228) at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62) at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.lang.reflect.Method.invoke(Method.java:498) at com.carrotsearch.randomizedtesting.RandomizedRunner.invoke(RandomizedRunner.java:1737) at com.carrotsearch.randomizedtesting.RandomizedRunner$8.evaluate(RandomizedRunner.java:934) at com.carrotsearch.randomizedtesting.RandomizedRunner$9.evaluate(RandomizedRunner.java:970) at com.carrotsearch.randomizedtesting.RandomizedRunner$10.evaluate(RandomizedRunner.java:984) at org.apache.lucene.util.TestRuleSetupTeardownChained$1.evaluate(TestRuleSetupTeardownChained.java:49) at org.apache.lucene.util.AbstractBeforeAfterRule$1.evaluate(AbstractBeforeAfterRule.java:45) at org.apache.lucene.util.TestRuleThreadAndTestName$1.evaluate(TestRuleThreadAndTestName.java:48) at org.apache.lucene.util.TestRuleIgnoreAfterMaxFailures$1.evaluate(TestRuleIgnoreAfterMaxFailures.java:64) at org.apache.lucene.util.TestRuleMarkFailure$1.evaluate(TestRuleMarkFailure.java:47) at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36) at com.carrotsearch.randomizedtesting.ThreadLeakControl$StatementRunner.run(ThreadLeakControl.java:368) at 
com.carrotsearch.randomizedtesting.ThreadLeakControl.forkTimeoutingTask(ThreadLeakControl.java:817) at com.carrotsearch.randomizedtesting.ThreadLeakControl$3.evaluate(ThreadLeakControl.java:468) at com.carrotsearch.randomizedtesting.RandomizedRunner.runSingleTest(RandomizedRunner.java:943) at com.carrotsearch.randomizedtesting.RandomizedRunner$5.evaluate(RandomizedRunner.java:829) at com.carrotsearch.randomizedtesting.RandomizedRunner$6.evaluate(RandomizedRunner.java:879) at com.carrotsearch.randomizedtesting.RandomizedRunner$7.evaluate(RandomizedRunner.java:890) at org.apache.lucene.util.AbstractBeforeAfterRule$1.evaluate(AbstractBeforeAfterRule.java:45) at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36) at org.apache.lucene.util.TestRuleStoreClassName$1.evaluate(TestRuleStoreClassName.java:41) at com.carrotsearch.randomizedtesting.rules.NoShadowingOrOverridesOnMethodsRule$1.evaluate(NoShadowingOrOverridesOnMethodsRule.java:40) at com.carrotsearch.randomizedtesting.rules.NoShadowingOrOverridesOnMethodsRule$1.evaluate(NoShadowingOrOverridesOnMethodsRule.java:40) at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36) at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36) at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36) at org.apache.lucene.util.TestRuleAssertionsRequired$1.evaluate(TestRuleAssertionsRequired.java:53) at org.apache.lucene.util.TestRuleMarkFailure$1.evaluate(TestRuleMarkFailure.java:47) at org.apache.lucene.util.TestRuleIgnoreAfterMaxFailures$1.evaluate(TestRuleIgnoreAfterMaxFailures.java:64) at org.apache.lucene.util.TestRuleIgnoreTestSuites$1.evaluate(TestRuleIgnoreTestSuites.java:54) at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36) at 
com.carrotsearch.randomizedtesting.ThreadLeakControl$StatementRunner.run(ThreadLeakControl.java:368) at java.lang.Thread.run(Thread.java:748) Caused by: java.lang.OutOfMemoryError: Java heap space at __randomizedtesting.SeedInfo.seed([EBB758A2DF516552]:0) at java.util.HashMap.resize(HashMap.java:704) at java.util.HashMap.putVal(HashMap.java:663) at java.util.HashMap.put(HashMap.java:612) at org.apache.lucene.index.BufferedUpdates.addTerm(BufferedUpdates.java:227) at org.apache.lucene.index.DocumentsWriterDeleteQueue$TermNode.apply(DocumentsWriterDeleteQueue.java:361) at
[JENKINS] Lucene-Solr-BadApples-Tests-7.x - Build # 101 - Still Unstable
Build: https://builds.apache.org/job/Lucene-Solr-BadApples-Tests-7.x/101/ 7 tests failed. FAILED: org.apache.solr.cloud.autoscaling.IndexSizeTriggerTest.testSplitIntegration Error Message: last state: DocCollection(testSplitIntegration_collection//clusterstate.json/64)={ "replicationFactor":"1", "pullReplicas":"0", "router":{"name":"compositeId"}, "maxShardsPerNode":"2", "autoAddReplicas":"false", "nrtReplicas":"2", "tlogReplicas":"0", "autoCreated":"true", "shards":{ "shard2":{ "replicas":{ "core_node3":{ "core":"testSplitIntegration_collection_shard2_replica_n3", "leader":"true", "SEARCHER.searcher.maxDoc":11, "SEARCHER.searcher.deletedDocs":0, "INDEX.sizeInBytes":15740, "node_name":"127.0.0.1:10008_solr", "state":"active", "type":"NRT", "INDEX.sizeInGB":1.4659017324447632E-5, "SEARCHER.searcher.numDocs":11}, "core_node4":{ "core":"testSplitIntegration_collection_shard2_replica_n4", "SEARCHER.searcher.maxDoc":11, "SEARCHER.searcher.deletedDocs":0, "INDEX.sizeInBytes":15740, "node_name":"127.0.0.1:10007_solr", "state":"active", "type":"NRT", "INDEX.sizeInGB":1.4659017324447632E-5, "SEARCHER.searcher.numDocs":11}}, "range":"0-7fff", "state":"active"}, "shard1":{ "stateTimestamp":"1531740462357473050", "replicas":{ "core_node1":{ "core":"testSplitIntegration_collection_shard1_replica_n1", "leader":"true", "SEARCHER.searcher.maxDoc":14, "SEARCHER.searcher.deletedDocs":0, "INDEX.sizeInBytes":17240, "node_name":"127.0.0.1:10008_solr", "state":"active", "type":"NRT", "INDEX.sizeInGB":1.605600118637085E-5, "SEARCHER.searcher.numDocs":14}, "core_node2":{ "core":"testSplitIntegration_collection_shard1_replica_n2", "SEARCHER.searcher.maxDoc":14, "SEARCHER.searcher.deletedDocs":0, "INDEX.sizeInBytes":17240, "node_name":"127.0.0.1:10007_solr", "state":"active", "type":"NRT", "INDEX.sizeInGB":1.605600118637085E-5, "SEARCHER.searcher.numDocs":14}}, "range":"8000-", "state":"inactive"}, "shard1_1":{ "parent":"shard1", "stateTimestamp":"1531740462386633150", "range":"c000-", 
"state":"active", "replicas":{ "core_node10":{ "leader":"true", "core":"testSplitIntegration_collection_shard1_1_replica1", "SEARCHER.searcher.maxDoc":7, "SEARCHER.searcher.deletedDocs":0, "INDEX.sizeInBytes":13740, "node_name":"127.0.0.1:10007_solr", "base_url":"http://127.0.0.1:10007/solr;, "state":"active", "type":"NRT", "INDEX.sizeInGB":1.2796372175216675E-5, "SEARCHER.searcher.numDocs":7}, "core_node9":{ "core":"testSplitIntegration_collection_shard1_1_replica0", "SEARCHER.searcher.maxDoc":7, "SEARCHER.searcher.deletedDocs":0, "INDEX.sizeInBytes":13740, "node_name":"127.0.0.1:10008_solr", "base_url":"http://127.0.0.1:10008/solr;, "state":"active", "type":"NRT", "INDEX.sizeInGB":1.2796372175216675E-5, "SEARCHER.searcher.numDocs":7}}}, "shard1_0":{ "parent":"shard1", "stateTimestamp":"1531740462386414000", "range":"8000-bfff", "state":"active", "replicas":{ "core_node7":{ "leader":"true", "core":"testSplitIntegration_collection_shard1_0_replica0", "SEARCHER.searcher.maxDoc":7, "SEARCHER.searcher.deletedDocs":0, "INDEX.sizeInBytes":23980, "node_name":"127.0.0.1:10008_solr", "base_url":"http://127.0.0.1:10008/solr;, "state":"active", "type":"NRT", "INDEX.sizeInGB":2.2333115339279175E-5, "SEARCHER.searcher.numDocs":7}, "core_node8":{ "core":"testSplitIntegration_collection_shard1_0_replica1", "SEARCHER.searcher.maxDoc":7, "SEARCHER.searcher.deletedDocs":0, "INDEX.sizeInBytes":23980, "node_name":"127.0.0.1:10007_solr", "base_url":"http://127.0.0.1:10007/solr;, "state":"active", "type":"NRT", "INDEX.sizeInGB":2.2333115339279175E-5, "SEARCHER.searcher.numDocs":7} Stack Trace: java.util.concurrent.TimeoutException: last state: DocCollection(testSplitIntegration_collection//clusterstate.json/64)={ "replicationFactor":"1", "pullReplicas":"0", "router":{"name":"compositeId"}, "maxShardsPerNode":"2", "autoAddReplicas":"false", "nrtReplicas":"2",
[jira] [Commented] (LUCENE-8306) Allow iteration over the term positions of a Match
[ https://issues.apache.org/jira/browse/LUCENE-8306?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel=16543985#comment-16543985 ] David Smiley commented on LUCENE-8306: -- RE TermPostingsEnum -- haha; I see we discussed it above. Doh! > Allow iteration over the term positions of a Match > -- > > Key: LUCENE-8306 > URL: https://issues.apache.org/jira/browse/LUCENE-8306 > Project: Lucene - Core > Issue Type: New Feature >Reporter: Alan Woodward >Assignee: Alan Woodward >Priority: Major > Attachments: LUCENE-8306.patch, LUCENE-8306.patch > > > For multi-term queries such as phrase queries, the matches API currently just > returns information about the span of the whole match. It would be useful to > also expose information about the matching terms within the phrase. The same > would apply to Spans and Interval queries. -- This message was sent by Atlassian JIRA (v7.6.3#76005) - To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org For additional commands, e-mail: dev-h...@lucene.apache.org
[jira] [Commented] (SOLR-12519) Support Deeply Nested Docs In Child Documents Transformer
[ https://issues.apache.org/jira/browse/SOLR-12519?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel=16543976#comment-16543976 ] David Smiley commented on SOLR-12519: - Oh I understand now. My suggestion to use PathHierarchyTokenizerFactory was centered around use-cases of querying for child docs purely by this path (e.g. all paths that look like this, etc.). If the query is find all child docs that match some arbitrary query (which is what "childFilter" is), and furthermore _their_ ancestors, then PathHierarchyTokenizerFactory may not be so useful in that. Sorry for the wild goose chase; though I suspect we'll revisit the use of PathHierarchyTokenizerFactory in the near future. I think we can do this with DocValues to store the nest path, and with modifications to ChildDocTransformer's loop over matching child documents. Recognize first how Lucene/Solr actually sequence the arrangement of nested child documents. Any given child document always comes _before_ its parent (and thus recursively so). Therefore, what can be done is to look at all documents _after_ a matching child document to see which of those is an ancestor of a matching child document. Detecting if child doc X has an ancestor of doc X + N is a matter of comparing if the path at X + N is a prefix of the path at X. You stop looping forward once you reach the root document -- tracked in parentsFilter bits. If that's not enough information for you to implement this, I can post a patch modification to ChildDocTransformer that will do this, and maybe you could take it further from there (e.g. restructure the ancestors into a nice hierarchy). > Support Deeply Nested Docs In Child Documents Transformer > - > > Key: SOLR-12519 > URL: https://issues.apache.org/jira/browse/SOLR-12519 > Project: Solr > Issue Type: Sub-task > Security Level: Public(Default Security Level. 
Issues are Public) >Reporter: mosh >Priority: Major > Attachments: SOLR-12519-no-commit.patch > > Time Spent: 10m > Remaining Estimate: 0h > > As discussed in SOLR-12298, to make use of the meta-data fields in > SOLR-12441, there needs to be a smarter child document transformer, which > provides the ability to rebuild the original nested documents' structure. > In addition, I also propose the transformer will also have the ability to > bring only some of the original hierarchy, to prevent unnecessary block join > queries. e.g. > {code} {"a": "b", "c": [ {"e": "f"}, {"e": "g"} , {"h": "i"} ]} {code} > Incase my query is for all the children of "a:b", which contain the key "e" > in them, the query will be broken in to two parts: > 1. The parent query "a:b" > 2. The child query "e:*". > If the only children flag is on, the transformer will return the following > documents: > {code}[ {"e": "f"}, {"e": "g"} ]{code} > In case the flag was not turned on(perhaps the default state), the whole > document hierarchy will be returned, containing only the matching children: > {code}{"a": "b", "c": [ {"e": "f"}, {"e": "g"} ]{code} -- This message was sent by Atlassian JIRA (v7.6.3#76005) - To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org For additional commands, e-mail: dev-h...@lucene.apache.org
[jira] [Commented] (SOLR-12441) Add deeply nested documents URP
[ https://issues.apache.org/jira/browse/SOLR-12441?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel=16543970#comment-16543970 ] ASF subversion and git services commented on SOLR-12441: Commit 048a8148f5432309b7016aa8ffb412b072063655 in lucene-solr's branch refs/heads/branch_7x from [~dsmiley] [ https://git-wip-us.apache.org/repos/asf?p=lucene-solr.git;h=048a814 ] SOLR-12441: Lowercase _NEST_PARENT_ and _NEST_PATH_ fields. (cherry picked from commit 4246089) > Add deeply nested documents URP > --- > > Key: SOLR-12441 > URL: https://issues.apache.org/jira/browse/SOLR-12441 > Project: Solr > Issue Type: Sub-task > Security Level: Public(Default Security Level. Issues are Public) >Reporter: mosh >Assignee: David Smiley >Priority: Major > Fix For: 7.5 > > Time Spent: 7.5h > Remaining Estimate: 0h > > As discussed in > [SOLR-12298|https://issues.apache.org/jira/browse/SOLR-12298], there ought to > be an URP to add metadata fields to childDocuments in order to allow a > transformer to rebuild the original document hierarchy. > {quote}I propose we add the following fields: > # __nestParent__ > # _nestLevel_ > # __nestPath__ > __nestParent__: This field wild will store the document's parent docId, to be > used for building the whole hierarchy, using a new document transformer, as > suggested by Jan on the mailing list. > _nestLevel_: This field will store the level of the specified field in the > document, using an int value. This field can be used for the parentFilter, > eliminating the need to provide a parentFilter, which will be set by default > as "_level_:queriedFieldLevel". > _nestLevel_: This field will contain the full path, separated by a specific > reserved char e.g., '.' > for example: "first.second.third". > This will enable users to search for a specific path, or provide a regular > expression to search for fields sharing the same name in different levels of > the document, filtering using the level key if needed. 
> {quote} -- This message was sent by Atlassian JIRA (v7.6.3#76005) - To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org For additional commands, e-mail: dev-h...@lucene.apache.org
[jira] [Commented] (SOLR-12441) Add deeply nested documents URP
[ https://issues.apache.org/jira/browse/SOLR-12441?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel=16543969#comment-16543969 ] ASF subversion and git services commented on SOLR-12441: Commit 424608946c10c7af1cb369a2a0247db0e6dd9744 in lucene-solr's branch refs/heads/master from [~dsmiley] [ https://git-wip-us.apache.org/repos/asf?p=lucene-solr.git;h=4246089 ] SOLR-12441: Lowercase _NEST_PARENT_ and _NEST_PATH_ fields. > Add deeply nested documents URP > --- > > Key: SOLR-12441 > URL: https://issues.apache.org/jira/browse/SOLR-12441 > Project: Solr > Issue Type: Sub-task > Security Level: Public(Default Security Level. Issues are Public) >Reporter: mosh >Assignee: David Smiley >Priority: Major > Fix For: 7.5 > > Time Spent: 7.5h > Remaining Estimate: 0h > > As discussed in > [SOLR-12298|https://issues.apache.org/jira/browse/SOLR-12298], there ought to > be an URP to add metadata fields to childDocuments in order to allow a > transformer to rebuild the original document hierarchy. > {quote}I propose we add the following fields: > # __nestParent__ > # _nestLevel_ > # __nestPath__ > __nestParent__: This field wild will store the document's parent docId, to be > used for building the whole hierarchy, using a new document transformer, as > suggested by Jan on the mailing list. > _nestLevel_: This field will store the level of the specified field in the > document, using an int value. This field can be used for the parentFilter, > eliminating the need to provide a parentFilter, which will be set by default > as "_level_:queriedFieldLevel". > _nestLevel_: This field will contain the full path, separated by a specific > reserved char e.g., '.' > for example: "first.second.third". > This will enable users to search for a specific path, or provide a regular > expression to search for fields sharing the same name in different levels of > the document, filtering using the level key if needed. 
> {quote} -- This message was sent by Atlassian JIRA (v7.6.3#76005) - To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org For additional commands, e-mail: dev-h...@lucene.apache.org
[jira] [Commented] (SOLR-12412) Leader should give up leadership when IndexWriter.tragedy occur
[ https://issues.apache.org/jira/browse/SOLR-12412?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel=16543966#comment-16543966 ] David Smiley commented on SOLR-12412: - Yes, +1 to adding a withIndexWriter(lambda) method similar to this guy: org.apache.solr.core.SolrCore#withSearcher This ref-counted business is error-prone. ~ David > Leader should give up leadership when IndexWriter.tragedy occur > --- > > Key: SOLR-12412 > URL: https://issues.apache.org/jira/browse/SOLR-12412 > Project: Solr > Issue Type: Improvement > Security Level: Public(Default Security Level. Issues are Public) >Reporter: Cao Manh Dat >Assignee: Cao Manh Dat >Priority: Major > Attachments: SOLR-12412.patch, SOLR-12412.patch > > > When a leader meets some kind of unrecoverable exception (ie: > CorruptedIndexException). The shard will go into the readable state and human > has to intervene. In that case, it will be the best if the leader gives up > its leadership and let other replicas become the leader. -- This message was sent by Atlassian JIRA (v7.6.3#76005) - To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org For additional commands, e-mail: dev-h...@lucene.apache.org
[jira] [Assigned] (SOLR-12551) Upgrade to Tika 1.18
[ https://issues.apache.org/jira/browse/SOLR-12551?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel ] Erick Erickson reassigned SOLR-12551: - Assignee: Erick Erickson > Upgrade to Tika 1.18 > > > Key: SOLR-12551 > URL: https://issues.apache.org/jira/browse/SOLR-12551 > Project: Solr > Issue Type: Task > Security Level: Public(Default Security Level. Issues are Public) >Reporter: Tim Allison >Assignee: Erick Erickson >Priority: Minor > Time Spent: 10m > Remaining Estimate: 0h > > Until 1.19 is ready (SOLR-12423), let's upgrade to 1.18. -- This message was sent by Atlassian JIRA (v7.6.3#76005) - To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org For additional commands, e-mail: dev-h...@lucene.apache.org
[jira] [Resolved] (SOLR-12552) Connection Threads Taking Time
[ https://issues.apache.org/jira/browse/SOLR-12552?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel ] Erick Erickson resolved SOLR-12552. --- Resolution: Incomplete This issue tracker is not a support portal. Please raise this question on the user's list at solr-u...@lucene.apache.org, see: (http://lucene.apache.org/solr/community.html#mailing-lists-irc) there are a _lot_ more people watching that list who may be able to help and you'll probably get responses much more quickly. If it's determined that this really is a code issue or enhancement to Solr and not a configuration/usage problem, we can raise a new JIRA or reopen this one. > Connection Threads Taking Time > -- > > Key: SOLR-12552 > URL: https://issues.apache.org/jira/browse/SOLR-12552 > Project: Solr > Issue Type: Bug > Security Level: Public(Default Security Level. Issues are Public) >Reporter: Gulam Uddin >Priority: Major > > Connection Threads are taking time > > * ava.net.SocketInputStream.socketRead0(Native Method) > * java.net.SocketInputStream.socketRead(SocketInputStream.java:116) > * java.net.SocketInputStream.read(SocketInputStream.java:170) > * java.net.SocketInputStream.read(SocketInputStream.java:141) > * org.eclipse.jetty.io.ByteArrayBuffer.readFrom(ByteArrayBuffer.java:391) > * org.eclipse.jetty.io.bio.StreamEndPoint.fill(StreamEndPoint.java:141) > * > org.eclipse.jetty.server.bio.SocketConnector$ConnectorEndPoint.fill(SocketConnector.java:227) > * org.eclipse.jetty.http.HttpParser.fill(HttpParser.java:1040) > * org.eclipse.jetty.http.HttpParser.parseNext(HttpParser.java:280) > * org.eclipse.jetty.http.HttpParser.parseAvailable(HttpParser.java:235) > * > org.eclipse.jetty.server.BlockingHttpConnection.handle(BlockingHttpConnection.java:72) > * > org.eclipse.jetty.server.bio.SocketConnector$ConnectorEndPoint.run(SocketConnector.java:264) > * > org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:608) > * > 
org.eclipse.jetty.util.thread.QueuedThreadPool$3.run(QueuedThreadPool.java:543) > * java.lang.Thread.run(Thread.java:745) > Any Idea ?? > -- This message was sent by Atlassian JIRA (v7.6.3#76005) - To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org For additional commands, e-mail: dev-h...@lucene.apache.org
[JENKINS] Lucene-Solr-master-Linux (64bit/jdk-10) - Build # 22451 - Unstable!
Build: https://jenkins.thetaphi.de/job/Lucene-Solr-master-Linux/22451/ Java: 64bit/jdk-10 -XX:-UseCompressedOops -XX:+UseParallelGC 3 tests failed. FAILED: org.apache.solr.cloud.LeaderTragicEventTest.testOtherReplicasAreNotActive Error Message: Jetty Connector is not open: -2 Stack Trace: java.lang.IllegalStateException: Jetty Connector is not open: -2 at __randomizedtesting.SeedInfo.seed([732B60E251609E4C:F69F4C956D9F27D4]:0) at org.apache.solr.client.solrj.embedded.JettySolrRunner.getBaseUrl(JettySolrRunner.java:499) at org.apache.solr.cloud.MiniSolrCloudCluster.getReplicaJetty(MiniSolrCloudCluster.java:539) at org.apache.solr.cloud.LeaderTragicEventTest.corruptLeader(LeaderTragicEventTest.java:100) at org.apache.solr.cloud.LeaderTragicEventTest.testOtherReplicasAreNotActive(LeaderTragicEventTest.java:150) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:564) at com.carrotsearch.randomizedtesting.RandomizedRunner.invoke(RandomizedRunner.java:1737) at com.carrotsearch.randomizedtesting.RandomizedRunner$8.evaluate(RandomizedRunner.java:934) at com.carrotsearch.randomizedtesting.RandomizedRunner$9.evaluate(RandomizedRunner.java:970) at com.carrotsearch.randomizedtesting.RandomizedRunner$10.evaluate(RandomizedRunner.java:984) at com.carrotsearch.randomizedtesting.rules.SystemPropertiesRestoreRule$1.evaluate(SystemPropertiesRestoreRule.java:57) at org.apache.lucene.util.TestRuleSetupTeardownChained$1.evaluate(TestRuleSetupTeardownChained.java:49) at org.apache.lucene.util.AbstractBeforeAfterRule$1.evaluate(AbstractBeforeAfterRule.java:45) at org.apache.lucene.util.TestRuleThreadAndTestName$1.evaluate(TestRuleThreadAndTestName.java:48) at 
org.apache.lucene.util.TestRuleIgnoreAfterMaxFailures$1.evaluate(TestRuleIgnoreAfterMaxFailures.java:64) at org.apache.lucene.util.TestRuleMarkFailure$1.evaluate(TestRuleMarkFailure.java:47) at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36) at com.carrotsearch.randomizedtesting.ThreadLeakControl$StatementRunner.run(ThreadLeakControl.java:368) at com.carrotsearch.randomizedtesting.ThreadLeakControl.forkTimeoutingTask(ThreadLeakControl.java:817) at com.carrotsearch.randomizedtesting.ThreadLeakControl$3.evaluate(ThreadLeakControl.java:468) at com.carrotsearch.randomizedtesting.RandomizedRunner.runSingleTest(RandomizedRunner.java:943) at com.carrotsearch.randomizedtesting.RandomizedRunner$5.evaluate(RandomizedRunner.java:829) at com.carrotsearch.randomizedtesting.RandomizedRunner$6.evaluate(RandomizedRunner.java:879) at com.carrotsearch.randomizedtesting.RandomizedRunner$7.evaluate(RandomizedRunner.java:890) at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36) at com.carrotsearch.randomizedtesting.rules.SystemPropertiesRestoreRule$1.evaluate(SystemPropertiesRestoreRule.java:57) at org.apache.lucene.util.AbstractBeforeAfterRule$1.evaluate(AbstractBeforeAfterRule.java:45) at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36) at org.apache.lucene.util.TestRuleStoreClassName$1.evaluate(TestRuleStoreClassName.java:41) at com.carrotsearch.randomizedtesting.rules.NoShadowingOrOverridesOnMethodsRule$1.evaluate(NoShadowingOrOverridesOnMethodsRule.java:40) at com.carrotsearch.randomizedtesting.rules.NoShadowingOrOverridesOnMethodsRule$1.evaluate(NoShadowingOrOverridesOnMethodsRule.java:40) at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36) at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36) at 
org.apache.lucene.util.TestRuleAssertionsRequired$1.evaluate(TestRuleAssertionsRequired.java:53) at org.apache.lucene.util.TestRuleMarkFailure$1.evaluate(TestRuleMarkFailure.java:47) at org.apache.lucene.util.TestRuleIgnoreAfterMaxFailures$1.evaluate(TestRuleIgnoreAfterMaxFailures.java:64) at org.apache.lucene.util.TestRuleIgnoreTestSuites$1.evaluate(TestRuleIgnoreTestSuites.java:54) at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36) at com.carrotsearch.randomizedtesting.ThreadLeakControl$StatementRunner.run(ThreadLeakControl.java:368) at java.base/java.lang.Thread.run(Thread.java:844) FAILED:
[jira] [Created] (SOLR-12552) Connection Threads Taking Time
Gulam Uddin created SOLR-12552: -- Summary: Connection Threads Taking Time Key: SOLR-12552 URL: https://issues.apache.org/jira/browse/SOLR-12552 Project: Solr Issue Type: Bug Security Level: Public (Default Security Level. Issues are Public) Reporter: Gulam Uddin Connection Threads are taking time * ava.net.SocketInputStream.socketRead0(Native Method) * java.net.SocketInputStream.socketRead(SocketInputStream.java:116) * java.net.SocketInputStream.read(SocketInputStream.java:170) * java.net.SocketInputStream.read(SocketInputStream.java:141) * org.eclipse.jetty.io.ByteArrayBuffer.readFrom(ByteArrayBuffer.java:391) * org.eclipse.jetty.io.bio.StreamEndPoint.fill(StreamEndPoint.java:141) * org.eclipse.jetty.server.bio.SocketConnector$ConnectorEndPoint.fill(SocketConnector.java:227) * org.eclipse.jetty.http.HttpParser.fill(HttpParser.java:1040) * org.eclipse.jetty.http.HttpParser.parseNext(HttpParser.java:280) * org.eclipse.jetty.http.HttpParser.parseAvailable(HttpParser.java:235) * org.eclipse.jetty.server.BlockingHttpConnection.handle(BlockingHttpConnection.java:72) * org.eclipse.jetty.server.bio.SocketConnector$ConnectorEndPoint.run(SocketConnector.java:264) * org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:608) * org.eclipse.jetty.util.thread.QueuedThreadPool$3.run(QueuedThreadPool.java:543) * java.lang.Thread.run(Thread.java:745) -- This message was sent by Atlassian JIRA (v7.6.3#76005) - To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org For additional commands, e-mail: dev-h...@lucene.apache.org
[jira] [Updated] (SOLR-12552) Connection Threads Taking Time
[ https://issues.apache.org/jira/browse/SOLR-12552?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel ] Gulam Uddin updated SOLR-12552: --- Description: Connection Threads are taking time * ava.net.SocketInputStream.socketRead0(Native Method) * java.net.SocketInputStream.socketRead(SocketInputStream.java:116) * java.net.SocketInputStream.read(SocketInputStream.java:170) * java.net.SocketInputStream.read(SocketInputStream.java:141) * org.eclipse.jetty.io.ByteArrayBuffer.readFrom(ByteArrayBuffer.java:391) * org.eclipse.jetty.io.bio.StreamEndPoint.fill(StreamEndPoint.java:141) * org.eclipse.jetty.server.bio.SocketConnector$ConnectorEndPoint.fill(SocketConnector.java:227) * org.eclipse.jetty.http.HttpParser.fill(HttpParser.java:1040) * org.eclipse.jetty.http.HttpParser.parseNext(HttpParser.java:280) * org.eclipse.jetty.http.HttpParser.parseAvailable(HttpParser.java:235) * org.eclipse.jetty.server.BlockingHttpConnection.handle(BlockingHttpConnection.java:72) * org.eclipse.jetty.server.bio.SocketConnector$ConnectorEndPoint.run(SocketConnector.java:264) * org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:608) * org.eclipse.jetty.util.thread.QueuedThreadPool$3.run(QueuedThreadPool.java:543) * java.lang.Thread.run(Thread.java:745) Any Idea ?? 
was: Connection Threads are taking time * ava.net.SocketInputStream.socketRead0(Native Method) * java.net.SocketInputStream.socketRead(SocketInputStream.java:116) * java.net.SocketInputStream.read(SocketInputStream.java:170) * java.net.SocketInputStream.read(SocketInputStream.java:141) * org.eclipse.jetty.io.ByteArrayBuffer.readFrom(ByteArrayBuffer.java:391) * org.eclipse.jetty.io.bio.StreamEndPoint.fill(StreamEndPoint.java:141) * org.eclipse.jetty.server.bio.SocketConnector$ConnectorEndPoint.fill(SocketConnector.java:227) * org.eclipse.jetty.http.HttpParser.fill(HttpParser.java:1040) * org.eclipse.jetty.http.HttpParser.parseNext(HttpParser.java:280) * org.eclipse.jetty.http.HttpParser.parseAvailable(HttpParser.java:235) * org.eclipse.jetty.server.BlockingHttpConnection.handle(BlockingHttpConnection.java:72) * org.eclipse.jetty.server.bio.SocketConnector$ConnectorEndPoint.run(SocketConnector.java:264) * org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:608) * org.eclipse.jetty.util.thread.QueuedThreadPool$3.run(QueuedThreadPool.java:543) * java.lang.Thread.run(Thread.java:745) > Connection Threads Taking Time > -- > > Key: SOLR-12552 > URL: https://issues.apache.org/jira/browse/SOLR-12552 > Project: Solr > Issue Type: Bug > Security Level: Public(Default Security Level. 
Issues are Public) >Reporter: Gulam Uddin >Priority: Major > > Connection Threads are taking time > > * ava.net.SocketInputStream.socketRead0(Native Method) > * java.net.SocketInputStream.socketRead(SocketInputStream.java:116) > * java.net.SocketInputStream.read(SocketInputStream.java:170) > * java.net.SocketInputStream.read(SocketInputStream.java:141) > * org.eclipse.jetty.io.ByteArrayBuffer.readFrom(ByteArrayBuffer.java:391) > * org.eclipse.jetty.io.bio.StreamEndPoint.fill(StreamEndPoint.java:141) > * > org.eclipse.jetty.server.bio.SocketConnector$ConnectorEndPoint.fill(SocketConnector.java:227) > * org.eclipse.jetty.http.HttpParser.fill(HttpParser.java:1040) > * org.eclipse.jetty.http.HttpParser.parseNext(HttpParser.java:280) > * org.eclipse.jetty.http.HttpParser.parseAvailable(HttpParser.java:235) > * > org.eclipse.jetty.server.BlockingHttpConnection.handle(BlockingHttpConnection.java:72) > * > org.eclipse.jetty.server.bio.SocketConnector$ConnectorEndPoint.run(SocketConnector.java:264) > * > org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:608) > * > org.eclipse.jetty.util.thread.QueuedThreadPool$3.run(QueuedThreadPool.java:543) > * java.lang.Thread.run(Thread.java:745) > Any Idea ?? > -- This message was sent by Atlassian JIRA (v7.6.3#76005) - To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org For additional commands, e-mail: dev-h...@lucene.apache.org
[JENKINS] Lucene-Solr-7.x-Solaris (64bit/jdk1.8.0) - Build # 722 - Unstable!
Build: https://jenkins.thetaphi.de/job/Lucene-Solr-7.x-Solaris/722/ Java: 64bit/jdk1.8.0 -XX:+UseCompressedOops -XX:+UseG1GC 1 tests failed. FAILED: org.apache.solr.client.solrj.io.stream.StreamDecoratorTest.testParallelCommitStream Error Message: expected:<5> but was:<3> Stack Trace: java.lang.AssertionError: expected:<5> but was:<3> at __randomizedtesting.SeedInfo.seed([75107210499CDA03:55FA1010D5DD374F]:0) at org.junit.Assert.fail(Assert.java:93) at org.junit.Assert.failNotEquals(Assert.java:647) at org.junit.Assert.assertEquals(Assert.java:128) at org.junit.Assert.assertEquals(Assert.java:472) at org.junit.Assert.assertEquals(Assert.java:456) at org.apache.solr.client.solrj.io.stream.StreamDecoratorTest.testParallelCommitStream(StreamDecoratorTest.java:3025) at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62) at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.lang.reflect.Method.invoke(Method.java:498) at com.carrotsearch.randomizedtesting.RandomizedRunner.invoke(RandomizedRunner.java:1737) at com.carrotsearch.randomizedtesting.RandomizedRunner$8.evaluate(RandomizedRunner.java:934) at com.carrotsearch.randomizedtesting.RandomizedRunner$9.evaluate(RandomizedRunner.java:970) at com.carrotsearch.randomizedtesting.RandomizedRunner$10.evaluate(RandomizedRunner.java:984) at com.carrotsearch.randomizedtesting.rules.SystemPropertiesRestoreRule$1.evaluate(SystemPropertiesRestoreRule.java:57) at org.apache.lucene.util.TestRuleSetupTeardownChained$1.evaluate(TestRuleSetupTeardownChained.java:49) at org.apache.lucene.util.AbstractBeforeAfterRule$1.evaluate(AbstractBeforeAfterRule.java:45) at org.apache.lucene.util.TestRuleThreadAndTestName$1.evaluate(TestRuleThreadAndTestName.java:48) at org.apache.lucene.util.TestRuleIgnoreAfterMaxFailures$1.evaluate(TestRuleIgnoreAfterMaxFailures.java:64) at 
org.apache.lucene.util.TestRuleMarkFailure$1.evaluate(TestRuleMarkFailure.java:47) at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36) at com.carrotsearch.randomizedtesting.ThreadLeakControl$StatementRunner.run(ThreadLeakControl.java:368) at com.carrotsearch.randomizedtesting.ThreadLeakControl.forkTimeoutingTask(ThreadLeakControl.java:817) at com.carrotsearch.randomizedtesting.ThreadLeakControl$3.evaluate(ThreadLeakControl.java:468) at com.carrotsearch.randomizedtesting.RandomizedRunner.runSingleTest(RandomizedRunner.java:943) at com.carrotsearch.randomizedtesting.RandomizedRunner$5.evaluate(RandomizedRunner.java:829) at com.carrotsearch.randomizedtesting.RandomizedRunner$6.evaluate(RandomizedRunner.java:879) at com.carrotsearch.randomizedtesting.RandomizedRunner$7.evaluate(RandomizedRunner.java:890) at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36) at com.carrotsearch.randomizedtesting.rules.SystemPropertiesRestoreRule$1.evaluate(SystemPropertiesRestoreRule.java:57) at org.apache.lucene.util.AbstractBeforeAfterRule$1.evaluate(AbstractBeforeAfterRule.java:45) at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36) at org.apache.lucene.util.TestRuleStoreClassName$1.evaluate(TestRuleStoreClassName.java:41) at com.carrotsearch.randomizedtesting.rules.NoShadowingOrOverridesOnMethodsRule$1.evaluate(NoShadowingOrOverridesOnMethodsRule.java:40) at com.carrotsearch.randomizedtesting.rules.NoShadowingOrOverridesOnMethodsRule$1.evaluate(NoShadowingOrOverridesOnMethodsRule.java:40) at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36) at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36) at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36) at 
org.apache.lucene.util.TestRuleAssertionsRequired$1.evaluate(TestRuleAssertionsRequired.java:53) at org.apache.lucene.util.TestRuleMarkFailure$1.evaluate(TestRuleMarkFailure.java:47) at org.apache.lucene.util.TestRuleIgnoreAfterMaxFailures$1.evaluate(TestRuleIgnoreAfterMaxFailures.java:64) at org.apache.lucene.util.TestRuleIgnoreTestSuites$1.evaluate(TestRuleIgnoreTestSuites.java:54) at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36) at com.carrotsearch.randomizedtesting.ThreadLeakControl$StatementRunner.run(ThreadLeakControl.java:368) at java.lang.Thread.run(Thread.java:748) Build Log: [...truncated 15772 lines...] [junit4] Suite:
[JENKINS] Lucene-Solr-7.x-Linux (64bit/jdk-9.0.4) - Build # 2318 - Unstable!
Build: https://jenkins.thetaphi.de/job/Lucene-Solr-7.x-Linux/2318/ Java: 64bit/jdk-9.0.4 -XX:+UseCompressedOops -XX:+UseParallelGC 9 tests failed. FAILED: org.apache.solr.cloud.LeaderTragicEventTest.testOtherReplicasAreNotActive Error Message: Jetty Connector is not open: -2 Stack Trace: java.lang.IllegalStateException: Jetty Connector is not open: -2 at __randomizedtesting.SeedInfo.seed([CD20E4772733CD75:4894C8001BCC74ED]:0) at org.apache.solr.client.solrj.embedded.JettySolrRunner.getBaseUrl(JettySolrRunner.java:499) at org.apache.solr.cloud.MiniSolrCloudCluster.getReplicaJetty(MiniSolrCloudCluster.java:539) at org.apache.solr.cloud.LeaderTragicEventTest.corruptLeader(LeaderTragicEventTest.java:100) at org.apache.solr.cloud.LeaderTragicEventTest.testOtherReplicasAreNotActive(LeaderTragicEventTest.java:150) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:564) at com.carrotsearch.randomizedtesting.RandomizedRunner.invoke(RandomizedRunner.java:1737) at com.carrotsearch.randomizedtesting.RandomizedRunner$8.evaluate(RandomizedRunner.java:934) at com.carrotsearch.randomizedtesting.RandomizedRunner$9.evaluate(RandomizedRunner.java:970) at com.carrotsearch.randomizedtesting.RandomizedRunner$10.evaluate(RandomizedRunner.java:984) at com.carrotsearch.randomizedtesting.rules.SystemPropertiesRestoreRule$1.evaluate(SystemPropertiesRestoreRule.java:57) at org.apache.lucene.util.TestRuleSetupTeardownChained$1.evaluate(TestRuleSetupTeardownChained.java:49) at org.apache.lucene.util.AbstractBeforeAfterRule$1.evaluate(AbstractBeforeAfterRule.java:45) at org.apache.lucene.util.TestRuleThreadAndTestName$1.evaluate(TestRuleThreadAndTestName.java:48) at 
org.apache.lucene.util.TestRuleIgnoreAfterMaxFailures$1.evaluate(TestRuleIgnoreAfterMaxFailures.java:64) at org.apache.lucene.util.TestRuleMarkFailure$1.evaluate(TestRuleMarkFailure.java:47) at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36) at com.carrotsearch.randomizedtesting.ThreadLeakControl$StatementRunner.run(ThreadLeakControl.java:368) at com.carrotsearch.randomizedtesting.ThreadLeakControl.forkTimeoutingTask(ThreadLeakControl.java:817) at com.carrotsearch.randomizedtesting.ThreadLeakControl$3.evaluate(ThreadLeakControl.java:468) at com.carrotsearch.randomizedtesting.RandomizedRunner.runSingleTest(RandomizedRunner.java:943) at com.carrotsearch.randomizedtesting.RandomizedRunner$5.evaluate(RandomizedRunner.java:829) at com.carrotsearch.randomizedtesting.RandomizedRunner$6.evaluate(RandomizedRunner.java:879) at com.carrotsearch.randomizedtesting.RandomizedRunner$7.evaluate(RandomizedRunner.java:890) at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36) at com.carrotsearch.randomizedtesting.rules.SystemPropertiesRestoreRule$1.evaluate(SystemPropertiesRestoreRule.java:57) at org.apache.lucene.util.AbstractBeforeAfterRule$1.evaluate(AbstractBeforeAfterRule.java:45) at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36) at org.apache.lucene.util.TestRuleStoreClassName$1.evaluate(TestRuleStoreClassName.java:41) at com.carrotsearch.randomizedtesting.rules.NoShadowingOrOverridesOnMethodsRule$1.evaluate(NoShadowingOrOverridesOnMethodsRule.java:40) at com.carrotsearch.randomizedtesting.rules.NoShadowingOrOverridesOnMethodsRule$1.evaluate(NoShadowingOrOverridesOnMethodsRule.java:40) at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36) at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36) at 
org.apache.lucene.util.TestRuleAssertionsRequired$1.evaluate(TestRuleAssertionsRequired.java:53) at org.apache.lucene.util.TestRuleMarkFailure$1.evaluate(TestRuleMarkFailure.java:47) at org.apache.lucene.util.TestRuleIgnoreAfterMaxFailures$1.evaluate(TestRuleIgnoreAfterMaxFailures.java:64) at org.apache.lucene.util.TestRuleIgnoreTestSuites$1.evaluate(TestRuleIgnoreTestSuites.java:54) at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36) at com.carrotsearch.randomizedtesting.ThreadLeakControl$StatementRunner.run(ThreadLeakControl.java:368) at java.base/java.lang.Thread.run(Thread.java:844) FAILED: org.apache.solr.cloud.LeaderTragicEventTest.test Error Message: Error
[jira] [Commented] (LUCENE-8263) Add indexPctDeletedTarget as a parameter to TieredMergePolicy to control more aggressive merging
[ https://issues.apache.org/jira/browse/LUCENE-8263?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel=16543800#comment-16543800 ] Adrien Grand commented on LUCENE-8263: -- bq. In such situations, deletesPctAllowed around 10-15% would make a lot of sense. I say keep the floor at 10%. I'd like to avoid doing that. This option is a bit like the "optimize" button to me: why would I want to waste space for deleted documents? Yet it's hard to think about the consequences of setting this option: the above simulations suggest around 2.1x more merging with 10% of allowed deletes but I wouldn't be surprised that it could be much worse in practice in production under certain conditions. Since Lucene only guarantees something around 50% of deleted documents in the index at most today, I feel like the current patch is significant progress already? Someone who would really want to configure it with 10% deletes could still fork this merge policy? bq. Or maybe simply issue a warning instead? This would be possible from Solr, but Lucene can't do it since it is a library. > Add indexPctDeletedTarget as a parameter to TieredMergePolicy to control more > aggressive merging > > > Key: LUCENE-8263 > URL: https://issues.apache.org/jira/browse/LUCENE-8263 > Project: Lucene - Core > Issue Type: Improvement >Reporter: Erick Erickson >Assignee: Erick Erickson >Priority: Major > Attachments: LUCENE-8263.patch > > > Spinoff of LUCENE-7976 to keep the two issues separate. > The current TMP allows up to 50% deleted docs, which can be wasteful on large > indexes. This parameter will do more aggressive merging of segments with > deleted documents when the _total_ percentage of deleted docs in the entire > index exceeds it. > Setting this to 50% should approximate current behavior. Setting it to 20% > caused the first cut at this to increase I/O roughly 10%. Setting it to 10% > caused about a 50% increase in I/O. 
> I was conflating the two issues, so I'll change 7976 and comment out the bits > that reference this new parameter. After it's checked in we can bring this > back. That should be less work than reconstructing this later. > Among the questions to be answered: > 1> what should the default be? I propose 20% as it results in significantly > less space wasted and helps control heap usage for a modest increase in I/O. > 2> what should the floor be? I propose 10% with _strong_ documentation > warnings about not setting it below 20%. > 3> should there be two parameters? I think this was discussed somewhat in > 7976. The first cut at this used this number for two purposes: > 3a> the total percentage of deleted docs index-wide to trip this trigger > 3b> the percentage of an _individual_ segment that had to be deleted if the > segment was over maxSegmentSize/2 bytes in order to be eligible for merging. > Empirically, using the same percentage for both caused the merging to hover > around the value specified for this parameter. > My proposal for <3> would be to have the parameter do double-duty. Assuming > my preliminary results hold, you specify this parameter at, say, 20% and once > the index hits that % deleted docs it hovers right around there, even if > you've forceMerged earlier down to 1 segment. This seems in line with what > I'd expect and adding another parameter seems excessively complicated to no > good purpose. We could always add something like that later if we wanted. -- This message was sent by Atlassian JIRA (v7.6.3#76005) - To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org For additional commands, e-mail: dev-h...@lucene.apache.org
[jira] [Commented] (SOLR-11598) Export Writer needs to support more than 4 Sort fields - Say 10, ideally it should not be bound at all, but 4 seems to really short sell the StreamRollup capabilities.
[ https://issues.apache.org/jira/browse/SOLR-11598?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel=16543791#comment-16543791 ] Varun Thacker commented on SOLR-11598: -- Patch which builds on top of Amrit's patch Changes: * I've split up the several sub-classes within ExportWriter to their own individual class. It's all contained within one package. Personally I found it easier to read the code after splitting out the classes. It does make the patch a lot bigger though * Adds some extra tests. We weren't testing boolean fields and a scenario where both docs have the same values. This caught a bug in the previous patch where if two docs are the same the higher docId would get selected rather than the lower docId one ( index order ) * One optimization around setting queue size i.e if total hits is less than 30k create the queueSize with totalHits {code:java} int queueSize = 3; if (totalHits < 3) { queueSize = totalHits; }{code} Today when creating the priority queue , we do doc-value seeks for all the sort fields. When we stream out docs we again make doc-value seeks against the fl fields . In most common use-cases I'd imagine fl = sort fields , so if we can pre-collect the values while sorting it , we can halve the doc-value seeks potentially bringing us speed improvements. I believe Amrit is already working on this and can be tackled in another Jira Based on Mark's and David's comments , should we still limit the sort fields to 10 or keep it say 50? I've added this line to the export writer ref guide page with the patch already {code:java} The export performance will get slower as you add more sort fields. If there is enough physical memory available outside of the JVM to load up the sort fields then the performance will be linearly slower with additional of sort fields. 
It can get worse otherwise.{code} I'll begin some manual testing on this patch > Export Writer needs to support more than 4 Sort fields - Say 10, ideally it > should not be bound at all, but 4 seems to really short sell the StreamRollup > capabilities. > --- > > Key: SOLR-11598 > URL: https://issues.apache.org/jira/browse/SOLR-11598 > Project: Solr > Issue Type: Improvement > Security Level: Public(Default Security Level. Issues are Public) > Components: streaming expressions >Affects Versions: 6.6.1, 7.0 >Reporter: Aroop >Assignee: Varun Thacker >Priority: Major > Labels: patch > Attachments: SOLR-11598-6_6-streamtests, SOLR-11598-6_6.patch, > SOLR-11598-master.patch, SOLR-11598.patch, SOLR-11598.patch, > SOLR-11598.patch, SOLR-11598.patch, SOLR-11598.patch, streaming-export > reports.xlsx > > > I am a user of Streaming and I am currently trying to use rollups on an 10 > dimensional document. > I am unable to get correct results on this query as I am bounded by the > limitation of the export handler which supports only 4 sort fields. > I do not see why this needs to be the case, as it could very well be 10 or 20. > My current needs would be satisfied with 10, but one would want to ask why > can't it be any decent integer n, beyond which we know performance degrades, > but even then it should be caveat emptor. 
> [~varunthacker] > Code Link: > https://github.com/apache/lucene-solr/blob/19db1df81a18e6eb2cce5be973bf2305d606a9f8/solr/core/src/java/org/apache/solr/handler/ExportWriter.java#L455 > Error > null:java.io.IOException: A max of 4 sorts can be specified > at > org.apache.solr.handler.ExportWriter.getSortDoc(ExportWriter.java:452) > at org.apache.solr.handler.ExportWriter.writeDocs(ExportWriter.java:228) > at > org.apache.solr.handler.ExportWriter.lambda$null$1(ExportWriter.java:219) > at > org.apache.solr.common.util.JavaBinCodec.writeIterator(JavaBinCodec.java:664) > at > org.apache.solr.common.util.JavaBinCodec.writeKnownType(JavaBinCodec.java:333) > at > org.apache.solr.common.util.JavaBinCodec.writeVal(JavaBinCodec.java:223) > at org.apache.solr.common.util.JavaBinCodec$1.put(JavaBinCodec.java:394) > at > org.apache.solr.handler.ExportWriter.lambda$null$2(ExportWriter.java:219) > at > org.apache.solr.common.util.JavaBinCodec.writeMap(JavaBinCodec.java:437) > at > org.apache.solr.common.util.JavaBinCodec.writeKnownType(JavaBinCodec.java:354) > at > org.apache.solr.common.util.JavaBinCodec.writeVal(JavaBinCodec.java:223) > at org.apache.solr.common.util.JavaBinCodec$1.put(JavaBinCodec.java:394) > at > org.apache.solr.handler.ExportWriter.lambda$write$3(ExportWriter.java:217) > at >
[jira] [Commented] (LUCENE-8396) Add Points Based Shape Indexing
[ https://issues.apache.org/jira/browse/LUCENE-8396?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel=16543789#comment-16543789 ] Nicholas Knize commented on LUCENE-8396: {quote} But isn't that impossible to detect {quote} Extremely unlikely to next to impossible? Yes, I think so. So I'm pretty confident this logic can be removed. > Add Points Based Shape Indexing > --- > > Key: LUCENE-8396 > URL: https://issues.apache.org/jira/browse/LUCENE-8396 > Project: Lucene - Core > Issue Type: New Feature >Reporter: Nicholas Knize >Priority: Major > Attachments: LUCENE-8396.patch, LUCENE-8396.patch, polyWHole.png, > tessellatedPoly.png > > > I've been tinkering with this for a while and would like to solicit some > feedback. I'd like to introduce a new shape field based on the BKD/Points > codec to bring much of the Points based performance improvements to the shape > indexing and search usecase. Much like the existing shape indexing in > {{spatial-extras}} the shape will be decomposed into smaller parts, but > instead of decomposing into quad cells (which have the drawback of precision > accuracy and sheer volume of terms) I'd like to explore decomposing the > shapes into a triangular mesh; similar to gaming and computer graphics. Not > only does this approach reduce the number of terms, but it has the added > benefit of better accuracy (precision is based on the index encoding > technique instead of the spatial resolution of the quad cell). > For better clarity, consider the following illustrations (of a polygon in a 1 > degree x 1 degree spatial area). The first is using the quad tree technique > applied in the existing inverted index. The second is using a triangular mesh > decomposition as used by popular OpenGL and javascript rendering systems > (such as those used by mapbox). > !polyWHole.png! > Decomposing this shape using a quad tree results in 1,105,889 quad terms at 3 > meter spatial resolution. > !tessellatedPoly.png! 
> > Decomposing using a triangular mesh results in 8 triangles at the same > resolution as {{encodeLat/Lon}}. > The decomposed triangles can then be encoded as a 6 dimensional POINT and > queries are implemented using the computed relations against these triangles > (similar to how its done with the inverted index today). -- This message was sent by Atlassian JIRA (v7.6.3#76005) - To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org For additional commands, e-mail: dev-h...@lucene.apache.org
[jira] [Updated] (SOLR-11598) Export Writer needs to support more than 4 Sort fields - Say 10, ideally it should not be bound at all, but 4 seems to really short sell the StreamRollup capabilities.
[ https://issues.apache.org/jira/browse/SOLR-11598?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel ] Varun Thacker updated SOLR-11598: - Attachment: SOLR-11598.patch > Export Writer needs to support more than 4 Sort fields - Say 10, ideally it > should not be bound at all, but 4 seems to really short sell the StreamRollup > capabilities. > --- > > Key: SOLR-11598 > URL: https://issues.apache.org/jira/browse/SOLR-11598 > Project: Solr > Issue Type: Improvement > Security Level: Public(Default Security Level. Issues are Public) > Components: streaming expressions >Affects Versions: 6.6.1, 7.0 >Reporter: Aroop >Assignee: Varun Thacker >Priority: Major > Labels: patch > Attachments: SOLR-11598-6_6-streamtests, SOLR-11598-6_6.patch, > SOLR-11598-master.patch, SOLR-11598.patch, SOLR-11598.patch, > SOLR-11598.patch, SOLR-11598.patch, SOLR-11598.patch, streaming-export > reports.xlsx > > > I am a user of Streaming and I am currently trying to use rollups on an 10 > dimensional document. > I am unable to get correct results on this query as I am bounded by the > limitation of the export handler which supports only 4 sort fields. > I do not see why this needs to be the case, as it could very well be 10 or 20. > My current needs would be satisfied with 10, but one would want to ask why > can't it be any decent integer n, beyond which we know performance degrades, > but even then it should be caveat emptor. 
> [~varunthacker] > Code Link: > https://github.com/apache/lucene-solr/blob/19db1df81a18e6eb2cce5be973bf2305d606a9f8/solr/core/src/java/org/apache/solr/handler/ExportWriter.java#L455 > Error > null:java.io.IOException: A max of 4 sorts can be specified > at > org.apache.solr.handler.ExportWriter.getSortDoc(ExportWriter.java:452) > at org.apache.solr.handler.ExportWriter.writeDocs(ExportWriter.java:228) > at > org.apache.solr.handler.ExportWriter.lambda$null$1(ExportWriter.java:219) > at > org.apache.solr.common.util.JavaBinCodec.writeIterator(JavaBinCodec.java:664) > at > org.apache.solr.common.util.JavaBinCodec.writeKnownType(JavaBinCodec.java:333) > at > org.apache.solr.common.util.JavaBinCodec.writeVal(JavaBinCodec.java:223) > at org.apache.solr.common.util.JavaBinCodec$1.put(JavaBinCodec.java:394) > at > org.apache.solr.handler.ExportWriter.lambda$null$2(ExportWriter.java:219) > at > org.apache.solr.common.util.JavaBinCodec.writeMap(JavaBinCodec.java:437) > at > org.apache.solr.common.util.JavaBinCodec.writeKnownType(JavaBinCodec.java:354) > at > org.apache.solr.common.util.JavaBinCodec.writeVal(JavaBinCodec.java:223) > at org.apache.solr.common.util.JavaBinCodec$1.put(JavaBinCodec.java:394) > at > org.apache.solr.handler.ExportWriter.lambda$write$3(ExportWriter.java:217) > at > org.apache.solr.common.util.JavaBinCodec.writeMap(JavaBinCodec.java:437) > at org.apache.solr.handler.ExportWriter.write(ExportWriter.java:215) > at org.apache.solr.core.SolrCore$3.write(SolrCore.java:2601) > at > org.apache.solr.response.QueryResponseWriterUtil.writeQueryResponse(QueryResponseWriterUtil.java:49) > at > org.apache.solr.servlet.HttpSolrCall.writeResponse(HttpSolrCall.java:809) > at org.apache.solr.servlet.HttpSolrCall.call(HttpSolrCall.java:538) > at > org.apache.solr.servlet.SolrDispatchFilter.doFilter(SolrDispatchFilter.java:361) > at > org.apache.solr.servlet.SolrDispatchFilter.doFilter(SolrDispatchFilter.java:305) > at > 
org.eclipse.jetty.servlet.ServletHandler$CachedChain.doFilter(ServletHandler.java:1691) > at > org.eclipse.jetty.servlet.ServletHandler.doHandle(ServletHandler.java:582) > at > org.eclipse.jetty.server.handler.ScopedHandler.handle(ScopedHandler.java:143) > at > org.eclipse.jetty.security.SecurityHandler.handle(SecurityHandler.java:548) > at > org.eclipse.jetty.server.session.SessionHandler.doHandle(SessionHandler.java:226) > at > org.eclipse.jetty.server.handler.ContextHandler.doHandle(ContextHandler.java:1180) > at > org.eclipse.jetty.servlet.ServletHandler.doScope(ServletHandler.java:512) > at > org.eclipse.jetty.server.session.SessionHandler.doScope(SessionHandler.java:185) > at > org.eclipse.jetty.server.handler.ContextHandler.doScope(ContextHandler.java:1112) > at > org.eclipse.jetty.server.handler.ScopedHandler.handle(ScopedHandler.java:141) > at > org.eclipse.jetty.server.handler.ContextHandlerCollection.handle(ContextHandlerCollection.java:213) > at >
[jira] [Commented] (LUCENE-8396) Add Points Based Shape Indexing
[ https://issues.apache.org/jira/browse/LUCENE-8396?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel=16543776#comment-16543776 ] David Smiley commented on LUCENE-8396: -- I took a peek at the patch. One bit surprised me. There is an optimization with a comment as follows: {noformat} // If all docs have exactly one value and the cost is greater // than half the leaf size then maybe we can make things faster // by computing the set of documents that do NOT match the query {noformat} But isn't that impossible to detect when the indexed data is comprised of multiple triangles per document? It uses PointValues.size() to detect this but that value isn't useful here, right? I'm guessing you copy-pasted this logic for LatLonPoint code where it does apply. > Add Points Based Shape Indexing > --- > > Key: LUCENE-8396 > URL: https://issues.apache.org/jira/browse/LUCENE-8396 > Project: Lucene - Core > Issue Type: New Feature >Reporter: Nicholas Knize >Priority: Major > Attachments: LUCENE-8396.patch, polyWHole.png, tessellatedPoly.png > > > I've been tinkering with this for a while and would like to solicit some > feedback. I'd like to introduce a new shape field based on the BKD/Points > codec to bring much of the Points based performance improvements to the shape > indexing and search usecase. Much like the existing shape indexing in > {{spatial-extras}} the shape will be decomposed into smaller parts, but > instead of decomposing into quad cells (which have the drawback of precision > accuracy and sheer volume of terms) I'd like to explore decomposing the > shapes into a triangular mesh; similar to gaming and computer graphics. Not > only does this approach reduce the number of terms, but it has the added > benefit of better accuracy (precision is based on the index encoding > technique instead of the spatial resolution of the quad cell). > For better clarity, consider the following illustrations (of a polygon in a 1 > degree x 1 degree spatial area). 
The first is using the quad tree technique > applied in the existing inverted index. The second is using a triangular mesh > decomposition as used by popular OpenGL and javascript rendering systems > (such as those used by mapbox). > !polyWHole.png! > Decomposing this shape using a quad tree results in 1,105,889 quad terms at 3 > meter spatial resolution. > !tessellatedPoly.png! > > Decomposing using a triangular mesh results in 8 triangles at the same > resolution as {{encodeLat/Lon}}. > The decomposed triangles can then be encoded as a 6 dimensional POINT and > queries are implemented using the computed relations against these triangles > (similar to how its done with the inverted index today). -- This message was sent by Atlassian JIRA (v7.6.3#76005) - To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org For additional commands, e-mail: dev-h...@lucene.apache.org
[jira] [Commented] (SOLR-12551) Upgrade to Tika 1.18
[ https://issues.apache.org/jira/browse/SOLR-12551?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel=16543764#comment-16543764 ] Tim Allison commented on SOLR-12551: Yes. Why, yes I do. Thank you! > Upgrade to Tika 1.18 > > > Key: SOLR-12551 > URL: https://issues.apache.org/jira/browse/SOLR-12551 > Project: Solr > Issue Type: Task > Security Level: Public(Default Security Level. Issues are Public) >Reporter: Tim Allison >Priority: Minor > Time Spent: 10m > Remaining Estimate: 0h > > Until 1.19 is ready (SOLR-12423), let's upgrade to 1.18. -- This message was sent by Atlassian JIRA (v7.6.3#76005) - To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org For additional commands, e-mail: dev-h...@lucene.apache.org
[jira] [Commented] (LUCENE-8291) Possible security issue when parsing XML documents containing external entity references
[ https://issues.apache.org/jira/browse/LUCENE-8291?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel=16543763#comment-16543763 ] Uwe Schindler commented on LUCENE-8291: --- Hi [~Oyeme], I think you are in the wrong issue. This is talking about something completely different. But to answer your question about DIH: You can still do this - but you cannot use absolute paths anymore. All xincludes must use relative (!) paths that don't escape the Solr home directory. > Possible security issue when parsing XML documents containing external entity > references > > > Key: LUCENE-8291 > URL: https://issues.apache.org/jira/browse/LUCENE-8291 > Project: Lucene - Core > Issue Type: Bug > Components: modules/queryparser >Affects Versions: 7.2.1 >Reporter: Hendrik Saly >Assignee: Uwe Schindler >Priority: Major > Fix For: 7.4, master (8.0) > > Attachments: LUCENE-8291-2.patch, LUCENE-8291.patch > > > It appears that in QueryTemplateManager.java lines 149 and 198 and in > DOMUtils.java line 204 XML is parsed without disabling external entity > references (XXE). This is described in > [http://cwe.mitre.org/data/definitions/611.html] and possible mitigations are > listed here: > [https://www.owasp.org/index.php/XML_External_Entity_(XXE)_Prevention_Cheat_Sheet] > All recent versions of lucene are affected. -- This message was sent by Atlassian JIRA (v7.6.3#76005) - To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org For additional commands, e-mail: dev-h...@lucene.apache.org
[jira] [Commented] (SOLR-12551) Upgrade to Tika 1.18
[ https://issues.apache.org/jira/browse/SOLR-12551?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel=16543760#comment-16543760 ] Erick Erickson commented on SOLR-12551: --- This works for me. Do you need someone to be committer-fingers for this? If so I'll grab it and we can make it work. Erick > Upgrade to Tika 1.18 > > > Key: SOLR-12551 > URL: https://issues.apache.org/jira/browse/SOLR-12551 > Project: Solr > Issue Type: Task > Security Level: Public(Default Security Level. Issues are Public) >Reporter: Tim Allison >Priority: Minor > Time Spent: 10m > Remaining Estimate: 0h > > Until 1.19 is ready (SOLR-12423), let's upgrade to 1.18. -- This message was sent by Atlassian JIRA (v7.6.3#76005) - To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org For additional commands, e-mail: dev-h...@lucene.apache.org
[jira] [Commented] (LUCENE-8345) Add wrapper class constructors to forbiddenapis
[ https://issues.apache.org/jira/browse/LUCENE-8345?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel=16543758#comment-16543758 ] Uwe Schindler commented on LUCENE-8345: --- Oh sorry, I will commit this later. I was a bit busy! I'd like to add the new String(String) constructor, too. But we can also do this in a separate issue. > Add wrapper class constructors to forbiddenapis > --- > > Key: LUCENE-8345 > URL: https://issues.apache.org/jira/browse/LUCENE-8345 > Project: Lucene - Core > Issue Type: Improvement >Reporter: Michael Braun >Assignee: Uwe Schindler >Priority: Minor > Time Spent: 20m > Remaining Estimate: 0h > > Wrapper classes for the Java primitives (Boolean, Byte, Short, Character, > Integer, Long, Float, Double) have constructors which will always create new > objects. These constructors are officially deprecated as of Java 9 and it is > recommended to use the public static methods since these can reuse the same > underlying objects. In 99% of cases we should be doing this, so these > constructors should be added to forbiddenapis and code corrected to use > autoboxing or call the static methods (.valueOf, .parse*) explicitly. -- This message was sent by Atlassian JIRA (v7.6.3#76005) - To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org For additional commands, e-mail: dev-h...@lucene.apache.org
RE: FW: [JENKINS-EA] Lucene-Solr-7.x-Linux (64bit/jdk-11-ea+21) - Build # 2276 - Unstable!
Hi Alan, hi Rory, Thanks for the update. The NPE no longer happens with JDK 11 ea build 22, but - unfortunately - it now breaks a bit later, also inside Apache Http Client when calling the SSLSocketImpl: [junit4] 2> Caused by: javax.net.ssl.SSLHandshakeException: Remote host terminated the handshake [junit4] 2>at sun.security.ssl.SSLSocketImpl.handleEOF(SSLSocketImpl.java:1121) ~[?:?] [junit4] 2>at sun.security.ssl.SSLSocketImpl.decode(SSLSocketImpl.java:885) ~[?:?] [junit4] 2>at sun.security.ssl.SSLSocketImpl.readRecord(SSLSocketImpl.java:810) ~[?:?] [junit4] 2>at sun.security.ssl.SSLSocketImpl.startHandshake(SSLSocketImpl.java:383) ~[?:?] [junit4] 2>at org.apache.http.conn.ssl.SSLConnectionSocketFactory.createLayeredSocket(SSLConnectionSocketFactory.java:396) ~[httpclient-4.5.3.jar:4.5.3] [junit4] 2>at org.apache.http.conn.ssl.SSLConnectionSocketFactory.connectSocket(SSLConnectionSocketFactory.java:355) ~[httpclient-4.5.3.jar:4.5.3] [junit4] 2>at org.apache.http.impl.conn.DefaultHttpClientConnectionOperator.connect(DefaultHttpClientConnectionOperator.java:142) ~[httpclient-4.5.3.jar:4.5.3] [junit4] 2>at org.apache.http.impl.conn.PoolingHttpClientConnectionManager.connect(PoolingHttpClientConnectionManager.java:359) ~[httpclient-4.5.3.jar:4.5.3] [junit4] 2>at org.apache.http.impl.execchain.MainClientExec.establishRoute(MainClientExec.java:381) ~[httpclient-4.5.3.jar:4.5.3] [junit4] 2>at org.apache.http.impl.execchain.MainClientExec.execute(MainClientExec.java:237) ~[httpclient-4.5.3.jar:4.5.3] [junit4] 2>at org.apache.http.impl.execchain.ProtocolExec.execute(ProtocolExec.java:185) ~[httpclient-4.5.3.jar:4.5.3] [junit4] 2>at org.apache.http.impl.execchain.RetryExec.execute(RetryExec.java:89) ~[httpclient-4.5.3.jar:4.5.3] [junit4] 2>at org.apache.http.impl.execchain.RedirectExec.execute(RedirectExec.java:111) ~[httpclient-4.5.3.jar:4.5.3] [junit4] 2>at org.apache.http.impl.client.InternalHttpClient.doExecute(InternalHttpClient.java:185) ~[httpclient-4.5.3.jar:4.5.3] 
[junit4] 2>at org.apache.http.impl.client.CloseableHttpClient.execute(CloseableHttpClient.java:83) ~[httpclient-4.5.3.jar:4.5.3] [junit4] 2>at org.apache.http.impl.client.CloseableHttpClient.execute(CloseableHttpClient.java:56) ~[httpclient-4.5.3.jar:4.5.3] [junit4] 2>at org.apache.solr.client.solrj.impl.HttpSolrClient.executeMethod(HttpSolrClient.java:542) ~[java/:?] [junit4] 2>... 12 more The number of tests failing is therefore the same (it varies a bit, because the communication to solr is done with HTTPS randomly). I think this is a new bug relating to the TLS 1.3 integration. A bit of background: This is Apache's HTTPClient talking to a Jetty 9.3 server on 127.0.0.1. Once I have some time on the weekend or next week, I will setup a simple Jetty webserver and will use Apache's HttpClient to talk to it. Uwe > -Original Message- > From: Uwe Schindler > Sent: Friday, July 13, 2018 11:59 AM > To: 'Alan Bateman' ; 'Rory O'Donnell' > > Subject: RE: FW: [JENKINS-EA] Lucene-Solr-7.x-Linux (64bit/jdk-11-ea+21) - > Build # 2276 - Unstable! > > Hi Alan, > > I installed this version on Linux and Windows Jenkins. First build is running! > https://jenkins.thetaphi.de/job/Lucene-Solr-7.x-Linux/2315/ > > Uwe > > > > -Original Message- > > From: Alan Bateman > > Sent: Friday, July 13, 2018 8:43 AM > > To: Uwe Schindler ; 'Rory O'Donnell' > > > > Subject: Re: FW: [JENKINS-EA] Lucene-Solr-7.x-Linux (64bit/jdk-11-ea+21) - > > Build # 2276 - Unstable! > > > > > > jdk-11+22 has been published and includes this fix so I hope you will be > > able to test Lucene with this build. > > > > -Alan > > > > > > On 07/07/2018 14:26, Alan Bateman wrote: > > > > > > There was a regression in jdk-11+20 after the TLS 1.3 integration. The > > > bug is JDK-8206355 [1] and the fix is in jdk/jdk11 so should be next > > > week's build (jdk+11+22). 
> > > > > > -Alan > > > > > > [1] https://bugs.openjdk.java.net/browse/JDK-8206355 > > > - To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org For additional commands, e-mail: dev-h...@lucene.apache.org
[jira] [Commented] (LUCENE-8396) Add Points Based Shape Indexing
[ https://issues.apache.org/jira/browse/LUCENE-8396?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel=16543748#comment-16543748 ] David Smiley commented on LUCENE-8396: -- I definitely understand that this new shape indexing scheme is better than serialized geometry + a grid, and why it is – no need to sell me :). The benchmarks will show an amazing improvement, I'm sure – especially for shapes with a high number of vertexes. I want to list some use-cases in which spatial-extras is (still) useful. This list might go into documentation for spatial-extras. A new LatLonShape here reduced the list somewhat – certainly for a common case (albeit indexing anything other than points simply isn't common among search/Lucene users). Perhaps in time LatLonShape will mature in capability enough for us to remove spatial-extras if there's not much worth saving for the complexity. It'd be helpful if you could validate your understanding against mine below. * Additional shape predicates (other than simply Intersects): Contains, IsWithin. * Indexing circles * Indexing linestrings * Indexing in a user defined Euclidean 2D coordinate space (i.e. other than -180/180, -90/90). ** query by Euclidean circle (not sphere) * Indexing surface-of-sphere/ellipsoid shapes (Geo3D, S2PrefixTree) * Heatmaps using pre-indexed cells * Date range indexes aligned to meaningful units > Add Points Based Shape Indexing > --- > > Key: LUCENE-8396 > URL: https://issues.apache.org/jira/browse/LUCENE-8396 > Project: Lucene - Core > Issue Type: New Feature >Reporter: Nicholas Knize >Priority: Major > Attachments: LUCENE-8396.patch, polyWHole.png, tessellatedPoly.png > > > I've been tinkering with this for a while and would like to solicit some > feedback. I'd like to introduce a new shape field based on the BKD/Points > codec to bring much of the Points based performance improvements to the shape > indexing and search usecase. 
Much like the existing shape indexing in > {{spatial-extras}} the shape will be decomposed into smaller parts, but > instead of decomposing into quad cells (which have the drawback of precision > accuracy and sheer volume of terms) I'd like to explore decomposing the > shapes into a triangular mesh; similar to gaming and computer graphics. Not > only does this approach reduce the number of terms, but it has the added > benefit of better accuracy (precision is based on the index encoding > technique instead of the spatial resolution of the quad cell). > For better clarity, consider the following illustrations (of a polygon in a 1 > degree x 1 degree spatial area). The first is using the quad tree technique > applied in the existing inverted index. The second is using a triangular mesh > decomposition as used by popular OpenGL and javascript rendering systems > (such as those used by mapbox). > !polyWHole.png! > Decomposing this shape using a quad tree results in 1,105,889 quad terms at 3 > meter spatial resolution. > !tessellatedPoly.png! > > Decomposing using a triangular mesh results in 8 triangles at the same > resolution as {{encodeLat/Lon}}. > The decomposed triangles can then be encoded as a 6 dimensional POINT and > queries are implemented using the computed relations against these triangles > (similar to how its done with the inverted index today). -- This message was sent by Atlassian JIRA (v7.6.3#76005) - To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org For additional commands, e-mail: dev-h...@lucene.apache.org
[jira] [Commented] (SOLR-12551) Upgrade to Tika 1.18
[ https://issues.apache.org/jira/browse/SOLR-12551?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel=16543732#comment-16543732 ] Tim Allison commented on SOLR-12551: I did the full integration tests with this against all of Tika's test files (with ucar files removed). > Upgrade to Tika 1.18 > > > Key: SOLR-12551 > URL: https://issues.apache.org/jira/browse/SOLR-12551 > Project: Solr > Issue Type: Task > Security Level: Public(Default Security Level. Issues are Public) >Reporter: Tim Allison >Priority: Minor > Time Spent: 10m > Remaining Estimate: 0h > > Until 1.19 is ready (SOLR-12423), let's upgrade to 1.18. -- This message was sent by Atlassian JIRA (v7.6.3#76005) - To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org For additional commands, e-mail: dev-h...@lucene.apache.org
[GitHub] lucene-solr pull request #419: SOLR-12551 - upgrade to Tika 1.18, first draf...
GitHub user tballison opened a pull request: https://github.com/apache/lucene-solr/pull/419 SOLR-12551 - upgrade to Tika 1.18, first draft You can merge this pull request into a Git repository by running: $ git pull https://github.com/tballison/lucene-solr jira/SOLR-12551 Alternatively you can review and apply these changes as the patch at: https://github.com/apache/lucene-solr/pull/419.patch To close this pull request, make a commit to your master/trunk branch with (at least) the following in the commit message: This closes #419 commit 9dfa7a4419e00892f51ab925f3e33c135463eec9 Author: TALLISON Date: 2018-07-13T21:12:51Z SOLR-12551 - upgrade to Tika 1.18, first draft --- - To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org For additional commands, e-mail: dev-h...@lucene.apache.org
[GitHub] lucene-solr pull request #418: SOLR-12423 - upgrade to Tika 1.18, first draf...
Github user tballison closed the pull request at: https://github.com/apache/lucene-solr/pull/418 --- - To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org For additional commands, e-mail: dev-h...@lucene.apache.org
[GitHub] lucene-solr pull request #418: SOLR-12423 - upgrade to Tika 1.18, first draf...
GitHub user tballison opened a pull request: https://github.com/apache/lucene-solr/pull/418 SOLR-12423 - upgrade to Tika 1.18, first draft You can merge this pull request into a Git repository by running: $ git pull https://github.com/tballison/lucene-solr jira/SOLR-12423 Alternatively you can review and apply these changes as the patch at: https://github.com/apache/lucene-solr/pull/418.patch To close this pull request, make a commit to your master/trunk branch with (at least) the following in the commit message: This closes #418 commit 2577afbf6c2f64f5ac1052a80954973a12f22c92 Author: TALLISON Date: 2018-07-13T21:12:51Z SOLR-12423 - upgrade to Tika 1.18, first draft --- - To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org For additional commands, e-mail: dev-h...@lucene.apache.org
[jira] [Comment Edited] (LUCENE-8396) Add Points Based Shape Indexing
[ https://issues.apache.org/jira/browse/LUCENE-8396?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel=16543708#comment-16543708 ] Nicholas Knize edited comment on LUCENE-8396 at 7/13/18 9:02 PM: - Thanks [~jpountz], [~mikemccand], and [~dsmiley] some great feedback here: Great suggestions [~jpountz]. I'll make these changes, clean up the code, and push to sandbox for iterating. [~dsmiley] {quote}There are also some off the shelf computational geometry libraries that can simplify polygons (JTS can), and some users may want to do that when indexing shapes. {quote} I took a look at several of the OTS simplification a while ago. I'm sure there are many more out there than what I canvassed so this isn't intended to be a catch all comment. And we could certainly look at other tessellation libraries in the future for iterative performance improvements. But I did look at the JTS simplification tools. The problem with those varied: from tolerance value errors creating invalid polygons to general performance overhead. Each were great as a stand alone utility, but none were reliable nor performant enough for the scale desired. {quote}they ought to use SerializedDVStrategy for accuracy combined with RecursivePrefixTreeStrategy for a grid index {quote} Indeed this is an optimization allowing one to index "larger" quad cells (to reduce the number of terms in the inverted index) and subsequently use the WKB {{BinaryDocValues}} for accurate relations. But not only is the use of BinaryDocValues with WKB limiting (with respect to shapes with large number of vertices and the marshalling/unmarshalling overhead) but all of the calls to {{.relate}} (with either JTS, S2, or other third party implementations) incur their own performance penalties.. not just for relating each quad cell to the query shape but also with having to unmarshall "leaf shapes" and compute the DE9IM to relate those to the query shape for accuracy. 
{quote}combined with...and...wrapping both {quote} This was the other motivator... Like {{LatLonPoint}} its nice to have a simple API (hopefully intended for core) that can solve the general Shape indexing and search use case without having to decide which PrefixTree, Grid, and Shape Relation libraries to use. Then for the more expert use cases users can always move over to {{spatial-extras}} for all of the additional choices. was (Author: nknize): Thanks [~jpountz], [~mikemccand], and [~dsmiley] some great feedback here: Great suggestions [~jpountz]. I'll make these changes, clean up the code, and push to sandbox for iterating. [~dsmiley] {quote}There are also some off the shelf computational geometry libraries that can simplify polygons (JTS can), and some users may want to do that when indexing shapes. {quote} I took a look at several of the OTS simplification a while ago. I'm sure there are many more out there than what I canvassed so this isn't intended to be a catch all comment. And we could certainly look at other tessellation libraries in the future for iterative performance improvements. But I did look at the JTS simplification tools. The problem with those varied: from tolerance value errors creating invalid polygons to general performance overhead. Each were great as a stand alone utility, but none were reliable nor performant enough for the scale desired. {quote}they ought to use SerializedDVStrategy for accuracy combined with RecursivePrefixTreeStrategy for a grid index {quote} Indeed this is an optimization allowing one to index "larger" quad cells (to avoid all of the terms in the inverted index) and subsequently use the WKB {{BinaryDocValues}} for accurate relations. 
But not only is the use of BinaryDocValues with WKB limiting (with respect to shapes with large number of vertices and the marshalling/unmarshalling overhead) but all of the calls to {{.relate}} (with either JTS, S2, or other third party implementations) incur their own performance penalties.. not just for relating each quad cell to the query shape but also with having to unmarshall "leaf shapes" and compute the DE9IM to relate those to the query shape for accuracy. {quote}combined with...and...wrapping both {quote} This was the other motivator... Like {{LatLonPoint}} its nice to have a simple API (hopefully intended for core) that can solve the general Shape indexing and search use case without having to decide which PrefixTree, Grid, and Shape Relation libraries to use. Then for the more expert use cases users can always move over to {{spatial-extras}} for all of the additional choices. > Add Points Based Shape Indexing > --- > > Key: LUCENE-8396 > URL: https://issues.apache.org/jira/browse/LUCENE-8396 > Project: Lucene - Core > Issue Type: New Feature >Reporter: Nicholas Knize >
[jira] [Commented] (LUCENE-8396) Add Points Based Shape Indexing
[ https://issues.apache.org/jira/browse/LUCENE-8396?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel=16543644#comment-16543644 ] David Smiley commented on LUCENE-8396: -- +1 super cool Nick! I like how the use of a tessellation technique can represent polygons with a number of triangles on the order of the number of edge vertices, and you wind up with perfect accuracy & scalability. There are also some off the shelf computational geometry libraries that can simplify polygons (JTS can), and some users may want to do that when indexing shapes. In your example you show a quad grid index of today decomposing a shape into a million terms. Why would someone do this? If someone wants to index shapes today with spatial-extras, they ought to use SerializedDVStrategy for accuracy combined with RecursivePrefixTreeStrategy for a grid index (perhaps 20% distErrPct) and CompositeSpatialStrategy wrapping both. The number of terms for any shape is effectively capped and controlled indirectly via distErrPct – perhaps 100 terms for distErrPct=0.2? (not sure without trying). > Add Points Based Shape Indexing > --- > > Key: LUCENE-8396 > URL: https://issues.apache.org/jira/browse/LUCENE-8396 > Project: Lucene - Core > Issue Type: New Feature >Reporter: Nicholas Knize >Priority: Major > Attachments: LUCENE-8396.patch, polyWHole.png, tessellatedPoly.png > > > I've been tinkering with this for a while and would like to solicit some > feedback. I'd like to introduce a new shape field based on the BKD/Points > codec to bring much of the Points based performance improvements to the shape > indexing and search usecase. Much like the existing shape indexing in > {{spatial-extras}} the shape will be decomposed into smaller parts, but > instead of decomposing into quad cells (which have the drawback of precision > accuracy and sheer volume of terms) I'd like to explore decomposing the > shapes into a triangular mesh; similar to gaming and computer graphics. 
Not > only does this approach reduce the number of terms, but it has the added > benefit of better accuracy (precision is based on the index encoding > technique instead of the spatial resolution of the quad cell). > For better clarity, consider the following illustrations (of a polygon in a 1 > degree x 1 degree spatial area). The first is using the quad tree technique > applied in the existing inverted index. The second is using a triangular mesh > decomposition as used by popular OpenGL and javascript rendering systems > (such as those used by mapbox). > !polyWHole.png! > Decomposing this shape using a quad tree results in 1,105,889 quad terms at 3 > meter spatial resolution. > !tessellatedPoly.png! > > Decomposing using a triangular mesh results in 8 triangles at the same > resolution as {{encodeLat/Lon}}. > The decomposed triangles can then be encoded as a 6 dimensional POINT and > queries are implemented using the computed relations against these triangles > (similar to how its done with the inverted index today). -- This message was sent by Atlassian JIRA (v7.6.3#76005) - To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org For additional commands, e-mail: dev-h...@lucene.apache.org
[jira] [Comment Edited] (LUCENE-8263) Add indexPctDeletedTarget as a parameter to TieredMergePolicy to control more aggressive merging
[ https://issues.apache.org/jira/browse/LUCENE-8263?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel=16543616#comment-16543616 ] Marc Morissette edited comment on LUCENE-8263 at 7/13/18 7:37 PM: -- I would like to argue against a 20% floor. Some indexes contain documents of wildly different sizes with the larger documents experiencing much higher turnover. I have seen indexes with around 20% deletions that were more than 2x their optimized size because of this phenomenon. In such situations, deletesPctAllowed around 10-15% would make a lot of sense. I say keep the floor at 10%. Or maybe simply issue a warning instead? was (Author: marc.morissette): I would like to argue against a 20% floor. Some indexes contain documents of wildly different sizes with the larger documents experiencing much higher turnover. I have seen indexes with around 20% deletions that were more than 2x their optimized size because of this phenomenon. In such situations, deletesPctAllowed around 10-15% would make a lot of sense. I say keep the floor at 10%. > Add indexPctDeletedTarget as a parameter to TieredMergePolicy to control more > aggressive merging > > > Key: LUCENE-8263 > URL: https://issues.apache.org/jira/browse/LUCENE-8263 > Project: Lucene - Core > Issue Type: Improvement >Reporter: Erick Erickson >Assignee: Erick Erickson >Priority: Major > Attachments: LUCENE-8263.patch > > > Spinoff of LUCENE-7976 to keep the two issues separate. > The current TMP allows up to 50% deleted docs, which can be wasteful on large > indexes. This parameter will do more aggressive merging of segments with > deleted documents when the _total_ percentage of deleted docs in the entire > index exceeds it. > Setting this to 50% should approximate current behavior. Setting it to 20% > caused the first cut at this to increase I/O roughly 10%. Setting it to 10% > caused about a 50% increase in I/O. 
> I was conflating the two issues, so I'll change 7976 and comment out the bits > that reference this new parameter. After it's checked in we can bring this > back. That should be less work than reconstructing this later. > Among the questions to be answered: > 1> what should the default be? I propose 20% as it results in significantly > less space wasted and helps control heap usage for a modest increase in I/O. > 2> what should the floor be? I propose 10% with _strong_ documentation > warnings about not setting it below 20%. > 3> should there be two parameters? I think this was discussed somewhat in > 7976. The first cut at this used this number for two purposes: > 3a> the total percentage of deleted docs index-wide to trip this trigger > 3b> the percentage of an _individual_ segment that had to be deleted if the > segment was over maxSegmentSize/2 bytes in order to be eligible for merging. > Empirically, using the same percentage for both caused the merging to hover > around the value specified for this parameter. > My proposal for <3> would be to have the parameter do double-duty. Assuming > my preliminary results hold, you specify this parameter at, say, 20% and once > the index hits that % deleted docs it hovers right around there, even if > you've forceMerged earlier down to 1 segment. This seems in line with what > I'd expect and adding another parameter seems excessively complicated to no > good purpose. We could always add something like that later if we wanted. -- This message was sent by Atlassian JIRA (v7.6.3#76005) - To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org For additional commands, e-mail: dev-h...@lucene.apache.org
[jira] [Commented] (LUCENE-8263) Add indexPctDeletedTarget as a parameter to TieredMergePolicy to control more aggressive merging
[ https://issues.apache.org/jira/browse/LUCENE-8263?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel=16543616#comment-16543616 ] Marc Morissette commented on LUCENE-8263: - I would like to argue against a 20% floor. Some indexes contain documents of wildly different sizes with the larger documents experiencing much higher turnover. I have seen indexes with around 20% deletions that were more than 2x their optimized size because of this phenomenon. In such situations, deletesPctAllowed around 10-15% would make a lot of sense. I say keep the floor at 10%. > Add indexPctDeletedTarget as a parameter to TieredMergePolicy to control more > aggressive merging > > > Key: LUCENE-8263 > URL: https://issues.apache.org/jira/browse/LUCENE-8263 > Project: Lucene - Core > Issue Type: Improvement >Reporter: Erick Erickson >Assignee: Erick Erickson >Priority: Major > Attachments: LUCENE-8263.patch > > > Spinoff of LUCENE-7976 to keep the two issues separate. > The current TMP allows up to 50% deleted docs, which can be wasteful on large > indexes. This parameter will do more aggressive merging of segments with > deleted documents when the _total_ percentage of deleted docs in the entire > index exceeds it. > Setting this to 50% should approximate current behavior. Setting it to 20% > caused the first cut at this to increase I/O roughly 10%. Setting it to 10% > caused about a 50% increase in I/O. > I was conflating the two issues, so I'll change 7976 and comment out the bits > that reference this new parameter. After it's checked in we can bring this > back. That should be less work than reconstructing this later. > Among the questions to be answered: > 1> what should the default be? I propose 20% as it results in significantly > less space wasted and helps control heap usage for a modest increase in I/O. > 2> what should the floor be? I propose 10% with _strong_ documentation > warnings about not setting it below 20%. > 3> should there be two parameters? 
I think this was discussed somewhat in > 7976. The first cut at this used this number for two purposes: > 3a> the total percentage of deleted docs index-wide to trip this trigger > 3b> the percentage of an _individual_ segment that had to be deleted if the > segment was over maxSegmentSize/2 bytes in order to be eligible for merging. > Empirically, using the same percentage for both caused the merging to hover > around the value specified for this parameter. > My proposal for <3> would be to have the parameter do double-duty. Assuming > my preliminary results hold, you specify this parameter at, say, 20% and once > the index hits that % deleted docs it hovers right around there, even if > you've forceMerged earlier down to 1 segment. This seems in line with what > I'd expect and adding another parameter seems excessively complicated to no > good purpose. We could always add something like that later if we wanted. -- This message was sent by Atlassian JIRA (v7.6.3#76005) - To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org For additional commands, e-mail: dev-h...@lucene.apache.org
[JENKINS] Lucene-Solr-Tests-master - Build # 2599 - Still Unstable
Build: https://builds.apache.org/job/Lucene-Solr-Tests-master/2599/ 3 tests failed. FAILED: junit.framework.TestSuite.org.apache.solr.prometheus.collector.SolrCollectorTest Error Message: Error from server at http://127.0.0.1:42055/solr: KeeperErrorCode = NoNode for /overseer/collection-queue-work/qnr-00 Stack Trace: org.apache.solr.client.solrj.impl.HttpSolrClient$RemoteSolrException: Error from server at http://127.0.0.1:42055/solr: KeeperErrorCode = NoNode for /overseer/collection-queue-work/qnr-00 at __randomizedtesting.SeedInfo.seed([F2DC59E0F2C62B7]:0) at org.apache.solr.client.solrj.impl.HttpSolrClient.executeMethod(HttpSolrClient.java:643) at org.apache.solr.client.solrj.impl.HttpSolrClient.request(HttpSolrClient.java:255) at org.apache.solr.client.solrj.impl.HttpSolrClient.request(HttpSolrClient.java:244) at org.apache.solr.client.solrj.impl.LBHttpSolrClient.doRequest(LBHttpSolrClient.java:483) at org.apache.solr.client.solrj.impl.LBHttpSolrClient.request(LBHttpSolrClient.java:413) at org.apache.solr.client.solrj.impl.CloudSolrClient.sendRequest(CloudSolrClient.java:1106) at org.apache.solr.client.solrj.impl.CloudSolrClient.requestWithRetryOnStaleState(CloudSolrClient.java:886) at org.apache.solr.client.solrj.impl.CloudSolrClient.request(CloudSolrClient.java:819) at org.apache.solr.client.solrj.SolrRequest.process(SolrRequest.java:194) at org.apache.solr.client.solrj.SolrRequest.process(SolrRequest.java:211) at org.apache.solr.prometheus.exporter.SolrExporterTestBase.setupCluster(SolrExporterTestBase.java:48) at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62) at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.lang.reflect.Method.invoke(Method.java:498) at com.carrotsearch.randomizedtesting.RandomizedRunner.invoke(RandomizedRunner.java:1737) at 
com.carrotsearch.randomizedtesting.RandomizedRunner$6.evaluate(RandomizedRunner.java:874) at com.carrotsearch.randomizedtesting.RandomizedRunner$7.evaluate(RandomizedRunner.java:890) at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36) at com.carrotsearch.randomizedtesting.rules.SystemPropertiesRestoreRule$1.evaluate(SystemPropertiesRestoreRule.java:57) at org.apache.lucene.util.AbstractBeforeAfterRule$1.evaluate(AbstractBeforeAfterRule.java:45) at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36) at org.apache.lucene.util.TestRuleStoreClassName$1.evaluate(TestRuleStoreClassName.java:41) at com.carrotsearch.randomizedtesting.rules.NoShadowingOrOverridesOnMethodsRule$1.evaluate(NoShadowingOrOverridesOnMethodsRule.java:40) at com.carrotsearch.randomizedtesting.rules.NoShadowingOrOverridesOnMethodsRule$1.evaluate(NoShadowingOrOverridesOnMethodsRule.java:40) at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36) at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36) at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36) at org.apache.lucene.util.TestRuleAssertionsRequired$1.evaluate(TestRuleAssertionsRequired.java:53) at org.apache.lucene.util.TestRuleMarkFailure$1.evaluate(TestRuleMarkFailure.java:47) at org.apache.lucene.util.TestRuleIgnoreAfterMaxFailures$1.evaluate(TestRuleIgnoreAfterMaxFailures.java:64) at org.apache.lucene.util.TestRuleIgnoreTestSuites$1.evaluate(TestRuleIgnoreTestSuites.java:54) at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36) at com.carrotsearch.randomizedtesting.ThreadLeakControl$StatementRunner.run(ThreadLeakControl.java:368) at java.lang.Thread.run(Thread.java:748) FAILED: org.apache.solr.cloud.LeaderTragicEventTest.test Error Message: org.apache.lucene.store.RawDirectoryWrapper cannot be 
cast to org.apache.lucene.store.MockDirectoryWrapper Stack Trace: java.lang.ClassCastException: org.apache.lucene.store.RawDirectoryWrapper cannot be cast to org.apache.lucene.store.MockDirectoryWrapper at __randomizedtesting.SeedInfo.seed([82711D083FDC990F:A2522D29120F4F7]:0) at org.apache.solr.cloud.LeaderTragicEventTest.corruptLeader(LeaderTragicEventTest.java:102) at org.apache.solr.cloud.LeaderTragicEventTest.test(LeaderTragicEventTest.java:74) at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62) at
[JENKINS] Lucene-Solr-master-Linux (64bit/jdk-9.0.4) - Build # 22449 - Still Unstable!
Build: https://jenkins.thetaphi.de/job/Lucene-Solr-master-Linux/22449/ Java: 64bit/jdk-9.0.4 -XX:+UseCompressedOops -XX:+UseG1GC 4 tests failed. FAILED: org.apache.solr.handler.admin.SegmentsInfoRequestHandlerTest.testSegmentInfosVersion Error Message: Exception during query Stack Trace: java.lang.RuntimeException: Exception during query at __randomizedtesting.SeedInfo.seed([195B2AFA3B0961E6:E185BF124365B0B5]:0) at org.apache.solr.SolrTestCaseJ4.assertQ(SolrTestCaseJ4.java:917) at org.apache.solr.handler.admin.SegmentsInfoRequestHandlerTest.testSegmentInfosVersion(SegmentsInfoRequestHandlerTest.java:68) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:564) at com.carrotsearch.randomizedtesting.RandomizedRunner.invoke(RandomizedRunner.java:1737) at com.carrotsearch.randomizedtesting.RandomizedRunner$8.evaluate(RandomizedRunner.java:934) at com.carrotsearch.randomizedtesting.RandomizedRunner$9.evaluate(RandomizedRunner.java:970) at com.carrotsearch.randomizedtesting.RandomizedRunner$10.evaluate(RandomizedRunner.java:984) at com.carrotsearch.randomizedtesting.rules.SystemPropertiesRestoreRule$1.evaluate(SystemPropertiesRestoreRule.java:57) at org.apache.lucene.util.TestRuleSetupTeardownChained$1.evaluate(TestRuleSetupTeardownChained.java:49) at org.apache.lucene.util.AbstractBeforeAfterRule$1.evaluate(AbstractBeforeAfterRule.java:45) at org.apache.lucene.util.TestRuleThreadAndTestName$1.evaluate(TestRuleThreadAndTestName.java:48) at org.apache.lucene.util.TestRuleIgnoreAfterMaxFailures$1.evaluate(TestRuleIgnoreAfterMaxFailures.java:64) at org.apache.lucene.util.TestRuleMarkFailure$1.evaluate(TestRuleMarkFailure.java:47) at 
com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36) at com.carrotsearch.randomizedtesting.ThreadLeakControl$StatementRunner.run(ThreadLeakControl.java:368) at com.carrotsearch.randomizedtesting.ThreadLeakControl.forkTimeoutingTask(ThreadLeakControl.java:817) at com.carrotsearch.randomizedtesting.ThreadLeakControl$3.evaluate(ThreadLeakControl.java:468) at com.carrotsearch.randomizedtesting.RandomizedRunner.runSingleTest(RandomizedRunner.java:943) at com.carrotsearch.randomizedtesting.RandomizedRunner$5.evaluate(RandomizedRunner.java:829) at com.carrotsearch.randomizedtesting.RandomizedRunner$6.evaluate(RandomizedRunner.java:879) at com.carrotsearch.randomizedtesting.RandomizedRunner$7.evaluate(RandomizedRunner.java:890) at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36) at com.carrotsearch.randomizedtesting.rules.SystemPropertiesRestoreRule$1.evaluate(SystemPropertiesRestoreRule.java:57) at org.apache.lucene.util.AbstractBeforeAfterRule$1.evaluate(AbstractBeforeAfterRule.java:45) at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36) at org.apache.lucene.util.TestRuleStoreClassName$1.evaluate(TestRuleStoreClassName.java:41) at com.carrotsearch.randomizedtesting.rules.NoShadowingOrOverridesOnMethodsRule$1.evaluate(NoShadowingOrOverridesOnMethodsRule.java:40) at com.carrotsearch.randomizedtesting.rules.NoShadowingOrOverridesOnMethodsRule$1.evaluate(NoShadowingOrOverridesOnMethodsRule.java:40) at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36) at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36) at org.apache.lucene.util.TestRuleAssertionsRequired$1.evaluate(TestRuleAssertionsRequired.java:53) at org.apache.lucene.util.TestRuleMarkFailure$1.evaluate(TestRuleMarkFailure.java:47) at 
org.apache.lucene.util.TestRuleIgnoreAfterMaxFailures$1.evaluate(TestRuleIgnoreAfterMaxFailures.java:64) at org.apache.lucene.util.TestRuleIgnoreTestSuites$1.evaluate(TestRuleIgnoreTestSuites.java:54) at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36) at com.carrotsearch.randomizedtesting.ThreadLeakControl$StatementRunner.run(ThreadLeakControl.java:368) at java.base/java.lang.Thread.run(Thread.java:844) Caused by: java.lang.RuntimeException: REQUEST FAILED: xpath=2=count(//lst[@name='segments']/lst/str[@name='version'][.='8.0.0']) xml response was:
[jira] [Commented] (LUCENE-8263) Add indexPctDeletedTarget as a parameter to TieredMergePolicy to control more aggressive merging
[ https://issues.apache.org/jira/browse/LUCENE-8263?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel=16543465#comment-16543465 ] Erick Erickson commented on LUCENE-8263: +1 to the patch. There were some weird edge cases in TestTieredMergePolicy that only came to light when I beasted it, so I'll run a few thousand iterations over the weekend and report back if any pop out. Your simulations numbers square pretty well with mine when I was doing this one at the same time as 7976. I originally advocated _not_ putting a floor on the percentage and providing users with one more way to shoot themselves in the foot. I've changed my mind on that, I think 20% is fine. Now that they can forceMerge or expungeDeletes without creating massive segments, I don't think there's any good (or even bad) reason to allow < 20%. Thanks again for working on this and your help with 7976. Much appreciated. > Add indexPctDeletedTarget as a parameter to TieredMergePolicy to control more > aggressive merging > > > Key: LUCENE-8263 > URL: https://issues.apache.org/jira/browse/LUCENE-8263 > Project: Lucene - Core > Issue Type: Improvement >Reporter: Erick Erickson >Assignee: Erick Erickson >Priority: Major > Attachments: LUCENE-8263.patch > > > Spinoff of LUCENE-7976 to keep the two issues separate. > The current TMP allows up to 50% deleted docs, which can be wasteful on large > indexes. This parameter will do more aggressive merging of segments with > deleted documents when the _total_ percentage of deleted docs in the entire > index exceeds it. > Setting this to 50% should approximate current behavior. Setting it to 20% > caused the first cut at this to increase I/O roughly 10%. Setting it to 10% > caused about a 50% increase in I/O. > I was conflating the two issues, so I'll change 7976 and comment out the bits > that reference this new parameter. After it's checked in we can bring this > back. That should be less work than reconstructing this later. 
> Among the questions to be answered: > 1> what should the default be? I propose 20% as it results in significantly > less space wasted and helps control heap usage for a modest increase in I/O. > 2> what should the floor be? I propose 10% with _strong_ documentation > warnings about not setting it below 20%. > 3> should there be two parameters? I think this was discussed somewhat in > 7976. The first cut at this used this number for two purposes: > 3a> the total percentage of deleted docs index-wide to trip this trigger > 3b> the percentage of an _individual_ segment that had to be deleted if the > segment was over maxSegmentSize/2 bytes in order to be eligible for merging. > Empirically, using the same percentage for both caused the merging to hover > around the value specified for this parameter. > My proposal for <3> would be to have the parameter do double-duty. Assuming > my preliminary results hold, you specify this parameter at, say, 20% and once > the index hits that % deleted docs it hovers right around there, even if > you've forceMerged earlier down to 1 segment. This seems in line with what > I'd expect and adding another parameter seems excessively complicated to no > good purpose. We could always add something like that later if we wanted. -- This message was sent by Atlassian JIRA (v7.6.3#76005) - To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org For additional commands, e-mail: dev-h...@lucene.apache.org
[JENKINS] Lucene-Solr-7.x-Linux (64bit/jdk1.8.0_172) - Build # 2316 - Still Unstable!
Build: https://jenkins.thetaphi.de/job/Lucene-Solr-7.x-Linux/2316/ Java: 64bit/jdk1.8.0_172 -XX:+UseCompressedOops -XX:+UseConcMarkSweepGC 9 tests failed. FAILED: junit.framework.TestSuite.org.apache.solr.handler.TestHdfsBackupRestoreCore Error Message: Suite timeout exceeded (>= 720 msec). Stack Trace: java.lang.Exception: Suite timeout exceeded (>= 720 msec). at __randomizedtesting.SeedInfo.seed([6B7AB9A74EAE55C5]:0) FAILED: org.apache.solr.cloud.LeaderTragicEventTest.testOtherReplicasAreNotActive Error Message: Jetty Connector is not open: -2 Stack Trace: java.lang.IllegalStateException: Jetty Connector is not open: -2 at __randomizedtesting.SeedInfo.seed([6B7AB9A74EAE55C5:EECE95D07251EC5D]:0) at org.apache.solr.client.solrj.embedded.JettySolrRunner.getBaseUrl(JettySolrRunner.java:499) at org.apache.solr.cloud.MiniSolrCloudCluster.getReplicaJetty(MiniSolrCloudCluster.java:539) at org.apache.solr.cloud.LeaderTragicEventTest.corruptLeader(LeaderTragicEventTest.java:100) at org.apache.solr.cloud.LeaderTragicEventTest.testOtherReplicasAreNotActive(LeaderTragicEventTest.java:150) at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62) at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.lang.reflect.Method.invoke(Method.java:498) at com.carrotsearch.randomizedtesting.RandomizedRunner.invoke(RandomizedRunner.java:1737) at com.carrotsearch.randomizedtesting.RandomizedRunner$8.evaluate(RandomizedRunner.java:934) at com.carrotsearch.randomizedtesting.RandomizedRunner$9.evaluate(RandomizedRunner.java:970) at com.carrotsearch.randomizedtesting.RandomizedRunner$10.evaluate(RandomizedRunner.java:984) at com.carrotsearch.randomizedtesting.rules.SystemPropertiesRestoreRule$1.evaluate(SystemPropertiesRestoreRule.java:57) at org.apache.lucene.util.TestRuleSetupTeardownChained$1.evaluate(TestRuleSetupTeardownChained.java:49) at 
org.apache.lucene.util.AbstractBeforeAfterRule$1.evaluate(AbstractBeforeAfterRule.java:45) at org.apache.lucene.util.TestRuleThreadAndTestName$1.evaluate(TestRuleThreadAndTestName.java:48) at org.apache.lucene.util.TestRuleIgnoreAfterMaxFailures$1.evaluate(TestRuleIgnoreAfterMaxFailures.java:64) at org.apache.lucene.util.TestRuleMarkFailure$1.evaluate(TestRuleMarkFailure.java:47) at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36) at com.carrotsearch.randomizedtesting.ThreadLeakControl$StatementRunner.run(ThreadLeakControl.java:368) at com.carrotsearch.randomizedtesting.ThreadLeakControl.forkTimeoutingTask(ThreadLeakControl.java:817) at com.carrotsearch.randomizedtesting.ThreadLeakControl$3.evaluate(ThreadLeakControl.java:468) at com.carrotsearch.randomizedtesting.RandomizedRunner.runSingleTest(RandomizedRunner.java:943) at com.carrotsearch.randomizedtesting.RandomizedRunner$5.evaluate(RandomizedRunner.java:829) at com.carrotsearch.randomizedtesting.RandomizedRunner$6.evaluate(RandomizedRunner.java:879) at com.carrotsearch.randomizedtesting.RandomizedRunner$7.evaluate(RandomizedRunner.java:890) at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36) at com.carrotsearch.randomizedtesting.rules.SystemPropertiesRestoreRule$1.evaluate(SystemPropertiesRestoreRule.java:57) at org.apache.lucene.util.AbstractBeforeAfterRule$1.evaluate(AbstractBeforeAfterRule.java:45) at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36) at org.apache.lucene.util.TestRuleStoreClassName$1.evaluate(TestRuleStoreClassName.java:41) at com.carrotsearch.randomizedtesting.rules.NoShadowingOrOverridesOnMethodsRule$1.evaluate(NoShadowingOrOverridesOnMethodsRule.java:40) at com.carrotsearch.randomizedtesting.rules.NoShadowingOrOverridesOnMethodsRule$1.evaluate(NoShadowingOrOverridesOnMethodsRule.java:40) at 
com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36) at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36) at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36) at org.apache.lucene.util.TestRuleAssertionsRequired$1.evaluate(TestRuleAssertionsRequired.java:53) at org.apache.lucene.util.TestRuleMarkFailure$1.evaluate(TestRuleMarkFailure.java:47) at org.apache.lucene.util.TestRuleIgnoreAfterMaxFailures$1.evaluate(TestRuleIgnoreAfterMaxFailures.java:64) at org.apache.lucene.util.TestRuleIgnoreTestSuites$1.evaluate(TestRuleIgnoreTestSuites.java:54) at
[JENKINS-EA] Lucene-Solr-7.x-Windows (64bit/jdk-11-ea+22) - Build # 688 - Still Unstable!
Build: https://jenkins.thetaphi.de/job/Lucene-Solr-7.x-Windows/688/ Java: 64bit/jdk-11-ea+22 -XX:-UseCompressedOops -XX:+UseSerialGC 22 tests failed. FAILED: org.apache.solr.cloud.LeaderTragicEventTest.testOtherReplicasAreNotActive Error Message: Jetty Connector is not open: -2 Stack Trace: java.lang.IllegalStateException: Jetty Connector is not open: -2 at __randomizedtesting.SeedInfo.seed([F669AE74DC1324EC:73DD8203E0EC9D74]:0) at org.apache.solr.client.solrj.embedded.JettySolrRunner.getBaseUrl(JettySolrRunner.java:499) at org.apache.solr.cloud.MiniSolrCloudCluster.getReplicaJetty(MiniSolrCloudCluster.java:539) at org.apache.solr.cloud.LeaderTragicEventTest.corruptLeader(LeaderTragicEventTest.java:100) at org.apache.solr.cloud.LeaderTragicEventTest.testOtherReplicasAreNotActive(LeaderTragicEventTest.java:150) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:566) at com.carrotsearch.randomizedtesting.RandomizedRunner.invoke(RandomizedRunner.java:1737) at com.carrotsearch.randomizedtesting.RandomizedRunner$8.evaluate(RandomizedRunner.java:934) at com.carrotsearch.randomizedtesting.RandomizedRunner$9.evaluate(RandomizedRunner.java:970) at com.carrotsearch.randomizedtesting.RandomizedRunner$10.evaluate(RandomizedRunner.java:984) at com.carrotsearch.randomizedtesting.rules.SystemPropertiesRestoreRule$1.evaluate(SystemPropertiesRestoreRule.java:57) at org.apache.lucene.util.TestRuleSetupTeardownChained$1.evaluate(TestRuleSetupTeardownChained.java:49) at org.apache.lucene.util.AbstractBeforeAfterRule$1.evaluate(AbstractBeforeAfterRule.java:45) at org.apache.lucene.util.TestRuleThreadAndTestName$1.evaluate(TestRuleThreadAndTestName.java:48) at 
org.apache.lucene.util.TestRuleIgnoreAfterMaxFailures$1.evaluate(TestRuleIgnoreAfterMaxFailures.java:64) at org.apache.lucene.util.TestRuleMarkFailure$1.evaluate(TestRuleMarkFailure.java:47) at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36) at com.carrotsearch.randomizedtesting.ThreadLeakControl$StatementRunner.run(ThreadLeakControl.java:368) at com.carrotsearch.randomizedtesting.ThreadLeakControl.forkTimeoutingTask(ThreadLeakControl.java:817) at com.carrotsearch.randomizedtesting.ThreadLeakControl$3.evaluate(ThreadLeakControl.java:468) at com.carrotsearch.randomizedtesting.RandomizedRunner.runSingleTest(RandomizedRunner.java:943) at com.carrotsearch.randomizedtesting.RandomizedRunner$5.evaluate(RandomizedRunner.java:829) at com.carrotsearch.randomizedtesting.RandomizedRunner$6.evaluate(RandomizedRunner.java:879) at com.carrotsearch.randomizedtesting.RandomizedRunner$7.evaluate(RandomizedRunner.java:890) at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36) at com.carrotsearch.randomizedtesting.rules.SystemPropertiesRestoreRule$1.evaluate(SystemPropertiesRestoreRule.java:57) at org.apache.lucene.util.AbstractBeforeAfterRule$1.evaluate(AbstractBeforeAfterRule.java:45) at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36) at org.apache.lucene.util.TestRuleStoreClassName$1.evaluate(TestRuleStoreClassName.java:41) at com.carrotsearch.randomizedtesting.rules.NoShadowingOrOverridesOnMethodsRule$1.evaluate(NoShadowingOrOverridesOnMethodsRule.java:40) at com.carrotsearch.randomizedtesting.rules.NoShadowingOrOverridesOnMethodsRule$1.evaluate(NoShadowingOrOverridesOnMethodsRule.java:40) at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36) at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36) at 
org.apache.lucene.util.TestRuleAssertionsRequired$1.evaluate(TestRuleAssertionsRequired.java:53) at org.apache.lucene.util.TestRuleMarkFailure$1.evaluate(TestRuleMarkFailure.java:47) at org.apache.lucene.util.TestRuleIgnoreAfterMaxFailures$1.evaluate(TestRuleIgnoreAfterMaxFailures.java:64) at org.apache.lucene.util.TestRuleIgnoreTestSuites$1.evaluate(TestRuleIgnoreTestSuites.java:54) at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36) at com.carrotsearch.randomizedtesting.ThreadLeakControl$StatementRunner.run(ThreadLeakControl.java:368) at java.base/java.lang.Thread.run(Thread.java:834) FAILED:
[JENKINS] Lucene-Solr-repro - Build # 968 - Unstable
Build: https://builds.apache.org/job/Lucene-Solr-repro/968/ [...truncated 36 lines...] [repro] Jenkins log URL: https://builds.apache.org/job/Lucene-Solr-Tests-master/2598/consoleText [repro] Revision: 8997d41357ab95eaa88702b93c75de4b1813457b [repro] Repro line: ant test -Dtestcase=LeaderTragicEventTest -Dtests.method=testOtherReplicasAreNotActive -Dtests.seed=8011C67BC3D3A227 -Dtests.multiplier=2 -Dtests.slow=true -Dtests.locale=pt-PT -Dtests.timezone=America/Ojinaga -Dtests.asserts=true -Dtests.file.encoding=US-ASCII [repro] git rev-parse --abbrev-ref HEAD [repro] git rev-parse HEAD [repro] Initial local git branch/revision: 07de1dc011015ea7a344cda419e78397d00987c8 [repro] git fetch [repro] git checkout 8997d41357ab95eaa88702b93c75de4b1813457b [...truncated 2 lines...] [repro] git merge --ff-only [...truncated 1 lines...] [repro] ant clean [...truncated 6 lines...] [repro] Test suites by module: [repro]solr/core [repro] LeaderTragicEventTest [repro] ant compile-test [...truncated 3300 lines...] [repro] ant test-nocompile -Dtests.dups=5 -Dtests.maxfailures=5 -Dtests.class="*.LeaderTragicEventTest" -Dtests.showOutput=onerror -Dtests.seed=8011C67BC3D3A227 -Dtests.multiplier=2 -Dtests.slow=true -Dtests.locale=pt-PT -Dtests.timezone=America/Ojinaga -Dtests.asserts=true -Dtests.file.encoding=US-ASCII [...truncated 3255 lines...] [repro] Setting last failure code to 256 [repro] Failures: [repro] 3/5 failed: org.apache.solr.cloud.LeaderTragicEventTest [repro] git checkout 07de1dc011015ea7a344cda419e78397d00987c8 [...truncated 2 lines...] [repro] Exiting with code 256 [...truncated 6 lines...] - To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org For additional commands, e-mail: dev-h...@lucene.apache.org
[jira] [Commented] (LUCENE-8263) Add indexPctDeletedTarget as a parameter to TieredMergePolicy to control more aggressive merging
[ https://issues.apache.org/jira/browse/LUCENE-8263?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel=16543375#comment-16543375 ] Adrien Grand commented on LUCENE-8263: -- bq. It's surprising how many tests relied on the behavior of the default merge policy. I didn't find them all yet. :) bq. the number of deletions that will be reclaimed is the deletion count for that segment when the merge first kicked off Good point, I will add a comment about that. I don't think we can easily know the number of deletes that will be reclaimed, can we? If not I think the current approach of underestimating deletes is good since it means less merging. Like you said, the logic becomes correct again when the merge is over. Thanks for looking! > Add indexPctDeletedTarget as a parameter to TieredMergePolicy to control more > aggressive merging > > > Key: LUCENE-8263 > URL: https://issues.apache.org/jira/browse/LUCENE-8263 > Project: Lucene - Core > Issue Type: Improvement >Reporter: Erick Erickson >Assignee: Erick Erickson >Priority: Major > Attachments: LUCENE-8263.patch > > > Spinoff of LUCENE-7976 to keep the two issues separate. > The current TMP allows up to 50% deleted docs, which can be wasteful on large > indexes. This parameter will do more aggressive merging of segments with > deleted documents when the _total_ percentage of deleted docs in the entire > index exceeds it. > Setting this to 50% should approximate current behavior. Setting it to 20% > caused the first cut at this to increase I/O roughly 10%. Setting it to 10% > caused about a 50% increase in I/O. > I was conflating the two issues, so I'll change 7976 and comment out the bits > that reference this new parameter. After it's checked in we can bring this > back. That should be less work than reconstructing this later. > Among the questions to be answered: > 1> what should the default be? 
I propose 20% as it results in significantly > less space wasted and helps control heap usage for a modest increase in I/O. > 2> what should the floor be? I propose 10% with _strong_ documentation > warnings about not setting it below 20%. > 3> should there be two parameters? I think this was discussed somewhat in > 7976. The first cut at this used this number for two purposes: > 3a> the total percentage of deleted docs index-wide to trip this trigger > 3b> the percentage of an _individual_ segment that had to be deleted if the > segment was over maxSegmentSize/2 bytes in order to be eligible for merging. > Empirically, using the same percentage for both caused the merging to hover > around the value specified for this parameter. > My proposal for <3> would be to have the parameter do double-duty. Assuming > my preliminary results hold, you specify this parameter at, say, 20% and once > the index hits that % deleted docs it hovers right around there, even if > you've forceMerged earlier down to 1 segment. This seems in line with what > I'd expect and adding another parameter seems excessively complicated to no > good purpose. We could always add something like that later if we wanted. -- This message was sent by Atlassian JIRA (v7.6.3#76005) - To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org For additional commands, e-mail: dev-h...@lucene.apache.org
[jira] [Commented] (LUCENE-8345) Add wrapper class constructors to forbiddenapis
[ https://issues.apache.org/jira/browse/LUCENE-8345?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel=16543373#comment-16543373 ] Michael Braun commented on LUCENE-8345: --- [~thetaphi] do you want me to update the patch for the latest trunk? Also should the String constructor be added? > Add wrapper class constructors to forbiddenapis > --- > > Key: LUCENE-8345 > URL: https://issues.apache.org/jira/browse/LUCENE-8345 > Project: Lucene - Core > Issue Type: Improvement >Reporter: Michael Braun >Assignee: Uwe Schindler >Priority: Minor > Time Spent: 20m > Remaining Estimate: 0h > > Wrapper classes for the Java primitives (Boolean, Byte, Short, Character, > Integer, Long, Float, Double) have constructors which will always create new > objects. These constructors are officially deprecated as of Java 9 and it is > recommended to use the public static methods since these can reuse the same > underlying objects. In 99% of cases we should be doing this, so these > constructors should be added to forbiddenapis and code corrected to use > autoboxing or call the static methods (.valueOf, .parse*) explicitly. -- This message was sent by Atlassian JIRA (v7.6.3#76005) - To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org For additional commands, e-mail: dev-h...@lucene.apache.org
[jira] [Commented] (LUCENE-8263) Add indexPctDeletedTarget as a parameter to TieredMergePolicy to control more aggressive merging
[ https://issues.apache.org/jira/browse/LUCENE-8263?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel=16543346#comment-16543346 ] Michael McCandless commented on LUCENE-8263: +1, patch looks great, and those write amplification simulation experiments are wonderful; a default of 33% makes sense. It's much more intuitive to the user to set the limit on overall % deletes, than the cryptic existing {{reclaimDeletesWeight}}. It's surprising how many tests relied on the behavior of the default merge policy. One question about this comment: {quote}// if this segment is merging, then its deletes are being reclaimed already. + // only count live docs in the total max doc{quote} It's true that a merging segments will have deletions reclaimed, but, the number of deletions that will be reclaimed is the deletion count for that segment when the merge first kicked off. Any new deletions that accumulate on that segment, won't be merged away, and will carry over to the merged segment, yet I think the logic in the patch will "pretend" those carry over deletions will also be merged away, because the {{MergePolicy.size}} method checks the live deletion count. I don't think we need to fix this here ... and, once the merge finishes, and the deletes carry over, the logic will then be correct when considering that merged segment for further merging. > Add indexPctDeletedTarget as a parameter to TieredMergePolicy to control more > aggressive merging > > > Key: LUCENE-8263 > URL: https://issues.apache.org/jira/browse/LUCENE-8263 > Project: Lucene - Core > Issue Type: Improvement >Reporter: Erick Erickson >Assignee: Erick Erickson >Priority: Major > Attachments: LUCENE-8263.patch > > > Spinoff of LUCENE-7976 to keep the two issues separate. > The current TMP allows up to 50% deleted docs, which can be wasteful on large > indexes. 
This parameter will do more aggressive merging of segments with > deleted documents when the _total_ percentage of deleted docs in the entire > index exceeds it. > Setting this to 50% should approximate current behavior. Setting it to 20% > caused the first cut at this to increase I/O roughly 10%. Setting it to 10% > caused about a 50% increase in I/O. > I was conflating the two issues, so I'll change 7976 and comment out the bits > that reference this new parameter. After it's checked in we can bring this > back. That should be less work than reconstructing this later. > Among the questions to be answered: > 1> what should the default be? I propose 20% as it results in significantly > less space wasted and helps control heap usage for a modest increase in I/O. > 2> what should the floor be? I propose 10% with _strong_ documentation > warnings about not setting it below 20%. > 3> should there be two parameters? I think this was discussed somewhat in > 7976. The first cut at this used this number for two purposes: > 3a> the total percentage of deleted docs index-wide to trip this trigger > 3b> the percentage of an _individual_ segment that had to be deleted if the > segment was over maxSegmentSize/2 bytes in order to be eligible for merging. > Empirically, using the same percentage for both caused the merging to hover > around the value specified for this parameter. > My proposal for <3> would be to have the parameter do double-duty. Assuming > my preliminary results hold, you specify this parameter at, say, 20% and once > the index hits that % deleted docs it hovers right around there, even if > you've forceMerged earlier down to 1 segment. This seems in line with what > I'd expect and adding another parameter seems excessively complicated to no > good purpose. We could always add something like that later if we wanted. 
-- This message was sent by Atlassian JIRA (v7.6.3#76005) - To unsubscribe, e-mail: dev-unsubscribe@lucene.apache.org For additional commands, e-mail: dev-help@lucene.apache.org
[JENKINS] Lucene-Solr-NightlyTests-master - Build # 1584 - Still Unstable
Build: https://builds.apache.org/job/Lucene-Solr-NightlyTests-master/1584/ 1 tests failed. FAILED: org.apache.solr.update.TestInPlaceUpdatesDistrib.test Error Message: Timeout occured while waiting response from server at: https://127.0.0.1:55775/collection1 Stack Trace: org.apache.solr.client.solrj.SolrServerException: Timeout occured while waiting response from server at: https://127.0.0.1:55775/collection1 at __randomizedtesting.SeedInfo.seed([48BDE1703E2A6719:C0E9DEAA90D60AE1]:0) at org.apache.solr.client.solrj.impl.HttpSolrClient.executeMethod(HttpSolrClient.java:654) at org.apache.solr.client.solrj.impl.HttpSolrClient.request(HttpSolrClient.java:255) at org.apache.solr.client.solrj.impl.HttpSolrClient.request(HttpSolrClient.java:244) at org.apache.solr.client.solrj.impl.LBHttpSolrClient.doRequest(LBHttpSolrClient.java:483) at org.apache.solr.client.solrj.impl.LBHttpSolrClient.request(LBHttpSolrClient.java:413) at org.apache.solr.client.solrj.impl.CloudSolrClient.sendRequest(CloudSolrClient.java:1106) at org.apache.solr.client.solrj.impl.CloudSolrClient.requestWithRetryOnStaleState(CloudSolrClient.java:886) at org.apache.solr.client.solrj.impl.CloudSolrClient.request(CloudSolrClient.java:819) at org.apache.solr.client.solrj.SolrRequest.process(SolrRequest.java:194) at org.apache.solr.client.solrj.SolrClient.commit(SolrClient.java:484) at org.apache.solr.client.solrj.SolrClient.commit(SolrClient.java:463) at org.apache.solr.cloud.AbstractFullDistribZkTestBase.commit(AbstractFullDistribZkTestBase.java:1591) at org.apache.solr.update.TestInPlaceUpdatesDistrib.docValuesUpdateTest(TestInPlaceUpdatesDistrib.java:353) at org.apache.solr.update.TestInPlaceUpdatesDistrib.test(TestInPlaceUpdatesDistrib.java:145) at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62) at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at 
java.lang.reflect.Method.invoke(Method.java:498) at com.carrotsearch.randomizedtesting.RandomizedRunner.invoke(RandomizedRunner.java:1737) at com.carrotsearch.randomizedtesting.RandomizedRunner$8.evaluate(RandomizedRunner.java:934) at com.carrotsearch.randomizedtesting.RandomizedRunner$9.evaluate(RandomizedRunner.java:970) at com.carrotsearch.randomizedtesting.RandomizedRunner$10.evaluate(RandomizedRunner.java:984) at org.apache.solr.BaseDistributedSearchTestCase$ShardsRepeatRule$ShardsFixedStatement.callStatement(BaseDistributedSearchTestCase.java:1008) at org.apache.solr.BaseDistributedSearchTestCase$ShardsRepeatRule$ShardsStatement.evaluate(BaseDistributedSearchTestCase.java:983) at com.carrotsearch.randomizedtesting.rules.SystemPropertiesRestoreRule$1.evaluate(SystemPropertiesRestoreRule.java:57) at org.apache.lucene.util.TestRuleSetupTeardownChained$1.evaluate(TestRuleSetupTeardownChained.java:49) at org.apache.lucene.util.AbstractBeforeAfterRule$1.evaluate(AbstractBeforeAfterRule.java:45) at org.apache.lucene.util.TestRuleThreadAndTestName$1.evaluate(TestRuleThreadAndTestName.java:48) at org.apache.lucene.util.TestRuleIgnoreAfterMaxFailures$1.evaluate(TestRuleIgnoreAfterMaxFailures.java:64) at org.apache.lucene.util.TestRuleMarkFailure$1.evaluate(TestRuleMarkFailure.java:47) at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36) at com.carrotsearch.randomizedtesting.ThreadLeakControl$StatementRunner.run(ThreadLeakControl.java:368) at com.carrotsearch.randomizedtesting.ThreadLeakControl.forkTimeoutingTask(ThreadLeakControl.java:817) at com.carrotsearch.randomizedtesting.ThreadLeakControl$3.evaluate(ThreadLeakControl.java:468) at com.carrotsearch.randomizedtesting.RandomizedRunner.runSingleTest(RandomizedRunner.java:943) at com.carrotsearch.randomizedtesting.RandomizedRunner$5.evaluate(RandomizedRunner.java:829) at com.carrotsearch.randomizedtesting.RandomizedRunner$6.evaluate(RandomizedRunner.java:879) at 
com.carrotsearch.randomizedtesting.RandomizedRunner$7.evaluate(RandomizedRunner.java:890) at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36) at com.carrotsearch.randomizedtesting.rules.SystemPropertiesRestoreRule$1.evaluate(SystemPropertiesRestoreRule.java:57) at org.apache.lucene.util.AbstractBeforeAfterRule$1.evaluate(AbstractBeforeAfterRule.java:45) at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36) at org.apache.lucene.util.TestRuleStoreClassName$1.evaluate(TestRuleStoreClassName.java:41) at
[jira] [Created] (SOLR-12551) Upgrade to Tika 1.18
Tim Allison created SOLR-12551: -- Summary: Upgrade to Tika 1.18 Key: SOLR-12551 URL: https://issues.apache.org/jira/browse/SOLR-12551 Project: Solr Issue Type: Task Security Level: Public (Default Security Level. Issues are Public) Reporter: Tim Allison Until 1.19 is ready (SOLR-12423), let's upgrade to 1.18. -- This message was sent by Atlassian JIRA (v7.6.3#76005) - To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org For additional commands, e-mail: dev-h...@lucene.apache.org
[jira] [Commented] (LUCENE-8397) Add DirectoryTaxonomyWriter.getCache
[ https://issues.apache.org/jira/browse/LUCENE-8397?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel=16543272#comment-16543272 ] ASF subversion and git services commented on LUCENE-8397: - Commit 82388667bf5cf54e6dac7346cc323a516b9eca2a in lucene-solr's branch refs/heads/branch_7x from Mike McCandless [ https://git-wip-us.apache.org/repos/asf?p=lucene-solr.git;h=8238866 ] LUCENE-8397: add DirectoryTaxonomyWriter.getCache > Add DirectoryTaxonomyWriter.getCache > > > Key: LUCENE-8397 > URL: https://issues.apache.org/jira/browse/LUCENE-8397 > Project: Lucene - Core > Issue Type: Improvement >Reporter: Michael McCandless >Assignee: Michael McCandless >Priority: Major > Fix For: master (8.0), 7.5 > > Attachments: LUCENE-8397.patch > > > {{DirectoryTaxonomyWriter}} uses a cache to hold recently mapped labels / > ordinals. You can provide an impl when you create the class, or it will use > a default impl. > > I'd like to add a getter, {{DirectoryTaxonomyWriter.getCache}} to retrieve > the cache it's using; this is helpful for getting diagnostics (how many > cached labels, how much RAM used, etc.). -- This message was sent by Atlassian JIRA (v7.6.3#76005) - To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org For additional commands, e-mail: dev-h...@lucene.apache.org
[jira] [Resolved] (LUCENE-8397) Add DirectoryTaxonomyWriter.getCache
[ https://issues.apache.org/jira/browse/LUCENE-8397?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel ] Michael McCandless resolved LUCENE-8397. Resolution: Fixed Fix Version/s: 7.5 master (8.0) > Add DirectoryTaxonomyWriter.getCache > > > Key: LUCENE-8397 > URL: https://issues.apache.org/jira/browse/LUCENE-8397 > Project: Lucene - Core > Issue Type: Improvement >Reporter: Michael McCandless >Assignee: Michael McCandless >Priority: Major > Fix For: master (8.0), 7.5 > > Attachments: LUCENE-8397.patch > > > {{DirectoryTaxonomyWriter}} uses a cache to hold recently mapped labels / > ordinals. You can provide an impl when you create the class, or it will use > a default impl. > > I'd like to add a getter, {{DirectoryTaxonomyWriter.getCache}} to retrieve > the cache it's using; this is helpful for getting diagnostics (how many > cached labels, how much RAM used, etc.). -- This message was sent by Atlassian JIRA (v7.6.3#76005) - To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org For additional commands, e-mail: dev-h...@lucene.apache.org
[jira] [Commented] (LUCENE-8397) Add DirectoryTaxonomyWriter.getCache
[ https://issues.apache.org/jira/browse/LUCENE-8397?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel=16543270#comment-16543270 ] ASF subversion and git services commented on LUCENE-8397: - Commit 07de1dc011015ea7a344cda419e78397d00987c8 in lucene-solr's branch refs/heads/master from Mike McCandless [ https://git-wip-us.apache.org/repos/asf?p=lucene-solr.git;h=07de1dc ] LUCENE-8397: add DirectoryTaxonomyWriter.getCache > Add DirectoryTaxonomyWriter.getCache > > > Key: LUCENE-8397 > URL: https://issues.apache.org/jira/browse/LUCENE-8397 > Project: Lucene - Core > Issue Type: Improvement >Reporter: Michael McCandless >Assignee: Michael McCandless >Priority: Major > Attachments: LUCENE-8397.patch > > > {{DirectoryTaxonomyWriter}} uses a cache to hold recently mapped labels / > ordinals. You can provide an impl when you create the class, or it will use > a default impl. > > I'd like to add a getter, {{DirectoryTaxonomyWriter.getCache}} to retrieve > the cache it's using; this is helpful for getting diagnostics (how many > cached labels, how much RAM used, etc.). -- This message was sent by Atlassian JIRA (v7.6.3#76005) - To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org For additional commands, e-mail: dev-h...@lucene.apache.org
[JENKINS] Lucene-Solr-master-Linux (32bit/jdk1.8.0_172) - Build # 22448 - Still Unstable!
Build: https://jenkins.thetaphi.de/job/Lucene-Solr-master-Linux/22448/ Java: 32bit/jdk1.8.0_172 -server -XX:+UseParallelGC 4 tests failed. FAILED: org.apache.solr.cloud.LeaderTragicEventTest.testOtherReplicasAreNotActive Error Message: Jetty Connector is not open: -2 Stack Trace: java.lang.IllegalStateException: Jetty Connector is not open: -2 at __randomizedtesting.SeedInfo.seed([59C36E2EAB4690D1:DC77425997B92949]:0) at org.apache.solr.client.solrj.embedded.JettySolrRunner.getBaseUrl(JettySolrRunner.java:499) at org.apache.solr.cloud.MiniSolrCloudCluster.getReplicaJetty(MiniSolrCloudCluster.java:539) at org.apache.solr.cloud.LeaderTragicEventTest.corruptLeader(LeaderTragicEventTest.java:100) at org.apache.solr.cloud.LeaderTragicEventTest.testOtherReplicasAreNotActive(LeaderTragicEventTest.java:150) at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62) at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.lang.reflect.Method.invoke(Method.java:498) at com.carrotsearch.randomizedtesting.RandomizedRunner.invoke(RandomizedRunner.java:1737) at com.carrotsearch.randomizedtesting.RandomizedRunner$8.evaluate(RandomizedRunner.java:934) at com.carrotsearch.randomizedtesting.RandomizedRunner$9.evaluate(RandomizedRunner.java:970) at com.carrotsearch.randomizedtesting.RandomizedRunner$10.evaluate(RandomizedRunner.java:984) at com.carrotsearch.randomizedtesting.rules.SystemPropertiesRestoreRule$1.evaluate(SystemPropertiesRestoreRule.java:57) at org.apache.lucene.util.TestRuleSetupTeardownChained$1.evaluate(TestRuleSetupTeardownChained.java:49) at org.apache.lucene.util.AbstractBeforeAfterRule$1.evaluate(AbstractBeforeAfterRule.java:45) at org.apache.lucene.util.TestRuleThreadAndTestName$1.evaluate(TestRuleThreadAndTestName.java:48) at org.apache.lucene.util.TestRuleIgnoreAfterMaxFailures$1.evaluate(TestRuleIgnoreAfterMaxFailures.java:64) at 
org.apache.lucene.util.TestRuleMarkFailure$1.evaluate(TestRuleMarkFailure.java:47) at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36) at com.carrotsearch.randomizedtesting.ThreadLeakControl$StatementRunner.run(ThreadLeakControl.java:368) at com.carrotsearch.randomizedtesting.ThreadLeakControl.forkTimeoutingTask(ThreadLeakControl.java:817) at com.carrotsearch.randomizedtesting.ThreadLeakControl$3.evaluate(ThreadLeakControl.java:468) at com.carrotsearch.randomizedtesting.RandomizedRunner.runSingleTest(RandomizedRunner.java:943) at com.carrotsearch.randomizedtesting.RandomizedRunner$5.evaluate(RandomizedRunner.java:829) at com.carrotsearch.randomizedtesting.RandomizedRunner$6.evaluate(RandomizedRunner.java:879) at com.carrotsearch.randomizedtesting.RandomizedRunner$7.evaluate(RandomizedRunner.java:890) at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36) at com.carrotsearch.randomizedtesting.rules.SystemPropertiesRestoreRule$1.evaluate(SystemPropertiesRestoreRule.java:57) at org.apache.lucene.util.AbstractBeforeAfterRule$1.evaluate(AbstractBeforeAfterRule.java:45) at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36) at org.apache.lucene.util.TestRuleStoreClassName$1.evaluate(TestRuleStoreClassName.java:41) at com.carrotsearch.randomizedtesting.rules.NoShadowingOrOverridesOnMethodsRule$1.evaluate(NoShadowingOrOverridesOnMethodsRule.java:40) at com.carrotsearch.randomizedtesting.rules.NoShadowingOrOverridesOnMethodsRule$1.evaluate(NoShadowingOrOverridesOnMethodsRule.java:40) at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36) at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36) at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36) at 
org.apache.lucene.util.TestRuleAssertionsRequired$1.evaluate(TestRuleAssertionsRequired.java:53) at org.apache.lucene.util.TestRuleMarkFailure$1.evaluate(TestRuleMarkFailure.java:47) at org.apache.lucene.util.TestRuleIgnoreAfterMaxFailures$1.evaluate(TestRuleIgnoreAfterMaxFailures.java:64) at org.apache.lucene.util.TestRuleIgnoreTestSuites$1.evaluate(TestRuleIgnoreTestSuites.java:54) at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36) at com.carrotsearch.randomizedtesting.ThreadLeakControl$StatementRunner.run(ThreadLeakControl.java:368) at java.lang.Thread.run(Thread.java:748) FAILED:
[jira] [Comment Edited] (LUCENE-8291) Possible security issue when parsing XML documents containing external entity references
[ https://issues.apache.org/jira/browse/LUCENE-8291?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel=16543201#comment-16543201 ] Andrejs Aleksejevs edited comment on LUCENE-8291 at 7/13/18 1:44 PM: - I have used this construction to load database configurations, now I got an error. What's the best way to load configurations for each core in solrconfig.xml? {\{http://www.w3.org/2001/XInclude;> }} {\{ }} {{ <}}{\{xi:include href="file:///var/lib/solr/conf/database.dih.dev.cr.xml" /> }} {{}} {\{ }} *database.dih.dev.cr.xml* {{ data-config.xml org.mariadb.jdbc.Driver jdbc:mysql://localhost:3306database_name userName password }} was (Author: oyeme): I have used this construction to load database configurations, now I got an error. What's the best way to load configurations for each core in solrconfig.xml? {{http://www.w3.org/2001/XInclude;> }} {{ }} {{ <}}{{xi:include href="file:///var/lib/solr/conf/database.dih.dev.cr.xml" /> }} {{}} {{ }} > Possible security issue when parsing XML documents containing external entity > references > > > Key: LUCENE-8291 > URL: https://issues.apache.org/jira/browse/LUCENE-8291 > Project: Lucene - Core > Issue Type: Bug > Components: modules/queryparser >Affects Versions: 7.2.1 >Reporter: Hendrik Saly >Assignee: Uwe Schindler >Priority: Major > Fix For: 7.4, master (8.0) > > Attachments: LUCENE-8291-2.patch, LUCENE-8291.patch > > > It appears that in QueryTemplateManager.java lines 149 and 198 and in > DOMUtils.java line 204 XML is parsed without disabling external entity > references (XXE). This is described in > [http://cwe.mitre.org/data/definitions/611.html] and possible mitigations are > listed here: > [https://www.owasp.org/index.php/XML_External_Entity_(XXE)_Prevention_Cheat_Sheet] > All recent versions of lucene are affected. -- This message was sent by Atlassian JIRA (v7.6.3#76005) - To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org For additional commands, e-mail: dev-h...@lucene.apache.org
[jira] [Commented] (LUCENE-8291) Possible security issue when parsing XML documents containing external entity references
[ https://issues.apache.org/jira/browse/LUCENE-8291?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel=16543201#comment-16543201 ] Andrejs Aleksejevs commented on LUCENE-8291: I have used this construction to load database configurations, now I got an error. What's the best way to load configurations for each core in solrconfig.xml? {{http://www.w3.org/2001/XInclude;> }} {{ }} {{ <}}{{xi:include href="file:///var/lib/solr/conf/database.dih.dev.cr.xml" /> }} {{}} {{ }} > Possible security issue when parsing XML documents containing external entity > references > > > Key: LUCENE-8291 > URL: https://issues.apache.org/jira/browse/LUCENE-8291 > Project: Lucene - Core > Issue Type: Bug > Components: modules/queryparser >Affects Versions: 7.2.1 >Reporter: Hendrik Saly >Assignee: Uwe Schindler >Priority: Major > Fix For: 7.4, master (8.0) > > Attachments: LUCENE-8291-2.patch, LUCENE-8291.patch > > > It appears that in QueryTemplateManager.java lines 149 and 198 and in > DOMUtils.java line 204 XML is parsed without disabling external entity > references (XXE). This is described in > [http://cwe.mitre.org/data/definitions/611.html] and possible mitigations are > listed here: > [https://www.owasp.org/index.php/XML_External_Entity_(XXE)_Prevention_Cheat_Sheet] > All recent versions of lucene are affected. -- This message was sent by Atlassian JIRA (v7.6.3#76005) - To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org For additional commands, e-mail: dev-h...@lucene.apache.org
[JENKINS-EA] Lucene-Solr-7.x-Linux (64bit/jdk-11-ea+22) - Build # 2315 - Still Unstable!
Build: https://jenkins.thetaphi.de/job/Lucene-Solr-7.x-Linux/2315/ Java: 64bit/jdk-11-ea+22 -XX:-UseCompressedOops -XX:+UseG1GC 81 tests failed. FAILED: junit.framework.TestSuite.org.apache.solr.handler.TestSQLHandler Error Message: 1 thread leaked from SUITE scope at org.apache.solr.handler.TestSQLHandler: 1) Thread[id=4389, name=Connection evictor, state=TIMED_WAITING, group=TGRP-TestSQLHandler] at java.base@11-ea/java.lang.Thread.sleep(Native Method) at app//org.apache.http.impl.client.IdleConnectionEvictor$1.run(IdleConnectionEvictor.java:66) at java.base@11-ea/java.lang.Thread.run(Thread.java:834) Stack Trace: com.carrotsearch.randomizedtesting.ThreadLeakError: 1 thread leaked from SUITE scope at org.apache.solr.handler.TestSQLHandler: 1) Thread[id=4389, name=Connection evictor, state=TIMED_WAITING, group=TGRP-TestSQLHandler] at java.base@11-ea/java.lang.Thread.sleep(Native Method) at app//org.apache.http.impl.client.IdleConnectionEvictor$1.run(IdleConnectionEvictor.java:66) at java.base@11-ea/java.lang.Thread.run(Thread.java:834) at __randomizedtesting.SeedInfo.seed([CDB39D0A4F94FA09]:0) FAILED: junit.framework.TestSuite.org.apache.solr.handler.TestSQLHandler Error Message: 1 thread leaked from SUITE scope at org.apache.solr.handler.TestSQLHandler: 1) Thread[id=5462, name=Connection evictor, state=TIMED_WAITING, group=TGRP-TestSQLHandler] at java.base@11-ea/java.lang.Thread.sleep(Native Method) at app//org.apache.http.impl.client.IdleConnectionEvictor$1.run(IdleConnectionEvictor.java:66) at java.base@11-ea/java.lang.Thread.run(Thread.java:834) Stack Trace: com.carrotsearch.randomizedtesting.ThreadLeakError: 1 thread leaked from SUITE scope at org.apache.solr.handler.TestSQLHandler: 1) Thread[id=5462, name=Connection evictor, state=TIMED_WAITING, group=TGRP-TestSQLHandler] at java.base@11-ea/java.lang.Thread.sleep(Native Method) at app//org.apache.http.impl.client.IdleConnectionEvictor$1.run(IdleConnectionEvictor.java:66) at 
java.base@11-ea/java.lang.Thread.run(Thread.java:834) at __randomizedtesting.SeedInfo.seed([CDB39D0A4F94FA09]:0) FAILED: junit.framework.TestSuite.org.apache.solr.handler.TestSQLHandler Error Message: 1 thread leaked from SUITE scope at org.apache.solr.handler.TestSQLHandler: 1) Thread[id=7782, name=Connection evictor, state=TIMED_WAITING, group=TGRP-TestSQLHandler] at java.base@11-ea/java.lang.Thread.sleep(Native Method) at app//org.apache.http.impl.client.IdleConnectionEvictor$1.run(IdleConnectionEvictor.java:66) at java.base@11-ea/java.lang.Thread.run(Thread.java:834) Stack Trace: com.carrotsearch.randomizedtesting.ThreadLeakError: 1 thread leaked from SUITE scope at org.apache.solr.handler.TestSQLHandler: 1) Thread[id=7782, name=Connection evictor, state=TIMED_WAITING, group=TGRP-TestSQLHandler] at java.base@11-ea/java.lang.Thread.sleep(Native Method) at app//org.apache.http.impl.client.IdleConnectionEvictor$1.run(IdleConnectionEvictor.java:66) at java.base@11-ea/java.lang.Thread.run(Thread.java:834) at __randomizedtesting.SeedInfo.seed([CDB39D0A4F94FA09]:0) FAILED: junit.framework.TestSuite.org.apache.solr.handler.TestSQLHandler Error Message: 1 thread leaked from SUITE scope at org.apache.solr.handler.TestSQLHandler: 1) Thread[id=22170, name=Connection evictor, state=TIMED_WAITING, group=TGRP-TestSQLHandler] at java.base@11-ea/java.lang.Thread.sleep(Native Method) at app//org.apache.http.impl.client.IdleConnectionEvictor$1.run(IdleConnectionEvictor.java:66) at java.base@11-ea/java.lang.Thread.run(Thread.java:834) Stack Trace: com.carrotsearch.randomizedtesting.ThreadLeakError: 1 thread leaked from SUITE scope at org.apache.solr.handler.TestSQLHandler: 1) Thread[id=22170, name=Connection evictor, state=TIMED_WAITING, group=TGRP-TestSQLHandler] at java.base@11-ea/java.lang.Thread.sleep(Native Method) at app//org.apache.http.impl.client.IdleConnectionEvictor$1.run(IdleConnectionEvictor.java:66) at java.base@11-ea/java.lang.Thread.run(Thread.java:834) at 
__randomizedtesting.SeedInfo.seed([CDB39D0A4F94FA09]:0) FAILED: junit.framework.TestSuite.org.apache.solr.servlet.HttpSolrCallGetCoreTest Error Message: Could not find collection:collection1 Stack Trace: java.lang.AssertionError: Could not find collection:collection1 at __randomizedtesting.SeedInfo.seed([CDB39D0A4F94FA09]:0) at org.junit.Assert.fail(Assert.java:93) at org.junit.Assert.assertTrue(Assert.java:43) at org.junit.Assert.assertNotNull(Assert.java:526) at org.apache.solr.cloud.AbstractDistribZkTestBase.waitForRecoveriesToFinish(AbstractDistribZkTestBase.java:155) at org.apache.solr.servlet.HttpSolrCallGetCoreTest.setupCluster(HttpSolrCallGetCoreTest.java:52)
[JENKINS] Lucene-Solr-NightlyTests-7.x - Build # 261 - Still Unstable
Build: https://builds.apache.org/job/Lucene-Solr-NightlyTests-7.x/261/ 2 tests failed. FAILED: junit.framework.TestSuite.org.apache.solr.cloud.TestTlogReplica Error Message: ObjectTracker found 3 object(s) that were not released!!! [SolrCore, MockDirectoryWrapper, MockDirectoryWrapper] org.apache.solr.common.util.ObjectReleaseTracker$ObjectTrackerException: org.apache.solr.core.SolrCore at org.apache.solr.common.util.ObjectReleaseTracker.track(ObjectReleaseTracker.java:42) at org.apache.solr.core.SolrCore.(SolrCore.java:1045) at org.apache.solr.core.SolrCore.(SolrCore.java:869) at org.apache.solr.core.CoreContainer.createFromDescriptor(CoreContainer.java:1135) at org.apache.solr.core.CoreContainer.create(CoreContainer.java:1045) at org.apache.solr.handler.admin.CoreAdminOperation.lambda$static$0(CoreAdminOperation.java:92) at org.apache.solr.handler.admin.CoreAdminOperation.execute(CoreAdminOperation.java:360) at org.apache.solr.handler.admin.CoreAdminHandler$CallInfo.call(CoreAdminHandler.java:395) at org.apache.solr.handler.admin.CoreAdminHandler.handleRequestBody(CoreAdminHandler.java:180) at org.apache.solr.handler.RequestHandlerBase.handleRequest(RequestHandlerBase.java:199) at org.apache.solr.servlet.HttpSolrCall.handleAdmin(HttpSolrCall.java:734) at org.apache.solr.servlet.HttpSolrCall.handleAdminRequest(HttpSolrCall.java:715) at org.apache.solr.servlet.HttpSolrCall.call(HttpSolrCall.java:496) at org.apache.solr.servlet.SolrDispatchFilter.doFilter(SolrDispatchFilter.java:377) at org.apache.solr.servlet.SolrDispatchFilter.doFilter(SolrDispatchFilter.java:323) at org.eclipse.jetty.servlet.ServletHandler$CachedChain.doFilter(ServletHandler.java:1642) at org.apache.solr.client.solrj.embedded.JettySolrRunner$DebugFilter.doFilter(JettySolrRunner.java:139) at org.eclipse.jetty.servlet.ServletHandler$CachedChain.doFilter(ServletHandler.java:1642) at org.eclipse.jetty.servlet.ServletHandler.doHandle(ServletHandler.java:533) at 
org.eclipse.jetty.server.handler.ScopedHandler.nextHandle(ScopedHandler.java:255) at org.eclipse.jetty.server.session.SessionHandler.doHandle(SessionHandler.java:1595) at org.eclipse.jetty.server.handler.ScopedHandler.nextHandle(ScopedHandler.java:255) at org.eclipse.jetty.server.handler.ContextHandler.doHandle(ContextHandler.java:1317) at org.eclipse.jetty.server.handler.ScopedHandler.nextScope(ScopedHandler.java:203) at org.eclipse.jetty.servlet.ServletHandler.doScope(ServletHandler.java:473) at org.eclipse.jetty.server.session.SessionHandler.doScope(SessionHandler.java:1564) at org.eclipse.jetty.server.handler.ScopedHandler.nextScope(ScopedHandler.java:201) at org.eclipse.jetty.server.handler.ContextHandler.doScope(ContextHandler.java:1219) at org.eclipse.jetty.server.handler.ScopedHandler.handle(ScopedHandler.java:144) at org.eclipse.jetty.server.handler.gzip.GzipHandler.handle(GzipHandler.java:674) at org.eclipse.jetty.server.handler.HandlerWrapper.handle(HandlerWrapper.java:132) at org.eclipse.jetty.server.Server.handle(Server.java:531) at org.eclipse.jetty.server.HttpChannel.handle(HttpChannel.java:352) at org.eclipse.jetty.server.HttpConnection.onFillable(HttpConnection.java:260) at org.eclipse.jetty.io.AbstractConnection$ReadCallback.succeeded(AbstractConnection.java:281) at org.eclipse.jetty.io.FillInterest.fillable(FillInterest.java:102) at org.eclipse.jetty.io.ChannelEndPoint$2.run(ChannelEndPoint.java:118) at org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.runTask(EatWhatYouKill.java:333) at org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:310) at org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:168) at org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.run(EatWhatYouKill.java:126) at org.eclipse.jetty.util.thread.ReservedThreadExecutor$ReservedThread.run(ReservedThreadExecutor.java:366) at org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:762) at 
org.eclipse.jetty.util.thread.QueuedThreadPool$2.run(QueuedThreadPool.java:680) at java.lang.Thread.run(Thread.java:748) org.apache.solr.common.util.ObjectReleaseTracker$ObjectTrackerException: org.apache.lucene.store.MockDirectoryWrapper at org.apache.solr.common.util.ObjectReleaseTracker.track(ObjectReleaseTracker.java:42) at org.apache.solr.core.CachingDirectoryFactory.get(CachingDirectoryFactory.java:348) at org.apache.solr.update.SolrIndexWriter.create(SolrIndexWriter.java:95) at org.apache.solr.core.SolrCore.initIndex(SolrCore.java:768) at org.apache.solr.core.SolrCore.(SolrCore.java:960) at org.apache.solr.core.SolrCore.(SolrCore.java:869) at org.apache.solr.core.CoreContainer.createFromDescriptor(CoreContainer.java:1135) at org.apache.solr.core.CoreContainer.create(CoreContainer.java:1045) at
[JENKINS] Lucene-Solr-master-Solaris (64bit/jdk1.8.0) - Build # 1967 - Still Unstable!
Build: https://jenkins.thetaphi.de/job/Lucene-Solr-master-Solaris/1967/ Java: 64bit/jdk1.8.0 -XX:-UseCompressedOops -XX:+UseSerialGC 5 tests failed. FAILED: org.apache.solr.cloud.LeaderTragicEventTest.test Error Message: Timeout waiting for new replica become leader null Live Nodes: [127.0.0.1:32981_solr, 127.0.0.1:63828_solr] Last available state: DocCollection(collection1//collections/collection1/state.json/5)={ "pullReplicas":"0", "replicationFactor":"2", "shards":{"shard1":{ "range":"8000-7fff", "state":"active", "replicas":{ "core_node3":{ "core":"collection1_shard1_replica_n1", "base_url":"http://127.0.0.1:32981/solr;, "node_name":"127.0.0.1:32981_solr", "state":"active", "type":"NRT", "force_set_state":"false"}, "core_node4":{ "core":"collection1_shard1_replica_n2", "base_url":"http://127.0.0.1:63828/solr;, "node_name":"127.0.0.1:63828_solr", "state":"active", "type":"NRT", "force_set_state":"false", "leader":"true", "router":{"name":"compositeId"}, "maxShardsPerNode":"1", "autoAddReplicas":"false", "nrtReplicas":"2", "tlogReplicas":"0"} Stack Trace: java.lang.AssertionError: Timeout waiting for new replica become leader null Live Nodes: [127.0.0.1:32981_solr, 127.0.0.1:63828_solr] Last available state: DocCollection(collection1//collections/collection1/state.json/5)={ "pullReplicas":"0", "replicationFactor":"2", "shards":{"shard1":{ "range":"8000-7fff", "state":"active", "replicas":{ "core_node3":{ "core":"collection1_shard1_replica_n1", "base_url":"http://127.0.0.1:32981/solr;, "node_name":"127.0.0.1:32981_solr", "state":"active", "type":"NRT", "force_set_state":"false"}, "core_node4":{ "core":"collection1_shard1_replica_n2", "base_url":"http://127.0.0.1:63828/solr;, "node_name":"127.0.0.1:63828_solr", "state":"active", "type":"NRT", "force_set_state":"false", "leader":"true", "router":{"name":"compositeId"}, "maxShardsPerNode":"1", "autoAddReplicas":"false", "nrtReplicas":"2", "tlogReplicas":"0"} at 
__randomizedtesting.SeedInfo.seed([353ADAD86D49C7EC:BD6EE502C3B5AA14]:0) at org.junit.Assert.fail(Assert.java:93) at org.apache.solr.cloud.SolrCloudTestCase.waitForState(SolrCloudTestCase.java:278) at org.apache.solr.cloud.LeaderTragicEventTest.test(LeaderTragicEventTest.java:76) at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62) at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.lang.reflect.Method.invoke(Method.java:498) at com.carrotsearch.randomizedtesting.RandomizedRunner.invoke(RandomizedRunner.java:1737) at com.carrotsearch.randomizedtesting.RandomizedRunner$8.evaluate(RandomizedRunner.java:934) at com.carrotsearch.randomizedtesting.RandomizedRunner$9.evaluate(RandomizedRunner.java:970) at com.carrotsearch.randomizedtesting.RandomizedRunner$10.evaluate(RandomizedRunner.java:984) at com.carrotsearch.randomizedtesting.rules.SystemPropertiesRestoreRule$1.evaluate(SystemPropertiesRestoreRule.java:57) at org.apache.lucene.util.TestRuleSetupTeardownChained$1.evaluate(TestRuleSetupTeardownChained.java:49) at org.apache.lucene.util.AbstractBeforeAfterRule$1.evaluate(AbstractBeforeAfterRule.java:45) at org.apache.lucene.util.TestRuleThreadAndTestName$1.evaluate(TestRuleThreadAndTestName.java:48) at org.apache.lucene.util.TestRuleIgnoreAfterMaxFailures$1.evaluate(TestRuleIgnoreAfterMaxFailures.java:64) at org.apache.lucene.util.TestRuleMarkFailure$1.evaluate(TestRuleMarkFailure.java:47) at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36) at com.carrotsearch.randomizedtesting.ThreadLeakControl$StatementRunner.run(ThreadLeakControl.java:368) at com.carrotsearch.randomizedtesting.ThreadLeakControl.forkTimeoutingTask(ThreadLeakControl.java:817) at com.carrotsearch.randomizedtesting.ThreadLeakControl$3.evaluate(ThreadLeakControl.java:468) at 
com.carrotsearch.randomizedtesting.RandomizedRunner.runSingleTest(RandomizedRunner.java:943) at com.carrotsearch.randomizedtesting.RandomizedRunner$5.evaluate(RandomizedRunner.java:829) at com.carrotsearch.randomizedtesting.RandomizedRunner$6.evaluate(RandomizedRunner.java:879) at com.carrotsearch.randomizedtesting.RandomizedRunner$7.evaluate(RandomizedRunner.java:890) at
[jira] [Commented] (LUCENE-8397) Add DirectoryTaxonomyWriter.getCache
[ https://issues.apache.org/jira/browse/LUCENE-8397?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel=16542887#comment-16542887 ] Shai Erera commented on LUCENE-8397: +1 > Add DirectoryTaxonomyWriter.getCache > > > Key: LUCENE-8397 > URL: https://issues.apache.org/jira/browse/LUCENE-8397 > Project: Lucene - Core > Issue Type: Improvement >Reporter: Michael McCandless >Assignee: Michael McCandless >Priority: Major > Attachments: LUCENE-8397.patch > > > {{DirectoryTaxonomyWriter}} uses a cache to hold recently mapped labels / > ordinals. You can provide an impl when you create the class, or it will use > a default impl. > > I'd like to add a getter, {{DirectoryTaxonomyWriter.getCache}} to retrieve > the cache it's using; this is helpful for getting diagnostics (how many > cached labels, how much RAM used, etc.). -- This message was sent by Atlassian JIRA (v7.6.3#76005) - To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org For additional commands, e-mail: dev-h...@lucene.apache.org
[jira] [Commented] (LUCENE-8397) Add DirectoryTaxonomyWriter.getCache
[ https://issues.apache.org/jira/browse/LUCENE-8397?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel=16542856#comment-16542856 ] Adrien Grand commented on LUCENE-8397: -- +1 > Add DirectoryTaxonomyWriter.getCache > > > Key: LUCENE-8397 > URL: https://issues.apache.org/jira/browse/LUCENE-8397 > Project: Lucene - Core > Issue Type: Improvement >Reporter: Michael McCandless >Assignee: Michael McCandless >Priority: Major > Attachments: LUCENE-8397.patch > > > {{DirectoryTaxonomyWriter}} uses a cache to hold recently mapped labels / > ordinals. You can provide an impl when you create the class, or it will use > a default impl. > > I'd like to add a getter, {{DirectoryTaxonomyWriter.getCache}} to retrieve > the cache it's using; this is helpful for getting diagnostics (how many > cached labels, how much RAM used, etc.). -- This message was sent by Atlassian JIRA (v7.6.3#76005) - To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org For additional commands, e-mail: dev-h...@lucene.apache.org
[jira] [Commented] (LUCENE-8397) Add DirectoryTaxonomyWriter.getCache
[ https://issues.apache.org/jira/browse/LUCENE-8397?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel=16542852#comment-16542852 ] Michael McCandless commented on LUCENE-8397: Simple patch. > Add DirectoryTaxonomyWriter.getCache > > > Key: LUCENE-8397 > URL: https://issues.apache.org/jira/browse/LUCENE-8397 > Project: Lucene - Core > Issue Type: Improvement >Reporter: Michael McCandless >Assignee: Michael McCandless >Priority: Major > Attachments: LUCENE-8397.patch > > > {{DirectoryTaxonomyWriter}} uses a cache to hold recently mapped labels / > ordinals. You can provide an impl when you create the class, or it will use > a default impl. > > I'd like to add a getter, {{DirectoryTaxonomyWriter.getCache}} to retrieve > the cache it's using; this is helpful for getting diagnostics (how many > cached labels, how much RAM used, etc.). -- This message was sent by Atlassian JIRA (v7.6.3#76005) - To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org For additional commands, e-mail: dev-h...@lucene.apache.org
[jira] [Updated] (LUCENE-8397) Add DirectoryTaxonomyWriter.getCache
[ https://issues.apache.org/jira/browse/LUCENE-8397?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel ] Michael McCandless updated LUCENE-8397: --- Attachment: LUCENE-8397.patch > Add DirectoryTaxonomyWriter.getCache > > > Key: LUCENE-8397 > URL: https://issues.apache.org/jira/browse/LUCENE-8397 > Project: Lucene - Core > Issue Type: Improvement >Reporter: Michael McCandless >Assignee: Michael McCandless >Priority: Major > Attachments: LUCENE-8397.patch > > > {{DirectoryTaxonomyWriter}} uses a cache to hold recently mapped labels / > ordinals. You can provide an impl when you create the class, or it will use > a default impl. > > I'd like to add a getter, {{DirectoryTaxonomyWriter.getCache}} to retrieve > the cache it's using; this is helpful for getting diagnostics (how many > cached labels, how much RAM used, etc.). -- This message was sent by Atlassian JIRA (v7.6.3#76005) - To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org For additional commands, e-mail: dev-h...@lucene.apache.org
[jira] [Created] (LUCENE-8397) Add DirectoryTaxonomyWriter.getCache
Michael McCandless created LUCENE-8397: -- Summary: Add DirectoryTaxonomyWriter.getCache Key: LUCENE-8397 URL: https://issues.apache.org/jira/browse/LUCENE-8397 Project: Lucene - Core Issue Type: Improvement Reporter: Michael McCandless Assignee: Michael McCandless {{DirectoryTaxonomyWriter}} uses a cache to hold recently mapped labels / ordinals. You can provide an impl when you create the class, or it will use a default impl. I'd like to add a getter, {{DirectoryTaxonomyWriter.getCache}} to retrieve the cache it's using; this is helpful for getting diagnostics (how many cached labels, how much RAM used, etc.). -- This message was sent by Atlassian JIRA (v7.6.3#76005) - To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org For additional commands, e-mail: dev-h...@lucene.apache.org
[jira] [Commented] (LUCENE-8396) Add Points Based Shape Indexing
[ https://issues.apache.org/jira/browse/LUCENE-8396?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel=16542845#comment-16542845 ] Michael McCandless commented on LUCENE-8396: This looks really cool! It is great to see many dimensions being used with points. +1 to push to sandbox and iterate there. > Add Points Based Shape Indexing > --- > > Key: LUCENE-8396 > URL: https://issues.apache.org/jira/browse/LUCENE-8396 > Project: Lucene - Core > Issue Type: New Feature >Reporter: Nicholas Knize >Priority: Major > Attachments: LUCENE-8396.patch, polyWHole.png, tessellatedPoly.png > > > I've been tinkering with this for a while and would like to solicit some > feedback. I'd like to introduce a new shape field based on the BKD/Points > codec to bring much of the Points based performance improvements to the shape > indexing and search usecase. Much like the existing shape indexing in > {{spatial-extras}} the shape will be decomposed into smaller parts, but > instead of decomposing into quad cells (which have the drawback of precision > accuracy and sheer volume of terms) I'd like to explore decomposing the > shapes into a triangular mesh; similar to gaming and computer graphics. Not > only does this approach reduce the number of terms, but it has the added > benefit of better accuracy (precision is based on the index encoding > technique instead of the spatial resolution of the quad cell). > For better clarity, consider the following illustrations (of a polygon in a 1 > degree x 1 degree spatial area). The first is using the quad tree technique > applied in the existing inverted index. The second is using a triangular mesh > decomposition as used by popular OpenGL and javascript rendering systems > (such as those used by mapbox). > !polyWHole.png! > Decomposing this shape using a quad tree results in 1,105,889 quad terms at 3 > meter spatial resolution. > !tessellatedPoly.png! 
> > Decomposing using a triangular mesh results in 8 triangles at the same > resolution as {{encodeLat/Lon}}. > The decomposed triangles can then be encoded as a 6 dimensional POINT and > queries are implemented using the computed relations against these triangles > (similar to how its done with the inverted index today). -- This message was sent by Atlassian JIRA (v7.6.3#76005) - To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org For additional commands, e-mail: dev-h...@lucene.apache.org
[JENKINS] Lucene-Solr-master-Linux (64bit/jdk-10.0.1) - Build # 22447 - Still Unstable!
Build: https://jenkins.thetaphi.de/job/Lucene-Solr-master-Linux/22447/ Java: 64bit/jdk-10.0.1 -XX:+UseCompressedOops -XX:+UseConcMarkSweepGC 4 tests failed. FAILED: org.apache.solr.cloud.LeaderTragicEventTest.testOtherReplicasAreNotActive Error Message: Jetty Connector is not open: -2 Stack Trace: java.lang.IllegalStateException: Jetty Connector is not open: -2 at __randomizedtesting.SeedInfo.seed([138E0C43058FFD:85A7A27B7FFA3665]:0) at org.apache.solr.client.solrj.embedded.JettySolrRunner.getBaseUrl(JettySolrRunner.java:499) at org.apache.solr.cloud.MiniSolrCloudCluster.getReplicaJetty(MiniSolrCloudCluster.java:539) at org.apache.solr.cloud.LeaderTragicEventTest.corruptLeader(LeaderTragicEventTest.java:100) at org.apache.solr.cloud.LeaderTragicEventTest.testOtherReplicasAreNotActive(LeaderTragicEventTest.java:150) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:564) at com.carrotsearch.randomizedtesting.RandomizedRunner.invoke(RandomizedRunner.java:1737) at com.carrotsearch.randomizedtesting.RandomizedRunner$8.evaluate(RandomizedRunner.java:934) at com.carrotsearch.randomizedtesting.RandomizedRunner$9.evaluate(RandomizedRunner.java:970) at com.carrotsearch.randomizedtesting.RandomizedRunner$10.evaluate(RandomizedRunner.java:984) at com.carrotsearch.randomizedtesting.rules.SystemPropertiesRestoreRule$1.evaluate(SystemPropertiesRestoreRule.java:57) at org.apache.lucene.util.TestRuleSetupTeardownChained$1.evaluate(TestRuleSetupTeardownChained.java:49) at org.apache.lucene.util.AbstractBeforeAfterRule$1.evaluate(AbstractBeforeAfterRule.java:45) at org.apache.lucene.util.TestRuleThreadAndTestName$1.evaluate(TestRuleThreadAndTestName.java:48) at 
org.apache.lucene.util.TestRuleIgnoreAfterMaxFailures$1.evaluate(TestRuleIgnoreAfterMaxFailures.java:64) at org.apache.lucene.util.TestRuleMarkFailure$1.evaluate(TestRuleMarkFailure.java:47) at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36) at com.carrotsearch.randomizedtesting.ThreadLeakControl$StatementRunner.run(ThreadLeakControl.java:368) at com.carrotsearch.randomizedtesting.ThreadLeakControl.forkTimeoutingTask(ThreadLeakControl.java:817) at com.carrotsearch.randomizedtesting.ThreadLeakControl$3.evaluate(ThreadLeakControl.java:468) at com.carrotsearch.randomizedtesting.RandomizedRunner.runSingleTest(RandomizedRunner.java:943) at com.carrotsearch.randomizedtesting.RandomizedRunner$5.evaluate(RandomizedRunner.java:829) at com.carrotsearch.randomizedtesting.RandomizedRunner$6.evaluate(RandomizedRunner.java:879) at com.carrotsearch.randomizedtesting.RandomizedRunner$7.evaluate(RandomizedRunner.java:890) at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36) at com.carrotsearch.randomizedtesting.rules.SystemPropertiesRestoreRule$1.evaluate(SystemPropertiesRestoreRule.java:57) at org.apache.lucene.util.AbstractBeforeAfterRule$1.evaluate(AbstractBeforeAfterRule.java:45) at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36) at org.apache.lucene.util.TestRuleStoreClassName$1.evaluate(TestRuleStoreClassName.java:41) at com.carrotsearch.randomizedtesting.rules.NoShadowingOrOverridesOnMethodsRule$1.evaluate(NoShadowingOrOverridesOnMethodsRule.java:40) at com.carrotsearch.randomizedtesting.rules.NoShadowingOrOverridesOnMethodsRule$1.evaluate(NoShadowingOrOverridesOnMethodsRule.java:40) at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36) at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36) at 
org.apache.lucene.util.TestRuleAssertionsRequired$1.evaluate(TestRuleAssertionsRequired.java:53) at org.apache.lucene.util.TestRuleMarkFailure$1.evaluate(TestRuleMarkFailure.java:47) at org.apache.lucene.util.TestRuleIgnoreAfterMaxFailures$1.evaluate(TestRuleIgnoreAfterMaxFailures.java:64) at org.apache.lucene.util.TestRuleIgnoreTestSuites$1.evaluate(TestRuleIgnoreTestSuites.java:54) at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36) at com.carrotsearch.randomizedtesting.ThreadLeakControl$StatementRunner.run(ThreadLeakControl.java:368) at java.base/java.lang.Thread.run(Thread.java:844) FAILED: org.apache.solr.cloud.LeaderTragicEventTest.test Error
[jira] [Commented] (LUCENE-8396) Add Points Based Shape Indexing
[ https://issues.apache.org/jira/browse/LUCENE-8396?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel=16542779#comment-16542779 ] Adrien Grand commented on LUCENE-8396: -- bq. do quantization first [...] and then make tessellation work directly in the quantized space Now that I think about it, it's probably easier to do it the other way around, as the polygon could easily become invalid through quantization? Having triangles hold doubles rather than quantized ints might also help if we want to further split these triangles in the future, eg. to prevent flat triangles from increasing the area of the intersection of MBRs of sibling nodes in the tree. > Add Points Based Shape Indexing > --- > > Key: LUCENE-8396 > URL: https://issues.apache.org/jira/browse/LUCENE-8396 > Project: Lucene - Core > Issue Type: New Feature >Reporter: Nicholas Knize >Priority: Major > Attachments: LUCENE-8396.patch, polyWHole.png, tessellatedPoly.png > > > I've been tinkering with this for a while and would like to solicit some > feedback. I'd like to introduce a new shape field based on the BKD/Points > codec to bring much of the Points based performance improvements to the shape > indexing and search usecase. Much like the existing shape indexing in > {{spatial-extras}} the shape will be decomposed into smaller parts, but > instead of decomposing into quad cells (which have the drawback of precision > accuracy and sheer volume of terms) I'd like to explore decomposing the > shapes into a triangular mesh; similar to gaming and computer graphics. Not > only does this approach reduce the number of terms, but it has the added > benefit of better accuracy (precision is based on the index encoding > technique instead of the spatial resolution of the quad cell). > For better clarity, consider the following illustrations (of a polygon in a 1 > degree x 1 degree spatial area). The first is using the quad tree technique > applied in the existing inverted index. 
The second is using a triangular mesh > decomposition as used by popular OpenGL and javascript rendering systems > (such as those used by mapbox). > !polyWHole.png! > Decomposing this shape using a quad tree results in 1,105,889 quad terms at 3 > meter spatial resolution. > !tessellatedPoly.png! > > Decomposing using a triangular mesh results in 8 triangles at the same > resolution as {{encodeLat/Lon}}. > The decomposed triangles can then be encoded as a 6 dimensional POINT and > queries are implemented using the computed relations against these triangles > (similar to how its done with the inverted index today). -- This message was sent by Atlassian JIRA (v7.6.3#76005) - To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org For additional commands, e-mail: dev-h...@lucene.apache.org
[jira] [Commented] (LUCENE-8396) Add Points Based Shape Indexing
[ https://issues.apache.org/jira/browse/LUCENE-8396?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel=16542761#comment-16542761 ] Adrien Grand commented on LUCENE-8396: -- This looks awesome. Indexing two additional dimensions sounds worth it to me if it helps index way fewer fields. It's also exciting we're exercising high numbers of dimensions with the points API. Since this is targeting sandbox and the patch is a bit large, I think it's easier to get it in soon and iterate from there? It looks pretty clean to me already. Let's maybe just make windingOrder final on the Polygon class first. Some other comments I had while skimming through the patch: - LatLonShape's javadocs say that "Finding all shapes within a range at search time is efficient." but I think it really means "intersect" rather than "within" - TriangleField should be private and maybe renamed to LatLonTriangle for consistency with LatLonBoundingBox and LatLonPoint? - LatLonShapeBoundingBoxQuery should be pkg-private and its constructor should validate that the box doesn't cross the dateline? - Tessellator would probably be a bit easier to test and debug if it didn't handle both quantization and tessellation at once. Can we eg. do quantization first (eg. with a clone of the Polygon class that has ints instead of doubles) on top of tessellation and then make tessellation work directly in the quantized space? (Probably best done after merging to ease reviewing) - Tessellator.Node uses encodeLatCeil and encodeLonCeil, but it should really use encodeLat and encodeLon? - Some tests like TestLatLonShape.testSVG and TestTessellator.testBug write files and don't assert anything, looks like left-overs? 
> Add Points Based Shape Indexing > --- > > Key: LUCENE-8396 > URL: https://issues.apache.org/jira/browse/LUCENE-8396 > Project: Lucene - Core > Issue Type: New Feature >Reporter: Nicholas Knize >Priority: Major > Attachments: LUCENE-8396.patch, polyWHole.png, tessellatedPoly.png > > > I've been tinkering with this for a while and would like to solicit some > feedback. I'd like to introduce a new shape field based on the BKD/Points > codec to bring much of the Points based performance improvements to the shape > indexing and search usecase. Much like the existing shape indexing in > {{spatial-extras}} the shape will be decomposed into smaller parts, but > instead of decomposing into quad cells (which have the drawback of precision > accuracy and sheer volume of terms) I'd like to explore decomposing the > shapes into a triangular mesh; similar to gaming and computer graphics. Not > only does this approach reduce the number of terms, but it has the added > benefit of better accuracy (precision is based on the index encoding > technique instead of the spatial resolution of the quad cell). > For better clarity, consider the following illustrations (of a polygon in a 1 > degree x 1 degree spatial area). The first is using the quad tree technique > applied in the existing inverted index. The second is using a triangular mesh > decomposition as used by popular OpenGL and javascript rendering systems > (such as those used by mapbox). > !polyWHole.png! > Decomposing this shape using a quad tree results in 1,105,889 quad terms at 3 > meter spatial resolution. > !tessellatedPoly.png! > > Decomposing using a triangular mesh results in 8 triangles at the same > resolution as {{encodeLat/Lon}}. > The decomposed triangles can then be encoded as a 6 dimensional POINT and > queries are implemented using the computed relations against these triangles > (similar to how its done with the inverted index today). 
-- This message was sent by Atlassian JIRA (v7.6.3#76005) - To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org For additional commands, e-mail: dev-h...@lucene.apache.org
[JENKINS-EA] Lucene-Solr-7.x-Linux (64bit/jdk-11-ea+21) - Build # 2310 - Still Unstable!
Build: https://jenkins.thetaphi.de/job/Lucene-Solr-7.x-Linux/2310/ Java: 64bit/jdk-11-ea+21 -XX:+UseCompressedOops -XX:+UseConcMarkSweepGC 30 tests failed. FAILED: org.apache.solr.client.solrj.SolrSchemalessExampleTest.testFieldMutating Error Message: Stack Trace: java.lang.NullPointerException at __randomizedtesting.SeedInfo.seed([83F0B3C88215564A:751237F7DD8FBCD8]:0) at java.base/sun.security.ssl.SSLSessionImpl.getLocalPrincipal(SSLSessionImpl.java:661) at org.apache.http.impl.client.DefaultUserTokenHandler.getUserToken(DefaultUserTokenHandler.java:84) at org.apache.http.impl.execchain.MainClientExec.execute(MainClientExec.java:328) at org.apache.http.impl.execchain.ProtocolExec.execute(ProtocolExec.java:185) at org.apache.http.impl.execchain.RetryExec.execute(RetryExec.java:89) at org.apache.http.impl.execchain.RedirectExec.execute(RedirectExec.java:111) at org.apache.http.impl.client.InternalHttpClient.doExecute(InternalHttpClient.java:185) at org.apache.http.impl.client.CloseableHttpClient.execute(CloseableHttpClient.java:83) at org.apache.http.impl.client.CloseableHttpClient.execute(CloseableHttpClient.java:108) at org.apache.http.impl.client.CloseableHttpClient.execute(CloseableHttpClient.java:56) at org.apache.solr.client.solrj.SolrSchemalessExampleTest.testFieldMutating(SolrSchemalessExampleTest.java:109) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:566) at com.carrotsearch.randomizedtesting.RandomizedRunner.invoke(RandomizedRunner.java:1737) at com.carrotsearch.randomizedtesting.RandomizedRunner$8.evaluate(RandomizedRunner.java:934) at com.carrotsearch.randomizedtesting.RandomizedRunner$9.evaluate(RandomizedRunner.java:970) at 
com.carrotsearch.randomizedtesting.RandomizedRunner$10.evaluate(RandomizedRunner.java:984) at com.carrotsearch.randomizedtesting.rules.SystemPropertiesRestoreRule$1.evaluate(SystemPropertiesRestoreRule.java:57) at org.apache.lucene.util.TestRuleSetupTeardownChained$1.evaluate(TestRuleSetupTeardownChained.java:49) at org.apache.lucene.util.AbstractBeforeAfterRule$1.evaluate(AbstractBeforeAfterRule.java:45) at org.apache.lucene.util.TestRuleThreadAndTestName$1.evaluate(TestRuleThreadAndTestName.java:48) at org.apache.lucene.util.TestRuleIgnoreAfterMaxFailures$1.evaluate(TestRuleIgnoreAfterMaxFailures.java:64) at org.apache.lucene.util.TestRuleMarkFailure$1.evaluate(TestRuleMarkFailure.java:47) at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36) at com.carrotsearch.randomizedtesting.ThreadLeakControl$StatementRunner.run(ThreadLeakControl.java:368) at com.carrotsearch.randomizedtesting.ThreadLeakControl.forkTimeoutingTask(ThreadLeakControl.java:817) at com.carrotsearch.randomizedtesting.ThreadLeakControl$3.evaluate(ThreadLeakControl.java:468) at com.carrotsearch.randomizedtesting.RandomizedRunner.runSingleTest(RandomizedRunner.java:943) at com.carrotsearch.randomizedtesting.RandomizedRunner$5.evaluate(RandomizedRunner.java:829) at com.carrotsearch.randomizedtesting.RandomizedRunner$6.evaluate(RandomizedRunner.java:879) at com.carrotsearch.randomizedtesting.RandomizedRunner$7.evaluate(RandomizedRunner.java:890) at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36) at com.carrotsearch.randomizedtesting.rules.SystemPropertiesRestoreRule$1.evaluate(SystemPropertiesRestoreRule.java:57) at org.apache.lucene.util.AbstractBeforeAfterRule$1.evaluate(AbstractBeforeAfterRule.java:45) at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36) at org.apache.lucene.util.TestRuleStoreClassName$1.evaluate(TestRuleStoreClassName.java:41) at 
com.carrotsearch.randomizedtesting.rules.NoShadowingOrOverridesOnMethodsRule$1.evaluate(NoShadowingOrOverridesOnMethodsRule.java:40) at com.carrotsearch.randomizedtesting.rules.NoShadowingOrOverridesOnMethodsRule$1.evaluate(NoShadowingOrOverridesOnMethodsRule.java:40) at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36) at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36) at org.apache.lucene.util.TestRuleAssertionsRequired$1.evaluate(TestRuleAssertionsRequired.java:53) at org.apache.lucene.util.TestRuleMarkFailure$1.evaluate(TestRuleMarkFailure.java:47)
[JENKINS] Lucene-Solr-SmokeRelease-7.x - Build # 259 - Still Failing
Build: https://builds.apache.org/job/Lucene-Solr-SmokeRelease-7.x/259/ No tests ran. Build Log: [...truncated 22999 lines...] [asciidoctor:convert] asciidoctor: ERROR: about-this-guide.adoc: line 1: invalid part, must have at least one section (e.g., chapter, appendix, etc.) [asciidoctor:convert] asciidoctor: ERROR: solr-glossary.adoc: line 1: invalid part, must have at least one section (e.g., chapter, appendix, etc.) [java] Processed 2220 links (1775 relative) to 2998 anchors in 229 files [echo] Validated Links & Anchors via: /x1/jenkins/jenkins-slave/workspace/Lucene-Solr-SmokeRelease-7.x/solr/build/solr-ref-guide/bare-bones-html/ -dist-changes: [copy] Copying 4 files to /x1/jenkins/jenkins-slave/workspace/Lucene-Solr-SmokeRelease-7.x/solr/package/changes -dist-keys: [get] Getting: http://home.apache.org/keys/group/lucene.asc [get] To: /x1/jenkins/jenkins-slave/workspace/Lucene-Solr-SmokeRelease-7.x/solr/package/KEYS package: -unpack-solr-tgz: -ensure-solr-tgz-exists: [mkdir] Created dir: /x1/jenkins/jenkins-slave/workspace/Lucene-Solr-SmokeRelease-7.x/solr/build/solr.tgz.unpacked [untar] Expanding: /x1/jenkins/jenkins-slave/workspace/Lucene-Solr-SmokeRelease-7.x/solr/package/solr-7.5.0.tgz into /x1/jenkins/jenkins-slave/workspace/Lucene-Solr-SmokeRelease-7.x/solr/build/solr.tgz.unpacked generate-maven-artifacts: resolve: resolve: ivy-availability-check: [loadresource] Do not set property disallowed.ivy.jars.list as its length is 0. -ivy-fail-disallowed-ivy-version: ivy-fail: ivy-configure: [ivy:configure] :: loading settings :: file = /x1/jenkins/jenkins-slave/workspace/Lucene-Solr-SmokeRelease-7.x/lucene/top-level-ivy-settings.xml resolve: ivy-availability-check: [loadresource] Do not set property disallowed.ivy.jars.list as its length is 0. 
-ivy-fail-disallowed-ivy-version: ivy-fail: ivy-configure: [ivy:configure] :: loading settings :: file = /x1/jenkins/jenkins-slave/workspace/Lucene-Solr-SmokeRelease-7.x/lucene/top-level-ivy-settings.xml resolve: resolve: ivy-availability-check: [loadresource] Do not set property disallowed.ivy.jars.list as its length is 0. -ivy-fail-disallowed-ivy-version: ivy-fail: ivy-configure: [ivy:configure] :: loading settings :: file = /x1/jenkins/jenkins-slave/workspace/Lucene-Solr-SmokeRelease-7.x/lucene/top-level-ivy-settings.xml resolve: ivy-availability-check: [loadresource] Do not set property disallowed.ivy.jars.list as its length is 0. -ivy-fail-disallowed-ivy-version: ivy-fail: ivy-configure: [ivy:configure] :: loading settings :: file = /x1/jenkins/jenkins-slave/workspace/Lucene-Solr-SmokeRelease-7.x/lucene/top-level-ivy-settings.xml resolve: ivy-availability-check: [loadresource] Do not set property disallowed.ivy.jars.list as its length is 0. -ivy-fail-disallowed-ivy-version: ivy-fail: ivy-configure: [ivy:configure] :: loading settings :: file = /x1/jenkins/jenkins-slave/workspace/Lucene-Solr-SmokeRelease-7.x/lucene/top-level-ivy-settings.xml resolve: ivy-availability-check: [loadresource] Do not set property disallowed.ivy.jars.list as its length is 0. -ivy-fail-disallowed-ivy-version: ivy-fail: ivy-configure: [ivy:configure] :: loading settings :: file = /x1/jenkins/jenkins-slave/workspace/Lucene-Solr-SmokeRelease-7.x/lucene/top-level-ivy-settings.xml resolve: ivy-availability-check: [loadresource] Do not set property disallowed.ivy.jars.list as its length is 0. -ivy-fail-disallowed-ivy-version: ivy-fail: ivy-configure: [ivy:configure] :: loading settings :: file = /x1/jenkins/jenkins-slave/workspace/Lucene-Solr-SmokeRelease-7.x/lucene/top-level-ivy-settings.xml resolve: ivy-availability-check: [loadresource] Do not set property disallowed.ivy.jars.list as its length is 0. 
-ivy-fail-disallowed-ivy-version: ivy-fail: ivy-configure: [ivy:configure] :: loading settings :: file = /x1/jenkins/jenkins-slave/workspace/Lucene-Solr-SmokeRelease-7.x/lucene/top-level-ivy-settings.xml resolve: ivy-availability-check: [loadresource] Do not set property disallowed.ivy.jars.list as its length is 0. -ivy-fail-disallowed-ivy-version: ivy-fail: ivy-configure: [ivy:configure] :: loading settings :: file = /x1/jenkins/jenkins-slave/workspace/Lucene-Solr-SmokeRelease-7.x/lucene/top-level-ivy-settings.xml resolve: ivy-availability-check: [loadresource] Do not set property disallowed.ivy.jars.list as its length is 0. -ivy-fail-disallowed-ivy-version: ivy-fail: ivy-configure: [ivy:configure] :: loading settings :: file = /x1/jenkins/jenkins-slave/workspace/Lucene-Solr-SmokeRelease-7.x/lucene/top-level-ivy-settings.xml resolve: ivy-availability-check: [loadresource] Do not set property disallowed.ivy.jars.list as its length is 0. -ivy-fail-disallowed-ivy-version: ivy-fail: ivy-configure: [ivy:configure] :: loading settings :: file = /x1/jenkins/jenkins-slave/workspace/Lucene-Solr-SmokeRelease-7.x/lucene/top-level-ivy-settings.xml resolve: ivy-availability-check:
[JENKINS] Lucene-Solr-master-Linux (64bit/jdk1.8.0_172) - Build # 22446 - Still Unstable!
Build: https://jenkins.thetaphi.de/job/Lucene-Solr-master-Linux/22446/ Java: 64bit/jdk1.8.0_172 -XX:-UseCompressedOops -XX:+UseParallelGC 3 tests failed. FAILED: org.apache.solr.handler.component.InfixSuggestersTest.testShutdownDuringBuild Error Message: junit.framework.AssertionFailedError: Unexpected wrapped exception type, expected CoreIsClosedException Stack Trace: java.util.concurrent.ExecutionException: junit.framework.AssertionFailedError: Unexpected wrapped exception type, expected CoreIsClosedException at __randomizedtesting.SeedInfo.seed([201C752EA11641F5:FF9117919F7F1497]:0) at java.util.concurrent.FutureTask.report(FutureTask.java:122) at java.util.concurrent.FutureTask.get(FutureTask.java:192) at org.apache.solr.handler.component.InfixSuggestersTest.testShutdownDuringBuild(InfixSuggestersTest.java:130) at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62) at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.lang.reflect.Method.invoke(Method.java:498) at com.carrotsearch.randomizedtesting.RandomizedRunner.invoke(RandomizedRunner.java:1737) at com.carrotsearch.randomizedtesting.RandomizedRunner$8.evaluate(RandomizedRunner.java:934) at com.carrotsearch.randomizedtesting.RandomizedRunner$9.evaluate(RandomizedRunner.java:970) at com.carrotsearch.randomizedtesting.RandomizedRunner$10.evaluate(RandomizedRunner.java:984) at com.carrotsearch.randomizedtesting.rules.SystemPropertiesRestoreRule$1.evaluate(SystemPropertiesRestoreRule.java:57) at org.apache.lucene.util.TestRuleSetupTeardownChained$1.evaluate(TestRuleSetupTeardownChained.java:49) at org.apache.lucene.util.AbstractBeforeAfterRule$1.evaluate(AbstractBeforeAfterRule.java:45) at org.apache.lucene.util.TestRuleThreadAndTestName$1.evaluate(TestRuleThreadAndTestName.java:48) at 
org.apache.lucene.util.TestRuleIgnoreAfterMaxFailures$1.evaluate(TestRuleIgnoreAfterMaxFailures.java:64) at org.apache.lucene.util.TestRuleMarkFailure$1.evaluate(TestRuleMarkFailure.java:47) at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36) at com.carrotsearch.randomizedtesting.ThreadLeakControl$StatementRunner.run(ThreadLeakControl.java:368) at com.carrotsearch.randomizedtesting.ThreadLeakControl.forkTimeoutingTask(ThreadLeakControl.java:817) at com.carrotsearch.randomizedtesting.ThreadLeakControl$3.evaluate(ThreadLeakControl.java:468) at com.carrotsearch.randomizedtesting.RandomizedRunner.runSingleTest(RandomizedRunner.java:943) at com.carrotsearch.randomizedtesting.RandomizedRunner$5.evaluate(RandomizedRunner.java:829) at com.carrotsearch.randomizedtesting.RandomizedRunner$6.evaluate(RandomizedRunner.java:879) at com.carrotsearch.randomizedtesting.RandomizedRunner$7.evaluate(RandomizedRunner.java:890) at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36) at com.carrotsearch.randomizedtesting.rules.SystemPropertiesRestoreRule$1.evaluate(SystemPropertiesRestoreRule.java:57) at org.apache.lucene.util.AbstractBeforeAfterRule$1.evaluate(AbstractBeforeAfterRule.java:45) at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36) at org.apache.lucene.util.TestRuleStoreClassName$1.evaluate(TestRuleStoreClassName.java:41) at com.carrotsearch.randomizedtesting.rules.NoShadowingOrOverridesOnMethodsRule$1.evaluate(NoShadowingOrOverridesOnMethodsRule.java:40) at com.carrotsearch.randomizedtesting.rules.NoShadowingOrOverridesOnMethodsRule$1.evaluate(NoShadowingOrOverridesOnMethodsRule.java:40) at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36) at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36) at 
com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36) at org.apache.lucene.util.TestRuleAssertionsRequired$1.evaluate(TestRuleAssertionsRequired.java:53) at org.apache.lucene.util.TestRuleMarkFailure$1.evaluate(TestRuleMarkFailure.java:47) at org.apache.lucene.util.TestRuleIgnoreAfterMaxFailures$1.evaluate(TestRuleIgnoreAfterMaxFailures.java:64) at org.apache.lucene.util.TestRuleIgnoreTestSuites$1.evaluate(TestRuleIgnoreTestSuites.java:54) at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36) at com.carrotsearch.randomizedtesting.ThreadLeakControl$StatementRunner.run(ThreadLeakControl.java:368) at java.lang.Thread.run(Thread.java:748) Caused by: junit.framework.AssertionFailedError: