[JENKINS] Lucene-Solr-Tests-master - Build # 2278 - Still Unstable
Build: https://builds.apache.org/job/Lucene-Solr-Tests-master/2278/ 7 tests failed. FAILED: junit.framework.TestSuite.org.apache.solr.analytics.OverallAnalyticsTest Error Message: 1 thread leaked from SUITE scope at org.apache.solr.analytics.OverallAnalyticsTest: 1) Thread[id=488, name=qtp424341292-488, state=TIMED_WAITING, group=TGRP-OverallAnalyticsTest] at sun.misc.Unsafe.park(Native Method) at java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:215) at java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:2163) at org.eclipse.jetty.util.thread.ReservedThreadExecutor$ReservedThread.reservedWait(ReservedThreadExecutor.java:308) at org.eclipse.jetty.util.thread.ReservedThreadExecutor$ReservedThread.run(ReservedThreadExecutor.java:373) at org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:708) at org.eclipse.jetty.util.thread.QueuedThreadPool$2.run(QueuedThreadPool.java:626) at java.lang.Thread.run(Thread.java:748) Stack Trace: com.carrotsearch.randomizedtesting.ThreadLeakError: 1 thread leaked from SUITE scope at org.apache.solr.analytics.OverallAnalyticsTest: 1) Thread[id=488, name=qtp424341292-488, state=TIMED_WAITING, group=TGRP-OverallAnalyticsTest] at sun.misc.Unsafe.park(Native Method) at java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:215) at java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:2163) at org.eclipse.jetty.util.thread.ReservedThreadExecutor$ReservedThread.reservedWait(ReservedThreadExecutor.java:308) at org.eclipse.jetty.util.thread.ReservedThreadExecutor$ReservedThread.run(ReservedThreadExecutor.java:373) at org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:708) at org.eclipse.jetty.util.thread.QueuedThreadPool$2.run(QueuedThreadPool.java:626) at java.lang.Thread.run(Thread.java:748) at __randomizedtesting.SeedInfo.seed([F7449FA859D9CE6B]:0) FAILED: 
junit.framework.TestSuite.org.apache.solr.analytics.OverallAnalyticsTest Error Message: There are still zombie threads that couldn't be terminated:1) Thread[id=488, name=qtp424341292-488, state=TIMED_WAITING, group=TGRP-OverallAnalyticsTest] at sun.misc.Unsafe.park(Native Method) at java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:215) at java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:2163) at org.eclipse.jetty.util.thread.ReservedThreadExecutor$ReservedThread.reservedWait(ReservedThreadExecutor.java:308) at org.eclipse.jetty.util.thread.ReservedThreadExecutor$ReservedThread.run(ReservedThreadExecutor.java:373) at org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:708) at org.eclipse.jetty.util.thread.QueuedThreadPool$2.run(QueuedThreadPool.java:626) at java.lang.Thread.run(Thread.java:748) Stack Trace: com.carrotsearch.randomizedtesting.ThreadLeakError: There are still zombie threads that couldn't be terminated: 1) Thread[id=488, name=qtp424341292-488, state=TIMED_WAITING, group=TGRP-OverallAnalyticsTest] at sun.misc.Unsafe.park(Native Method) at java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:215) at java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:2163) at org.eclipse.jetty.util.thread.ReservedThreadExecutor$ReservedThread.reservedWait(ReservedThreadExecutor.java:308) at org.eclipse.jetty.util.thread.ReservedThreadExecutor$ReservedThread.run(ReservedThreadExecutor.java:373) at org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:708) at org.eclipse.jetty.util.thread.QueuedThreadPool$2.run(QueuedThreadPool.java:626) at java.lang.Thread.run(Thread.java:748) at __randomizedtesting.SeedInfo.seed([F7449FA859D9CE6B]:0) FAILED: org.apache.solr.cloud.TestTlogReplica.testRecovery Error Message: Can not find doc 8 in https://127.0.0.1:44378/solr Stack Trace: 
java.lang.AssertionError: Can not find doc 8 in https://127.0.0.1:44378/solr at __randomizedtesting.SeedInfo.seed([8CCFA3740F1B19D6:4D3FDAD8224BD371]:0) at org.junit.Assert.fail(Assert.java:93) at org.junit.Assert.assertTrue(Assert.java:43) at org.junit.Assert.assertNotNull(Assert.java:526) at org.apache.solr.cloud.TestTlogReplica.checkRTG(TestTlogReplica.java:885) at org.apache.solr.cloud.TestTlogReplica.testRecovery(TestTlogReplica.java:599) at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)
[JENKINS] Lucene-Solr-master-Linux (64bit/jdk-9.0.1) - Build # 21355 - Still Unstable!
Build: https://jenkins.thetaphi.de/job/Lucene-Solr-master-Linux/21355/ Java: 64bit/jdk-9.0.1 -XX:-UseCompressedOops -XX:+UseSerialGC 3 tests failed. FAILED: org.apache.solr.cloud.MoveReplicaHDFSTest.testFailedMove Error Message: No live SolrServers available to handle this request:[http://127.0.0.1:44409/solr/MoveReplicaHDFSTest_failed_coll_true, http://127.0.0.1:35875/solr/MoveReplicaHDFSTest_failed_coll_true] Stack Trace: org.apache.solr.client.solrj.SolrServerException: No live SolrServers available to handle this request:[http://127.0.0.1:44409/solr/MoveReplicaHDFSTest_failed_coll_true, http://127.0.0.1:35875/solr/MoveReplicaHDFSTest_failed_coll_true] at __randomizedtesting.SeedInfo.seed([DA3283F84AD85003:70FF500AFD0B85D3]:0) at org.apache.solr.client.solrj.impl.LBHttpSolrClient.request(LBHttpSolrClient.java:462) at org.apache.solr.client.solrj.impl.CloudSolrClient.sendRequest(CloudSolrClient.java:1104) at org.apache.solr.client.solrj.impl.CloudSolrClient.requestWithRetryOnStaleState(CloudSolrClient.java:884) at org.apache.solr.client.solrj.impl.CloudSolrClient.requestWithRetryOnStaleState(CloudSolrClient.java:991) at org.apache.solr.client.solrj.impl.CloudSolrClient.request(CloudSolrClient.java:817) at org.apache.solr.client.solrj.SolrRequest.process(SolrRequest.java:194) at org.apache.solr.client.solrj.SolrClient.query(SolrClient.java:942) at org.apache.solr.cloud.MoveReplicaTest.testFailedMove(MoveReplicaTest.java:309) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:564) at com.carrotsearch.randomizedtesting.RandomizedRunner.invoke(RandomizedRunner.java:1737) at com.carrotsearch.randomizedtesting.RandomizedRunner$8.evaluate(RandomizedRunner.java:934) at 
com.carrotsearch.randomizedtesting.RandomizedRunner$9.evaluate(RandomizedRunner.java:970) at com.carrotsearch.randomizedtesting.RandomizedRunner$10.evaluate(RandomizedRunner.java:984) at com.carrotsearch.randomizedtesting.rules.SystemPropertiesRestoreRule$1.evaluate(SystemPropertiesRestoreRule.java:57) at org.apache.lucene.util.TestRuleSetupTeardownChained$1.evaluate(TestRuleSetupTeardownChained.java:49) at org.apache.lucene.util.AbstractBeforeAfterRule$1.evaluate(AbstractBeforeAfterRule.java:45) at org.apache.lucene.util.TestRuleThreadAndTestName$1.evaluate(TestRuleThreadAndTestName.java:48) at org.apache.lucene.util.TestRuleIgnoreAfterMaxFailures$1.evaluate(TestRuleIgnoreAfterMaxFailures.java:64) at org.apache.lucene.util.TestRuleMarkFailure$1.evaluate(TestRuleMarkFailure.java:47) at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36) at com.carrotsearch.randomizedtesting.ThreadLeakControl$StatementRunner.run(ThreadLeakControl.java:368) at com.carrotsearch.randomizedtesting.ThreadLeakControl.forkTimeoutingTask(ThreadLeakControl.java:817) at com.carrotsearch.randomizedtesting.ThreadLeakControl$3.evaluate(ThreadLeakControl.java:468) at com.carrotsearch.randomizedtesting.RandomizedRunner.runSingleTest(RandomizedRunner.java:943) at com.carrotsearch.randomizedtesting.RandomizedRunner$5.evaluate(RandomizedRunner.java:829) at com.carrotsearch.randomizedtesting.RandomizedRunner$6.evaluate(RandomizedRunner.java:879) at com.carrotsearch.randomizedtesting.RandomizedRunner$7.evaluate(RandomizedRunner.java:890) at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36) at com.carrotsearch.randomizedtesting.rules.SystemPropertiesRestoreRule$1.evaluate(SystemPropertiesRestoreRule.java:57) at org.apache.lucene.util.AbstractBeforeAfterRule$1.evaluate(AbstractBeforeAfterRule.java:45) at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36) at 
org.apache.lucene.util.TestRuleStoreClassName$1.evaluate(TestRuleStoreClassName.java:41) at com.carrotsearch.randomizedtesting.rules.NoShadowingOrOverridesOnMethodsRule$1.evaluate(NoShadowingOrOverridesOnMethodsRule.java:40) at com.carrotsearch.randomizedtesting.rules.NoShadowingOrOverridesOnMethodsRule$1.evaluate(NoShadowingOrOverridesOnMethodsRule.java:40) at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36) at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36) at org.apache.lucene.util.TestRuleAssertionsRequired$1.evaluate(TestRuleAssertionsRequired.java:53)
[JENKINS] Lucene-Solr-7.x-Solaris (64bit/jdk1.8.0) - Build # 415 - Still Unstable!
Build: https://jenkins.thetaphi.de/job/Lucene-Solr-7.x-Solaris/415/ Java: 64bit/jdk1.8.0 -XX:-UseCompressedOops -XX:+UseParallelGC 4 tests failed. FAILED: junit.framework.TestSuite.org.apache.solr.cloud.TestSolrCloudWithSecureImpersonation Error Message: 2 threads leaked from SUITE scope at org.apache.solr.cloud.TestSolrCloudWithSecureImpersonation: 1) Thread[id=28510, name=jetty-launcher-5585-thread-2-EventThread, state=TIMED_WAITING, group=TGRP-TestSolrCloudWithSecureImpersonation] at sun.misc.Unsafe.park(Native Method) at java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:215) at java.util.concurrent.locks.AbstractQueuedSynchronizer.doAcquireSharedNanos(AbstractQueuedSynchronizer.java:1037) at java.util.concurrent.locks.AbstractQueuedSynchronizer.tryAcquireSharedNanos(AbstractQueuedSynchronizer.java:1328) at java.util.concurrent.CountDownLatch.await(CountDownLatch.java:277) at org.apache.curator.CuratorZookeeperClient.internalBlockUntilConnectedOrTimedOut(CuratorZookeeperClient.java:323) at org.apache.curator.RetryLoop.callWithRetry(RetryLoop.java:105) at org.apache.curator.framework.imps.GetDataBuilderImpl.pathInForeground(GetDataBuilderImpl.java:288) at org.apache.curator.framework.imps.GetDataBuilderImpl.forPath(GetDataBuilderImpl.java:279) at org.apache.curator.framework.imps.GetDataBuilderImpl.forPath(GetDataBuilderImpl.java:41) at org.apache.curator.framework.recipes.shared.SharedValue.readValue(SharedValue.java:244) at org.apache.curator.framework.recipes.shared.SharedValue.access$100(SharedValue.java:44) at org.apache.curator.framework.recipes.shared.SharedValue$1.process(SharedValue.java:61) at org.apache.curator.framework.imps.NamespaceWatcher.process(NamespaceWatcher.java:67) at org.apache.zookeeper.ClientCnxn$EventThread.processEvent(ClientCnxn.java:530) at org.apache.zookeeper.ClientCnxn$EventThread.run(ClientCnxn.java:505) 2) Thread[id=28500, name=jetty-launcher-5585-thread-1-EventThread, state=TIMED_WAITING, 
group=TGRP-TestSolrCloudWithSecureImpersonation] at sun.misc.Unsafe.park(Native Method) at java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:215) at java.util.concurrent.locks.AbstractQueuedSynchronizer.doAcquireSharedNanos(AbstractQueuedSynchronizer.java:1037) at java.util.concurrent.locks.AbstractQueuedSynchronizer.tryAcquireSharedNanos(AbstractQueuedSynchronizer.java:1328) at java.util.concurrent.CountDownLatch.await(CountDownLatch.java:277) at org.apache.curator.CuratorZookeeperClient.internalBlockUntilConnectedOrTimedOut(CuratorZookeeperClient.java:323) at org.apache.curator.RetryLoop.callWithRetry(RetryLoop.java:105) at org.apache.curator.framework.imps.GetDataBuilderImpl.pathInForeground(GetDataBuilderImpl.java:288) at org.apache.curator.framework.imps.GetDataBuilderImpl.forPath(GetDataBuilderImpl.java:279) at org.apache.curator.framework.imps.GetDataBuilderImpl.forPath(GetDataBuilderImpl.java:41) at org.apache.curator.framework.recipes.shared.SharedValue.readValue(SharedValue.java:244) at org.apache.curator.framework.recipes.shared.SharedValue.access$100(SharedValue.java:44) at org.apache.curator.framework.recipes.shared.SharedValue$1.process(SharedValue.java:61) at org.apache.curator.framework.imps.NamespaceWatcher.process(NamespaceWatcher.java:67) at org.apache.zookeeper.ClientCnxn$EventThread.processEvent(ClientCnxn.java:530) at org.apache.zookeeper.ClientCnxn$EventThread.run(ClientCnxn.java:505) Stack Trace: com.carrotsearch.randomizedtesting.ThreadLeakError: 2 threads leaked from SUITE scope at org.apache.solr.cloud.TestSolrCloudWithSecureImpersonation: 1) Thread[id=28510, name=jetty-launcher-5585-thread-2-EventThread, state=TIMED_WAITING, group=TGRP-TestSolrCloudWithSecureImpersonation] at sun.misc.Unsafe.park(Native Method) at java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:215) at java.util.concurrent.locks.AbstractQueuedSynchronizer.doAcquireSharedNanos(AbstractQueuedSynchronizer.java:1037) at 
java.util.concurrent.locks.AbstractQueuedSynchronizer.tryAcquireSharedNanos(AbstractQueuedSynchronizer.java:1328) at java.util.concurrent.CountDownLatch.await(CountDownLatch.java:277) at org.apache.curator.CuratorZookeeperClient.internalBlockUntilConnectedOrTimedOut(CuratorZookeeperClient.java:323) at org.apache.curator.RetryLoop.callWithRetry(RetryLoop.java:105) at org.apache.curator.framework.imps.GetDataBuilderImpl.pathInForeground(GetDataBuilderImpl.java:288) at org.apache.curator.framework.imps.GetDataBuilderImpl.forPath(GetDataBuilderImpl.java:279) at
[JENKINS] Lucene-Solr-7.x-Linux (64bit/jdk-9.0.1) - Build # 1254 - Still Unstable!
Build: https://jenkins.thetaphi.de/job/Lucene-Solr-7.x-Linux/1254/ Java: 64bit/jdk-9.0.1 -XX:-UseCompressedOops -XX:+UseConcMarkSweepGC 4 tests failed. FAILED: org.apache.solr.cloud.FullSolrCloudDistribCmdsTest.test Error Message: Could not find collection:collection2 Stack Trace: java.lang.AssertionError: Could not find collection:collection2 at __randomizedtesting.SeedInfo.seed([5E5AAE022AC85CF5:D60E91D88434310D]:0) at org.junit.Assert.fail(Assert.java:93) at org.junit.Assert.assertTrue(Assert.java:43) at org.junit.Assert.assertNotNull(Assert.java:526) at org.apache.solr.cloud.AbstractDistribZkTestBase.waitForRecoveriesToFinish(AbstractDistribZkTestBase.java:155) at org.apache.solr.cloud.AbstractDistribZkTestBase.waitForRecoveriesToFinish(AbstractDistribZkTestBase.java:140) at org.apache.solr.cloud.AbstractDistribZkTestBase.waitForRecoveriesToFinish(AbstractDistribZkTestBase.java:135) at org.apache.solr.cloud.AbstractFullDistribZkTestBase.waitForRecoveriesToFinish(AbstractFullDistribZkTestBase.java:915) at org.apache.solr.cloud.FullSolrCloudDistribCmdsTest.testIndexingBatchPerRequestWithHttpSolrClient(FullSolrCloudDistribCmdsTest.java:612) at org.apache.solr.cloud.FullSolrCloudDistribCmdsTest.test(FullSolrCloudDistribCmdsTest.java:152) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:564) at com.carrotsearch.randomizedtesting.RandomizedRunner.invoke(RandomizedRunner.java:1737) at com.carrotsearch.randomizedtesting.RandomizedRunner$8.evaluate(RandomizedRunner.java:934) at com.carrotsearch.randomizedtesting.RandomizedRunner$9.evaluate(RandomizedRunner.java:970) at com.carrotsearch.randomizedtesting.RandomizedRunner$10.evaluate(RandomizedRunner.java:984) at 
org.apache.solr.BaseDistributedSearchTestCase$ShardsRepeatRule$ShardsFixedStatement.callStatement(BaseDistributedSearchTestCase.java:993) at org.apache.solr.BaseDistributedSearchTestCase$ShardsRepeatRule$ShardsStatement.evaluate(BaseDistributedSearchTestCase.java:968) at com.carrotsearch.randomizedtesting.rules.SystemPropertiesRestoreRule$1.evaluate(SystemPropertiesRestoreRule.java:57) at org.apache.lucene.util.TestRuleSetupTeardownChained$1.evaluate(TestRuleSetupTeardownChained.java:49) at org.apache.lucene.util.AbstractBeforeAfterRule$1.evaluate(AbstractBeforeAfterRule.java:45) at org.apache.lucene.util.TestRuleThreadAndTestName$1.evaluate(TestRuleThreadAndTestName.java:48) at org.apache.lucene.util.TestRuleIgnoreAfterMaxFailures$1.evaluate(TestRuleIgnoreAfterMaxFailures.java:64) at org.apache.lucene.util.TestRuleMarkFailure$1.evaluate(TestRuleMarkFailure.java:47) at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36) at com.carrotsearch.randomizedtesting.ThreadLeakControl$StatementRunner.run(ThreadLeakControl.java:368) at com.carrotsearch.randomizedtesting.ThreadLeakControl.forkTimeoutingTask(ThreadLeakControl.java:817) at com.carrotsearch.randomizedtesting.ThreadLeakControl$3.evaluate(ThreadLeakControl.java:468) at com.carrotsearch.randomizedtesting.RandomizedRunner.runSingleTest(RandomizedRunner.java:943) at com.carrotsearch.randomizedtesting.RandomizedRunner$5.evaluate(RandomizedRunner.java:829) at com.carrotsearch.randomizedtesting.RandomizedRunner$6.evaluate(RandomizedRunner.java:879) at com.carrotsearch.randomizedtesting.RandomizedRunner$7.evaluate(RandomizedRunner.java:890) at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36) at com.carrotsearch.randomizedtesting.rules.SystemPropertiesRestoreRule$1.evaluate(SystemPropertiesRestoreRule.java:57) at org.apache.lucene.util.AbstractBeforeAfterRule$1.evaluate(AbstractBeforeAfterRule.java:45) at 
com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36) at org.apache.lucene.util.TestRuleStoreClassName$1.evaluate(TestRuleStoreClassName.java:41) at com.carrotsearch.randomizedtesting.rules.NoShadowingOrOverridesOnMethodsRule$1.evaluate(NoShadowingOrOverridesOnMethodsRule.java:40) at com.carrotsearch.randomizedtesting.rules.NoShadowingOrOverridesOnMethodsRule$1.evaluate(NoShadowingOrOverridesOnMethodsRule.java:40) at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36) at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36) at
[jira] [Comment Edited] (SOLR-11066) Implement a scheduled trigger
[ https://issues.apache.org/jira/browse/SOLR-11066?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel&focusedCommentId=16341645#comment-16341645 ] David Smiley edited comment on SOLR-11066 at 1/27/18 5:04 AM: -- Shouldn't startTime end in a 'Z', as is standard in Solr? Why use a custom SimpleDateFormat, which by the way is an old API? Simply parse using {{DateMathParser.parseMath(null, str).toInstant()}}, and an Instant has a toString() of the standard ISO-8601 with 'Z' DEFAULT_GRACE_TIME_MS could be more clearly defined as: {{TimeUnit.MINUTES.toMillis(15)}} was (Author: dsmiley): Shouldn't startTime end in a 'Z', as is standard in Solr? Why use a custom SimpleDateFormat, which by the way is an old API? Simply parse using DateMathParser.parse(str).toInstant(), and an Instant has a toString() of the standard ISO-8601 with 'Z' DEFAULT_GRACE_TIME_MS could be more clearly defined as: {{TimeUnit.MINUTES.toMillis(15)}} > Implement a scheduled trigger > - > > Key: SOLR-11066 > URL: https://issues.apache.org/jira/browse/SOLR-11066 > Project: Solr > Issue Type: Sub-task > Security Level: Public(Default Security Level. Issues are Public) > Components: AutoScaling, SolrCloud >Reporter: Shalin Shekhar Mangar >Assignee: Shalin Shekhar Mangar >Priority: Major > Fix For: master (8.0), 7.3 > > Attachments: SOLR-11066.patch > > > Implement a trigger that runs on a fixed interval say every 1 hour or every > 24 hours starting at midnight etc. -- This message was sent by Atlassian JIRA (v7.6.3#76005) - To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org For additional commands, e-mail: dev-h...@lucene.apache.org
[JENKINS] Lucene-Solr-master-Solaris (64bit/jdk1.8.0) - Build # 1649 - Still Unstable!
Build: https://jenkins.thetaphi.de/job/Lucene-Solr-master-Solaris/1649/ Java: 64bit/jdk1.8.0 -XX:-UseCompressedOops -XX:+UseConcMarkSweepGC 3 tests failed. FAILED: junit.framework.TestSuite.org.apache.solr.handler.PingRequestHandlerTest Error Message: 1 thread leaked from SUITE scope at org.apache.solr.handler.PingRequestHandlerTest: 1) Thread[id=27624, name=qtp2011894227-27624, state=TIMED_WAITING, group=TGRP-PingRequestHandlerTest] at sun.misc.Unsafe.park(Native Method) at java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:215) at java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:2163) at org.eclipse.jetty.util.thread.ReservedThreadExecutor$ReservedThread.reservedWait(ReservedThreadExecutor.java:308) at org.eclipse.jetty.util.thread.ReservedThreadExecutor$ReservedThread.run(ReservedThreadExecutor.java:373) at org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:708) at org.eclipse.jetty.util.thread.QueuedThreadPool$2.run(QueuedThreadPool.java:626) at java.lang.Thread.run(Thread.java:748) Stack Trace: com.carrotsearch.randomizedtesting.ThreadLeakError: 1 thread leaked from SUITE scope at org.apache.solr.handler.PingRequestHandlerTest: 1) Thread[id=27624, name=qtp2011894227-27624, state=TIMED_WAITING, group=TGRP-PingRequestHandlerTest] at sun.misc.Unsafe.park(Native Method) at java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:215) at java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:2163) at org.eclipse.jetty.util.thread.ReservedThreadExecutor$ReservedThread.reservedWait(ReservedThreadExecutor.java:308) at org.eclipse.jetty.util.thread.ReservedThreadExecutor$ReservedThread.run(ReservedThreadExecutor.java:373) at org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:708) at org.eclipse.jetty.util.thread.QueuedThreadPool$2.run(QueuedThreadPool.java:626) at 
java.lang.Thread.run(Thread.java:748) at __randomizedtesting.SeedInfo.seed([E141BBE5C606133F]:0) FAILED: junit.framework.TestSuite.org.apache.solr.handler.PingRequestHandlerTest Error Message: There are still zombie threads that couldn't be terminated:1) Thread[id=27624, name=qtp2011894227-27624, state=TIMED_WAITING, group=TGRP-PingRequestHandlerTest] at sun.misc.Unsafe.park(Native Method) at java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:215) at java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:2163) at org.eclipse.jetty.util.thread.ReservedThreadExecutor$ReservedThread.reservedWait(ReservedThreadExecutor.java:308) at org.eclipse.jetty.util.thread.ReservedThreadExecutor$ReservedThread.run(ReservedThreadExecutor.java:373) at org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:708) at org.eclipse.jetty.util.thread.QueuedThreadPool$2.run(QueuedThreadPool.java:626) at java.lang.Thread.run(Thread.java:748) Stack Trace: com.carrotsearch.randomizedtesting.ThreadLeakError: There are still zombie threads that couldn't be terminated: 1) Thread[id=27624, name=qtp2011894227-27624, state=TIMED_WAITING, group=TGRP-PingRequestHandlerTest] at sun.misc.Unsafe.park(Native Method) at java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:215) at java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:2163) at org.eclipse.jetty.util.thread.ReservedThreadExecutor$ReservedThread.reservedWait(ReservedThreadExecutor.java:308) at org.eclipse.jetty.util.thread.ReservedThreadExecutor$ReservedThread.run(ReservedThreadExecutor.java:373) at org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:708) at org.eclipse.jetty.util.thread.QueuedThreadPool$2.run(QueuedThreadPool.java:626) at java.lang.Thread.run(Thread.java:748) at __randomizedtesting.SeedInfo.seed([E141BBE5C606133F]:0) FAILED: 
org.apache.solr.client.solrj.io.stream.StreamExpressionTest.testDistributions Error Message: Stack Trace: java.lang.AssertionError at __randomizedtesting.SeedInfo.seed([DE7272572450C1AC:618D33FDFAAA2130]:0) at org.junit.Assert.fail(Assert.java:92) at org.junit.Assert.assertTrue(Assert.java:43) at org.junit.Assert.assertTrue(Assert.java:54) at org.apache.solr.client.solrj.io.stream.StreamExpressionTest.testDistributions(StreamExpressionTest.java:6637) at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62) at
[JENKINS] Lucene-Solr-SmokeRelease-7.x - Build # 130 - Still Failing
Build: https://builds.apache.org/job/Lucene-Solr-SmokeRelease-7.x/130/ No tests ran. Build Log: [...truncated 28288 lines...] prepare-release-no-sign: [mkdir] Created dir: /home/jenkins/jenkins-slave/workspace/Lucene-Solr-SmokeRelease-7.x/lucene/build/smokeTestRelease/dist [copy] Copying 491 files to /home/jenkins/jenkins-slave/workspace/Lucene-Solr-SmokeRelease-7.x/lucene/build/smokeTestRelease/dist/lucene [copy] Copying 215 files to /home/jenkins/jenkins-slave/workspace/Lucene-Solr-SmokeRelease-7.x/lucene/build/smokeTestRelease/dist/solr [smoker] Java 1.8 JAVA_HOME=/home/jenkins/tools/java/latest1.8 [smoker] NOTE: output encoding is UTF-8 [smoker] [smoker] Load release URL "file:/home/jenkins/jenkins-slave/workspace/Lucene-Solr-SmokeRelease-7.x/lucene/build/smokeTestRelease/dist/"... [smoker] [smoker] Test Lucene... [smoker] test basics... [smoker] get KEYS [smoker] 0.2 MB in 0.01 sec (37.4 MB/sec) [smoker] check changes HTML... [smoker] download lucene-7.3.0-src.tgz... [smoker] 31.7 MB in 0.02 sec (1276.6 MB/sec) [smoker] verify md5/sha1 digests [smoker] download lucene-7.3.0.tgz... [smoker] 73.1 MB in 0.06 sec (1231.5 MB/sec) [smoker] verify md5/sha1 digests [smoker] download lucene-7.3.0.zip... [smoker] 83.6 MB in 0.07 sec (1234.7 MB/sec) [smoker] verify md5/sha1 digests [smoker] unpack lucene-7.3.0.tgz... [smoker] verify JAR metadata/identity/no javax.* or java.* classes... [smoker] test demo with 1.8... [smoker] got 6284 hits for query "lucene" [smoker] checkindex with 1.8... [smoker] check Lucene's javadoc JAR [smoker] unpack lucene-7.3.0.zip... [smoker] verify JAR metadata/identity/no javax.* or java.* classes... [smoker] test demo with 1.8... [smoker] got 6284 hits for query "lucene" [smoker] checkindex with 1.8... [smoker] check Lucene's javadoc JAR [smoker] unpack lucene-7.3.0-src.tgz... [smoker] make sure no JARs/WARs in src dist... [smoker] run "ant validate" [smoker] run tests w/ Java 8 and testArgs='-Dtests.slow=false'... 
[smoker] test demo with 1.8... [smoker] got 215 hits for query "lucene" [smoker] checkindex with 1.8... [smoker] generate javadocs w/ Java 8... [smoker] [smoker] Crawl/parse... [smoker] [smoker] Verify... [smoker] confirm all releases have coverage in TestBackwardsCompatibility [smoker] find all past Lucene releases... [smoker] run TestBackwardsCompatibility.. [smoker] success! [smoker] [smoker] Test Solr... [smoker] test basics... [smoker] get KEYS [smoker] 0.2 MB in 0.00 sec (171.0 MB/sec) [smoker] check changes HTML... [smoker] download solr-7.3.0-src.tgz... [smoker] 54.0 MB in 0.05 sec (1180.3 MB/sec) [smoker] verify md5/sha1 digests [smoker] download solr-7.3.0.tgz... [smoker] 151.5 MB in 0.13 sec (1194.3 MB/sec) [smoker] verify md5/sha1 digests [smoker] download solr-7.3.0.zip... [smoker] 152.5 MB in 0.14 sec (1069.9 MB/sec) [smoker] verify md5/sha1 digests [smoker] unpack solr-7.3.0.tgz... [smoker] verify JAR metadata/identity/no javax.* or java.* classes... [smoker] unpack lucene-7.3.0.tgz... [smoker] **WARNING**: skipping check of /home/jenkins/jenkins-slave/workspace/Lucene-Solr-SmokeRelease-7.x/lucene/build/smokeTestRelease/tmp/unpack/solr-7.3.0/contrib/dataimporthandler-extras/lib/javax.mail-1.5.1.jar: it has javax.* classes [smoker] **WARNING**: skipping check of /home/jenkins/jenkins-slave/workspace/Lucene-Solr-SmokeRelease-7.x/lucene/build/smokeTestRelease/tmp/unpack/solr-7.3.0/contrib/dataimporthandler-extras/lib/activation-1.1.1.jar: it has javax.* classes [smoker] copying unpacked distribution for Java 8 ... [smoker] test solr example w/ Java 8... [smoker] start Solr instance (log=/home/jenkins/jenkins-slave/workspace/Lucene-Solr-SmokeRelease-7.x/lucene/build/smokeTestRelease/tmp/unpack/solr-7.3.0-java8/solr-example.log)... 
[smoker] No process found for Solr node running on port 8983 [smoker] Running techproducts example on port 8983 from /home/jenkins/jenkins-slave/workspace/Lucene-Solr-SmokeRelease-7.x/lucene/build/smokeTestRelease/tmp/unpack/solr-7.3.0-java8 [smoker] *** [WARN] *** Your open file limit is currently 6. [smoker] It should be set to 65000 to avoid operational disruption. [smoker] If you no longer wish to see this warning, set SOLR_ULIMIT_CHECKS to false in your profile or solr.in.sh [smoker] *** [WARN] *** Your Max Processes Limit is currently 10240. [smoker] It should be set to 65000 to avoid operational disruption. [smoker] If you no longer wish to see this warning, set SOLR_ULIMIT_CHECKS to false in your profile or solr.in.sh [smoker] Creating
[jira] [Commented] (SOLR-11900) API command to delete oldest collections in a time routed alias
[ https://issues.apache.org/jira/browse/SOLR-11900?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel&focusedCommentId=16341937#comment-16341937 ] David Smiley commented on SOLR-11900: - Perhaps in fact we don't actually need a new API but instead have a delete query that looks like this {{timeRoutedField:[* TO NOW/MONTH]}} auto-purge the old collections. We've already got the URP in place to intercept and act. Arguably if new data creates collections, telling it to delete old stuff should delete the old collections. Regardless of how this feature looks, there will be a separate issue to auto-delete. The issue here is about being explicit about it. > API command to delete oldest collections in a time routed alias > --- > > Key: SOLR-11900 > URL: https://issues.apache.org/jira/browse/SOLR-11900 > Project: Solr > Issue Type: Sub-task > Security Level: Public(Default Security Level. Issues are Public) > Components: SolrCloud >Reporter: David Smiley >Assignee: David Smiley >Priority: Major > Fix For: 7.3 > > > For Time Routed Aliases, we'll need an API command to delete the oldest > collection(s). Perhaps the command action name is > DELETE_COLLECTION_OF_ROUTED_ALIAS (yes that's long). And input is of course > the routed alias name, plus a mandatory "before" which is a standard time > input that Solr accepts that will likely include date math. Thus if you used > before="NOW/DAY-90DAYS" then you're guaranteed to have the last 90 days worth > of data. If a collection overlaps past what "before" is computed to be then > it needs to stay. The pattern might match any number of collections, perhaps > none. But in all cases, the most recent collection must be retained -- the > time routed aliases must at all times refer to at least one collection. > The underlying steps will be to first update the alias, and then delete the > collection(s). It ought to return the collections that get deleted. 
-- This message was sent by Atlassian JIRA (v7.6.3#76005) - To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org For additional commands, e-mail: dev-h...@lucene.apache.org
[JENKINS-EA] Lucene-Solr-master-Linux (64bit/jdk-10-ea+41) - Build # 21354 - Unstable!
Build: https://jenkins.thetaphi.de/job/Lucene-Solr-master-Linux/21354/ Java: 64bit/jdk-10-ea+41 -XX:-UseCompressedOops -XX:+UseSerialGC 2 tests failed. FAILED: org.apache.solr.cloud.autoscaling.AutoAddReplicasPlanActionTest.testSimple Error Message: IOException occured when talking to server at: https://127.0.0.1:38683/solr Stack Trace: org.apache.solr.client.solrj.SolrServerException: IOException occured when talking to server at: https://127.0.0.1:38683/solr at __randomizedtesting.SeedInfo.seed([317D0C661A6851EB:9CE28983D9B853A]:0) at org.apache.solr.client.solrj.impl.HttpSolrClient.executeMethod(HttpSolrClient.java:657) at org.apache.solr.client.solrj.impl.HttpSolrClient.request(HttpSolrClient.java:255) at org.apache.solr.client.solrj.impl.HttpSolrClient.request(HttpSolrClient.java:244) at org.apache.solr.client.solrj.impl.LBHttpSolrClient.doRequest(LBHttpSolrClient.java:483) at org.apache.solr.client.solrj.impl.LBHttpSolrClient.request(LBHttpSolrClient.java:413) at org.apache.solr.client.solrj.impl.CloudSolrClient.sendRequest(CloudSolrClient.java:1104) at org.apache.solr.client.solrj.impl.CloudSolrClient.requestWithRetryOnStaleState(CloudSolrClient.java:884) at org.apache.solr.client.solrj.impl.CloudSolrClient.request(CloudSolrClient.java:817) at org.apache.solr.client.solrj.SolrClient.request(SolrClient.java:1219) at org.apache.solr.cloud.autoscaling.AutoAddReplicasPlanActionTest.testSimple(AutoAddReplicasPlanActionTest.java:110) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:564) at com.carrotsearch.randomizedtesting.RandomizedRunner.invoke(RandomizedRunner.java:1737) at com.carrotsearch.randomizedtesting.RandomizedRunner$8.evaluate(RandomizedRunner.java:934) at 
com.carrotsearch.randomizedtesting.RandomizedRunner$9.evaluate(RandomizedRunner.java:970) at com.carrotsearch.randomizedtesting.RandomizedRunner$10.evaluate(RandomizedRunner.java:984) at com.carrotsearch.randomizedtesting.rules.SystemPropertiesRestoreRule$1.evaluate(SystemPropertiesRestoreRule.java:57) at org.apache.lucene.util.TestRuleSetupTeardownChained$1.evaluate(TestRuleSetupTeardownChained.java:49) at org.apache.lucene.util.AbstractBeforeAfterRule$1.evaluate(AbstractBeforeAfterRule.java:45) at org.apache.lucene.util.TestRuleThreadAndTestName$1.evaluate(TestRuleThreadAndTestName.java:48) at org.apache.lucene.util.TestRuleIgnoreAfterMaxFailures$1.evaluate(TestRuleIgnoreAfterMaxFailures.java:64) at org.apache.lucene.util.TestRuleMarkFailure$1.evaluate(TestRuleMarkFailure.java:47) at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36) at com.carrotsearch.randomizedtesting.ThreadLeakControl$StatementRunner.run(ThreadLeakControl.java:368) at com.carrotsearch.randomizedtesting.ThreadLeakControl.forkTimeoutingTask(ThreadLeakControl.java:817) at com.carrotsearch.randomizedtesting.ThreadLeakControl$3.evaluate(ThreadLeakControl.java:468) at com.carrotsearch.randomizedtesting.RandomizedRunner.runSingleTest(RandomizedRunner.java:943) at com.carrotsearch.randomizedtesting.RandomizedRunner$5.evaluate(RandomizedRunner.java:829) at com.carrotsearch.randomizedtesting.RandomizedRunner$6.evaluate(RandomizedRunner.java:879) at com.carrotsearch.randomizedtesting.RandomizedRunner$7.evaluate(RandomizedRunner.java:890) at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36) at com.carrotsearch.randomizedtesting.rules.SystemPropertiesRestoreRule$1.evaluate(SystemPropertiesRestoreRule.java:57) at org.apache.lucene.util.AbstractBeforeAfterRule$1.evaluate(AbstractBeforeAfterRule.java:45) at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36) at 
org.apache.lucene.util.TestRuleStoreClassName$1.evaluate(TestRuleStoreClassName.java:41) at com.carrotsearch.randomizedtesting.rules.NoShadowingOrOverridesOnMethodsRule$1.evaluate(NoShadowingOrOverridesOnMethodsRule.java:40) at com.carrotsearch.randomizedtesting.rules.NoShadowingOrOverridesOnMethodsRule$1.evaluate(NoShadowingOrOverridesOnMethodsRule.java:40) at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36) at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36) at
[jira] [Comment Edited] (SOLR-8362) Add docValues support for TextField
[ https://issues.apache.org/jira/browse/SOLR-8362?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel=16341929#comment-16341929 ] Joel Bernstein edited comment on SOLR-8362 at 1/27/18 3:27 AM: --- I wanted to comment on re-indexing text fields with streaming expressions. There is a straight forward approach that does not require doc values described here: [http://joelsolr.blogspot.com/2016/10/solr-63-batch-jobs-parallel-etl-and.html] I'm not convinced that putting text fields into docValues is the way to go. The *significantTerms*, *features* and *train* streaming expressions can do some really nice things with text fields in the inverted index. In the next release, the new *termVectors* expression allows you to create on-the-fly tf-idf *term vectors* which can be used for all kinds of text analytics. was (Author: joel.bernstein): I wanted to comment on re-indexing text fields with streaming expressions. There is a straight forward approach that does not require doc values described here: [http://joelsolr.blogspot.com/2016/10/solr-63-batch-jobs-parallel-etl-and.html] I'm not convinced that putting text fields into docValues is the way to go. The *significantTerms*, *features* and *train* streaming expressions can do some really nice things with text fields in the inverted index. In the next release, the new *termVectors* expression allows you to create on-the-fly tf-idf *term vectors* which can be used all for all kinds of text analytics. > Add docValues support for TextField > --- > > Key: SOLR-8362 > URL: https://issues.apache.org/jira/browse/SOLR-8362 > Project: Solr > Issue Type: Improvement >Reporter: Hoss Man >Priority: Major > > At the last lucene/solr revolution, Toke asked a question about why TextField > doesn't support docValues. 
The short answer is because no one ever added it, > but the longer answer was because we would have to think through carefully > the _intent_ of supporting docValues for a "tokenized" field like TextField, > and how to support various conflicting usecases where they could be handy. -- This message was sent by Atlassian JIRA (v7.6.3#76005) - To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org For additional commands, e-mail: dev-h...@lucene.apache.org
[jira] [Comment Edited] (SOLR-8362) Add docValues support for TextField
[ https://issues.apache.org/jira/browse/SOLR-8362?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel=16341929#comment-16341929 ] Joel Bernstein edited comment on SOLR-8362 at 1/27/18 3:26 AM: --- I wanted to comment on re-indexing text fields with streaming expressions. There is a straight forward approach that does not require doc values described here: [http://joelsolr.blogspot.com/2016/10/solr-63-batch-jobs-parallel-etl-and.html] I'm not convinced that putting text fields into docValues is the way to go. The *significantTerms*, *features* and *train* streaming expressions can do some really nice things with text fields in the inverted index. In the next release, the new *termVectors* expression allows you to create on-the-fly tf-idf *term vectors* which can be used all for all kinds of text analytics. was (Author: joel.bernstein): I wanted to comment on re-indexing text fields with streaming expressions. There is a straight forward approach that does not require doc values described here: [http://joelsolr.blogspot.com/2016/10/solr-63-batch-jobs-parallel-etl-and.html] I'm not convinced that putting text fields into docValues is the way to go. The *significantTerms*, *features* and *train* streaming expressions can do some really nice things with text fields in the inverted index. In the next release, the new *termVectors* expression allows you to create on-the-fly tf-idf *term vectors* which can be used all for all kinds of text analytics. > Add docValues support for TextField > --- > > Key: SOLR-8362 > URL: https://issues.apache.org/jira/browse/SOLR-8362 > Project: Solr > Issue Type: Improvement >Reporter: Hoss Man >Priority: Major > > At the last lucene/solr revolution, Toke asked a question about why TextField > doesn't support docValues. 
The short answer is because no one ever added it, > but the longer answer was because we would have to think through carefully > the _intent_ of supporting docValues for a "tokenized" field like TextField, > and how to support various conflicting usecases where they could be handy. -- This message was sent by Atlassian JIRA (v7.6.3#76005) - To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org For additional commands, e-mail: dev-h...@lucene.apache.org
[jira] [Comment Edited] (SOLR-8362) Add docValues support for TextField
[ https://issues.apache.org/jira/browse/SOLR-8362?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel=16341929#comment-16341929 ] Joel Bernstein edited comment on SOLR-8362 at 1/27/18 3:25 AM: --- I wanted to comment on re-indexing text fields with streaming expressions. There is a straight forward approach that does not require doc values described here: [http://joelsolr.blogspot.com/2016/10/solr-63-batch-jobs-parallel-etl-and.html] I'm not convinced that putting text fields into docValues is the way to go. The *significantTerms*, *features* and *train* streaming expressions can do some really nice things with text fields in the inverted index. In the next release, the new *termVectors* expression allows you to create on-the-fly tf-idf *term vectors* which can be used all for all kinds of text analytics. was (Author: joel.bernstein): I wanted to comment on re-indexing text fields with streaming expressions. There is a straight forward approach that does not require doc values described here: [http://joelsolr.blogspot.com/2016/10/solr-63-batch-jobs-parallel-etl-and.html] I'm not convinced that putting text fields into docValues is the way to go. The *significantTerms*, *features* and *train* streaming expressions can do some really nice things with text fields in the inverted index. In the next release, the new *termVectors* expression allows you to create on-the-fly tf-idf *term vectors* which can be used all for all kinds of text analytics. > Add docValues support for TextField > --- > > Key: SOLR-8362 > URL: https://issues.apache.org/jira/browse/SOLR-8362 > Project: Solr > Issue Type: Improvement >Reporter: Hoss Man >Priority: Major > > At the last lucene/solr revolution, Toke asked a question about why TextField > doesn't support docValues. 
The short answer is because no one ever added it, > but the longer answer was because we would have to think through carefully > the _intent_ of supporting docValues for a "tokenized" field like TextField, > and how to support various conflicting usecases where they could be handy. -- This message was sent by Atlassian JIRA (v7.6.3#76005) - To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org For additional commands, e-mail: dev-h...@lucene.apache.org
[jira] [Commented] (SOLR-8362) Add docValues support for TextField
[ https://issues.apache.org/jira/browse/SOLR-8362?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel=16341929#comment-16341929 ] Joel Bernstein commented on SOLR-8362: -- I wanted to comment on re-indexing text fields with streaming expressions. There is a straight forward approach that does not require doc values described here: [http://joelsolr.blogspot.com/2016/10/solr-63-batch-jobs-parallel-etl-and.html] I'm not convinced that putting text fields into docValues is the way to go. The *significantTerms*, *features* and *train* streaming expressions can do some really nice things with text fields in the inverted index. In the next release, the new *termVectors* expression allows you to create on-the-fly tf-idf *term vectors* which can be used all for all kinds of text analytics. > Add docValues support for TextField > --- > > Key: SOLR-8362 > URL: https://issues.apache.org/jira/browse/SOLR-8362 > Project: Solr > Issue Type: Improvement >Reporter: Hoss Man >Priority: Major > > At the last lucene/solr revolution, Toke asked a question about why TextField > doesn't support docValues. The short answer is because no one ever added it, > but the longer answer was because we would have to think through carefully > the _intent_ of supporting docValues for a "tokenized" field like TextField, > and how to support various conflicting usecases where they could be handy. -- This message was sent by Atlassian JIRA (v7.6.3#76005) - To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org For additional commands, e-mail: dev-h...@lucene.apache.org
[JENKINS] Lucene-Solr-7.x-MacOSX (64bit/jdk-9) - Build # 423 - Still Unstable!
Build: https://jenkins.thetaphi.de/job/Lucene-Solr-7.x-MacOSX/423/ Java: 64bit/jdk-9 -XX:-UseCompressedOops -XX:+UseParallelGC 3 tests failed. FAILED: org.apache.solr.cloud.api.collections.TestCollectionsAPIViaSolrCloudCluster.testCollectionCreateSearchDelete Error Message: Could not find collection:testcollection Stack Trace: java.lang.AssertionError: Could not find collection:testcollection at __randomizedtesting.SeedInfo.seed([88B001E85647C9F1:2B4AAF4DD1AF2354]:0) at org.junit.Assert.fail(Assert.java:93) at org.junit.Assert.assertTrue(Assert.java:43) at org.junit.Assert.assertNotNull(Assert.java:526) at org.apache.solr.cloud.AbstractDistribZkTestBase.waitForRecoveriesToFinish(AbstractDistribZkTestBase.java:155) at org.apache.solr.cloud.api.collections.TestCollectionsAPIViaSolrCloudCluster.createCollection(TestCollectionsAPIViaSolrCloudCluster.java:93) at org.apache.solr.cloud.api.collections.TestCollectionsAPIViaSolrCloudCluster.testCollectionCreateSearchDelete(TestCollectionsAPIViaSolrCloudCluster.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:564) at com.carrotsearch.randomizedtesting.RandomizedRunner.invoke(RandomizedRunner.java:1737) at com.carrotsearch.randomizedtesting.RandomizedRunner$8.evaluate(RandomizedRunner.java:934) at com.carrotsearch.randomizedtesting.RandomizedRunner$9.evaluate(RandomizedRunner.java:970) at com.carrotsearch.randomizedtesting.RandomizedRunner$10.evaluate(RandomizedRunner.java:984) at com.carrotsearch.randomizedtesting.rules.SystemPropertiesRestoreRule$1.evaluate(SystemPropertiesRestoreRule.java:57) at org.apache.lucene.util.TestRuleSetupTeardownChained$1.evaluate(TestRuleSetupTeardownChained.java:49) at 
org.apache.lucene.util.AbstractBeforeAfterRule$1.evaluate(AbstractBeforeAfterRule.java:45) at org.apache.lucene.util.TestRuleThreadAndTestName$1.evaluate(TestRuleThreadAndTestName.java:48) at org.apache.lucene.util.TestRuleIgnoreAfterMaxFailures$1.evaluate(TestRuleIgnoreAfterMaxFailures.java:64) at org.apache.lucene.util.TestRuleMarkFailure$1.evaluate(TestRuleMarkFailure.java:47) at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36) at com.carrotsearch.randomizedtesting.ThreadLeakControl$StatementRunner.run(ThreadLeakControl.java:368) at com.carrotsearch.randomizedtesting.ThreadLeakControl.forkTimeoutingTask(ThreadLeakControl.java:817) at com.carrotsearch.randomizedtesting.ThreadLeakControl$3.evaluate(ThreadLeakControl.java:468) at com.carrotsearch.randomizedtesting.RandomizedRunner.runSingleTest(RandomizedRunner.java:943) at com.carrotsearch.randomizedtesting.RandomizedRunner$5.evaluate(RandomizedRunner.java:829) at com.carrotsearch.randomizedtesting.RandomizedRunner$6.evaluate(RandomizedRunner.java:879) at com.carrotsearch.randomizedtesting.RandomizedRunner$7.evaluate(RandomizedRunner.java:890) at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36) at com.carrotsearch.randomizedtesting.rules.SystemPropertiesRestoreRule$1.evaluate(SystemPropertiesRestoreRule.java:57) at org.apache.lucene.util.AbstractBeforeAfterRule$1.evaluate(AbstractBeforeAfterRule.java:45) at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36) at org.apache.lucene.util.TestRuleStoreClassName$1.evaluate(TestRuleStoreClassName.java:41) at com.carrotsearch.randomizedtesting.rules.NoShadowingOrOverridesOnMethodsRule$1.evaluate(NoShadowingOrOverridesOnMethodsRule.java:40) at com.carrotsearch.randomizedtesting.rules.NoShadowingOrOverridesOnMethodsRule$1.evaluate(NoShadowingOrOverridesOnMethodsRule.java:40) at 
com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36) at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36) at org.apache.lucene.util.TestRuleAssertionsRequired$1.evaluate(TestRuleAssertionsRequired.java:53) at org.apache.lucene.util.TestRuleMarkFailure$1.evaluate(TestRuleMarkFailure.java:47) at org.apache.lucene.util.TestRuleIgnoreAfterMaxFailures$1.evaluate(TestRuleIgnoreAfterMaxFailures.java:64) at org.apache.lucene.util.TestRuleIgnoreTestSuites$1.evaluate(TestRuleIgnoreTestSuites.java:54) at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36) at
[JENKINS] Lucene-Solr-Tests-7.x - Build # 335 - Still unstable
Build: https://builds.apache.org/job/Lucene-Solr-Tests-7.x/335/ 5 tests failed. FAILED: junit.framework.TestSuite.org.apache.solr.cloud.TestCloudJSONFacetJoinDomain Error Message: Could not load collection from ZK: org.apache.solr.cloud.TestCloudJSONFacetJoinDomain_collection Stack Trace: org.apache.solr.common.SolrException: Could not load collection from ZK: org.apache.solr.cloud.TestCloudJSONFacetJoinDomain_collection at __randomizedtesting.SeedInfo.seed([39B3207A8764B2DF]:0) at org.apache.solr.common.cloud.ZkStateReader.getCollectionLive(ZkStateReader.java:1108) at org.apache.solr.common.cloud.ZkStateReader$LazyCollectionRef.get(ZkStateReader.java:647) at org.apache.solr.common.cloud.ClusterState.getCollectionOrNull(ClusterState.java:137) at org.apache.solr.cloud.AbstractDistribZkTestBase.waitForRecoveriesToFinish(AbstractDistribZkTestBase.java:154) at org.apache.solr.cloud.TestCloudJSONFacetJoinDomain.waitForRecoveriesToFinish(TestCloudJSONFacetJoinDomain.java:806) at org.apache.solr.cloud.TestCloudJSONFacetJoinDomain.createMiniSolrCloudCluster(TestCloudJSONFacetJoinDomain.java:119) at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62) at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.lang.reflect.Method.invoke(Method.java:498) at com.carrotsearch.randomizedtesting.RandomizedRunner.invoke(RandomizedRunner.java:1737) at com.carrotsearch.randomizedtesting.RandomizedRunner$6.evaluate(RandomizedRunner.java:874) at com.carrotsearch.randomizedtesting.RandomizedRunner$7.evaluate(RandomizedRunner.java:890) at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36) at com.carrotsearch.randomizedtesting.rules.SystemPropertiesRestoreRule$1.evaluate(SystemPropertiesRestoreRule.java:57) at org.apache.lucene.util.AbstractBeforeAfterRule$1.evaluate(AbstractBeforeAfterRule.java:45) at 
com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36) at org.apache.lucene.util.TestRuleStoreClassName$1.evaluate(TestRuleStoreClassName.java:41) at com.carrotsearch.randomizedtesting.rules.NoShadowingOrOverridesOnMethodsRule$1.evaluate(NoShadowingOrOverridesOnMethodsRule.java:40) at com.carrotsearch.randomizedtesting.rules.NoShadowingOrOverridesOnMethodsRule$1.evaluate(NoShadowingOrOverridesOnMethodsRule.java:40) at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36) at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36) at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36) at org.apache.lucene.util.TestRuleAssertionsRequired$1.evaluate(TestRuleAssertionsRequired.java:53) at org.apache.lucene.util.TestRuleMarkFailure$1.evaluate(TestRuleMarkFailure.java:47) at org.apache.lucene.util.TestRuleIgnoreAfterMaxFailures$1.evaluate(TestRuleIgnoreAfterMaxFailures.java:64) at org.apache.lucene.util.TestRuleIgnoreTestSuites$1.evaluate(TestRuleIgnoreTestSuites.java:54) at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36) at com.carrotsearch.randomizedtesting.ThreadLeakControl$StatementRunner.run(ThreadLeakControl.java:368) at java.lang.Thread.run(Thread.java:748) Caused by: org.apache.zookeeper.KeeperException$SessionExpiredException: KeeperErrorCode = Session expired for /collections/org.apache.solr.cloud.TestCloudJSONFacetJoinDomain_collection/state.json at org.apache.zookeeper.KeeperException.create(KeeperException.java:127) at org.apache.zookeeper.KeeperException.create(KeeperException.java:51) at org.apache.zookeeper.ZooKeeper.getData(ZooKeeper.java:1212) at org.apache.solr.common.cloud.SolrZkClient.lambda$getData$5(SolrZkClient.java:339) at org.apache.solr.common.cloud.ZkCmdExecutor.retryOperation(ZkCmdExecutor.java:60) at 
org.apache.solr.common.cloud.SolrZkClient.getData(SolrZkClient.java:339) at org.apache.solr.common.cloud.ZkStateReader.fetchCollectionState(ZkStateReader.java:1120) at org.apache.solr.common.cloud.ZkStateReader.getCollectionLive(ZkStateReader.java:1106) ... 29 more FAILED: org.apache.solr.cloud.TestUtilizeNode.test Error Message: no replica should be present in 127.0.0.1:44718_solr Stack Trace: java.lang.AssertionError: no replica should be present in 127.0.0.1:44718_solr at __randomizedtesting.SeedInfo.seed([39B3207A8764B2DF:B1E71FA02998DF27]:0) at org.junit.Assert.fail(Assert.java:93) at org.junit.Assert.assertTrue(Assert.java:43) at
[JENKINS] Lucene-Solr-7.x-Linux (32bit/jdk1.8.0_144) - Build # 1253 - Unstable!
Build: https://jenkins.thetaphi.de/job/Lucene-Solr-7.x-Linux/1253/ Java: 32bit/jdk1.8.0_144 -client -XX:+UseSerialGC 2 tests failed. FAILED: junit.framework.TestSuite.org.apache.solr.cloud.TestSolrCloudWithSecureImpersonation Error Message: 2 threads leaked from SUITE scope at org.apache.solr.cloud.TestSolrCloudWithSecureImpersonation: 1) Thread[id=3397, name=jetty-launcher-787-thread-1-EventThread, state=TIMED_WAITING, group=TGRP-TestSolrCloudWithSecureImpersonation] at sun.misc.Unsafe.park(Native Method) at java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:215) at java.util.concurrent.locks.AbstractQueuedSynchronizer.doAcquireSharedNanos(AbstractQueuedSynchronizer.java:1037) at java.util.concurrent.locks.AbstractQueuedSynchronizer.tryAcquireSharedNanos(AbstractQueuedSynchronizer.java:1328) at java.util.concurrent.CountDownLatch.await(CountDownLatch.java:277) at org.apache.curator.CuratorZookeeperClient.internalBlockUntilConnectedOrTimedOut(CuratorZookeeperClient.java:323) at org.apache.curator.RetryLoop.callWithRetry(RetryLoop.java:105) at org.apache.curator.framework.imps.GetDataBuilderImpl.pathInForeground(GetDataBuilderImpl.java:288) at org.apache.curator.framework.imps.GetDataBuilderImpl.forPath(GetDataBuilderImpl.java:279) at org.apache.curator.framework.imps.GetDataBuilderImpl.forPath(GetDataBuilderImpl.java:41) at org.apache.curator.framework.recipes.shared.SharedValue.readValue(SharedValue.java:244) at org.apache.curator.framework.recipes.shared.SharedValue.access$100(SharedValue.java:44) at org.apache.curator.framework.recipes.shared.SharedValue$1.process(SharedValue.java:61) at org.apache.curator.framework.imps.NamespaceWatcher.process(NamespaceWatcher.java:67) at org.apache.zookeeper.ClientCnxn$EventThread.processEvent(ClientCnxn.java:530) at org.apache.zookeeper.ClientCnxn$EventThread.run(ClientCnxn.java:505) 2) Thread[id=3401, name=jetty-launcher-787-thread-2-EventThread, state=TIMED_WAITING, 
group=TGRP-TestSolrCloudWithSecureImpersonation] at sun.misc.Unsafe.park(Native Method) at java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:215) at java.util.concurrent.locks.AbstractQueuedSynchronizer.doAcquireSharedNanos(AbstractQueuedSynchronizer.java:1037) at java.util.concurrent.locks.AbstractQueuedSynchronizer.tryAcquireSharedNanos(AbstractQueuedSynchronizer.java:1328) at java.util.concurrent.CountDownLatch.await(CountDownLatch.java:277) at org.apache.curator.CuratorZookeeperClient.internalBlockUntilConnectedOrTimedOut(CuratorZookeeperClient.java:323) at org.apache.curator.RetryLoop.callWithRetry(RetryLoop.java:105) at org.apache.curator.framework.imps.GetDataBuilderImpl.pathInForeground(GetDataBuilderImpl.java:288) at org.apache.curator.framework.imps.GetDataBuilderImpl.forPath(GetDataBuilderImpl.java:279) at org.apache.curator.framework.imps.GetDataBuilderImpl.forPath(GetDataBuilderImpl.java:41) at org.apache.curator.framework.recipes.shared.SharedValue.readValue(SharedValue.java:244) at org.apache.curator.framework.recipes.shared.SharedValue.access$100(SharedValue.java:44) at org.apache.curator.framework.recipes.shared.SharedValue$1.process(SharedValue.java:61) at org.apache.curator.framework.imps.NamespaceWatcher.process(NamespaceWatcher.java:67) at org.apache.zookeeper.ClientCnxn$EventThread.processEvent(ClientCnxn.java:530) at org.apache.zookeeper.ClientCnxn$EventThread.run(ClientCnxn.java:505) Stack Trace: com.carrotsearch.randomizedtesting.ThreadLeakError: 2 threads leaked from SUITE scope at org.apache.solr.cloud.TestSolrCloudWithSecureImpersonation: 1) Thread[id=3397, name=jetty-launcher-787-thread-1-EventThread, state=TIMED_WAITING, group=TGRP-TestSolrCloudWithSecureImpersonation] at sun.misc.Unsafe.park(Native Method) at java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:215) at java.util.concurrent.locks.AbstractQueuedSynchronizer.doAcquireSharedNanos(AbstractQueuedSynchronizer.java:1037) at 
java.util.concurrent.locks.AbstractQueuedSynchronizer.tryAcquireSharedNanos(AbstractQueuedSynchronizer.java:1328) at java.util.concurrent.CountDownLatch.await(CountDownLatch.java:277) at org.apache.curator.CuratorZookeeperClient.internalBlockUntilConnectedOrTimedOut(CuratorZookeeperClient.java:323) at org.apache.curator.RetryLoop.callWithRetry(RetryLoop.java:105) at org.apache.curator.framework.imps.GetDataBuilderImpl.pathInForeground(GetDataBuilderImpl.java:288) at org.apache.curator.framework.imps.GetDataBuilderImpl.forPath(GetDataBuilderImpl.java:279) at
[JENKINS] Lucene-Solr-master-MacOSX (64bit/jdk1.8.0) - Build # 4410 - Still Unstable!
Build: https://jenkins.thetaphi.de/job/Lucene-Solr-master-MacOSX/4410/ Java: 64bit/jdk1.8.0 -XX:+UseCompressedOops -XX:+UseSerialGC 2 tests failed. FAILED: org.apache.solr.cloud.ChaosMonkeySafeLeaderTest.test Error Message: shard2 is not consistent. Got 18 from http://127.0.0.1:65392/collection1_shard2_replica_n41 (previous client) and got 15 from http://127.0.0.1:65402/collection1_shard2_replica_n45 Stack Trace: java.lang.AssertionError: shard2 is not consistent. Got 18 from http://127.0.0.1:65392/collection1_shard2_replica_n41 (previous client) and got 15 from http://127.0.0.1:65402/collection1_shard2_replica_n45 at __randomizedtesting.SeedInfo.seed([2055D7445EB42AE:8A5162AEEB172F56]:0) at org.junit.Assert.fail(Assert.java:93) at org.apache.solr.cloud.AbstractFullDistribZkTestBase.checkShardConsistency(AbstractFullDistribZkTestBase.java:1330) at org.apache.solr.cloud.AbstractFullDistribZkTestBase.checkShardConsistency(AbstractFullDistribZkTestBase.java:1309) at org.apache.solr.cloud.ChaosMonkeySafeLeaderTest.test(ChaosMonkeySafeLeaderTest.java:162) at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62) at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.lang.reflect.Method.invoke(Method.java:498) at com.carrotsearch.randomizedtesting.RandomizedRunner.invoke(RandomizedRunner.java:1737) at com.carrotsearch.randomizedtesting.RandomizedRunner$8.evaluate(RandomizedRunner.java:934) at com.carrotsearch.randomizedtesting.RandomizedRunner$9.evaluate(RandomizedRunner.java:970) at com.carrotsearch.randomizedtesting.RandomizedRunner$10.evaluate(RandomizedRunner.java:984) at org.apache.solr.BaseDistributedSearchTestCase$ShardsRepeatRule$ShardsFixedStatement.callStatement(BaseDistributedSearchTestCase.java:993) at org.apache.solr.BaseDistributedSearchTestCase$ShardsRepeatRule$ShardsStatement.evaluate(BaseDistributedSearchTestCase.java:968) at 
com.carrotsearch.randomizedtesting.rules.SystemPropertiesRestoreRule$1.evaluate(SystemPropertiesRestoreRule.java:57) at org.apache.lucene.util.TestRuleSetupTeardownChained$1.evaluate(TestRuleSetupTeardownChained.java:49) at org.apache.lucene.util.AbstractBeforeAfterRule$1.evaluate(AbstractBeforeAfterRule.java:45) at org.apache.lucene.util.TestRuleThreadAndTestName$1.evaluate(TestRuleThreadAndTestName.java:48) at org.apache.lucene.util.TestRuleIgnoreAfterMaxFailures$1.evaluate(TestRuleIgnoreAfterMaxFailures.java:64) at org.apache.lucene.util.TestRuleMarkFailure$1.evaluate(TestRuleMarkFailure.java:47) at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36) at com.carrotsearch.randomizedtesting.ThreadLeakControl$StatementRunner.run(ThreadLeakControl.java:368) at com.carrotsearch.randomizedtesting.ThreadLeakControl.forkTimeoutingTask(ThreadLeakControl.java:817) at com.carrotsearch.randomizedtesting.ThreadLeakControl$3.evaluate(ThreadLeakControl.java:468) at com.carrotsearch.randomizedtesting.RandomizedRunner.runSingleTest(RandomizedRunner.java:943) at com.carrotsearch.randomizedtesting.RandomizedRunner$5.evaluate(RandomizedRunner.java:829) at com.carrotsearch.randomizedtesting.RandomizedRunner$6.evaluate(RandomizedRunner.java:879) at com.carrotsearch.randomizedtesting.RandomizedRunner$7.evaluate(RandomizedRunner.java:890) at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36) at com.carrotsearch.randomizedtesting.rules.SystemPropertiesRestoreRule$1.evaluate(SystemPropertiesRestoreRule.java:57) at org.apache.lucene.util.AbstractBeforeAfterRule$1.evaluate(AbstractBeforeAfterRule.java:45) at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36) at org.apache.lucene.util.TestRuleStoreClassName$1.evaluate(TestRuleStoreClassName.java:41) at 
com.carrotsearch.randomizedtesting.rules.NoShadowingOrOverridesOnMethodsRule$1.evaluate(NoShadowingOrOverridesOnMethodsRule.java:40) at com.carrotsearch.randomizedtesting.rules.NoShadowingOrOverridesOnMethodsRule$1.evaluate(NoShadowingOrOverridesOnMethodsRule.java:40) at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36) at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36) at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36) at org.apache.lucene.util.TestRuleAssertionsRequired$1.evaluate(TestRuleAssertionsRequired.java:53) at org.apache.lucene.util.TestRuleMarkFailure$1.evaluate(TestRuleMarkFailure.java:47)
[JENKINS] Lucene-Solr-SmokeRelease-master - Build # 933 - Still Failing
Build: https://builds.apache.org/job/Lucene-Solr-SmokeRelease-master/933/ No tests ran. Build Log: [...truncated 28244 lines...] prepare-release-no-sign: [mkdir] Created dir: /x1/jenkins/jenkins-slave/workspace/Lucene-Solr-SmokeRelease-master/lucene/build/smokeTestRelease/dist [copy] Copying 491 files to /x1/jenkins/jenkins-slave/workspace/Lucene-Solr-SmokeRelease-master/lucene/build/smokeTestRelease/dist/lucene [copy] Copying 215 files to /x1/jenkins/jenkins-slave/workspace/Lucene-Solr-SmokeRelease-master/lucene/build/smokeTestRelease/dist/solr [smoker] Java 1.8 JAVA_HOME=/home/jenkins/tools/java/latest1.8 [smoker] NOTE: output encoding is UTF-8 [smoker] [smoker] Load release URL "file:/x1/jenkins/jenkins-slave/workspace/Lucene-Solr-SmokeRelease-master/lucene/build/smokeTestRelease/dist/"... [smoker] [smoker] Test Lucene... [smoker] test basics... [smoker] get KEYS [smoker] 0.2 MB in 0.12 sec (1.9 MB/sec) [smoker] check changes HTML... [smoker] download lucene-8.0.0-src.tgz... [smoker] 30.2 MB in 0.09 sec (341.2 MB/sec) [smoker] verify md5/sha1 digests [smoker] download lucene-8.0.0.tgz... [smoker] 73.0 MB in 0.10 sec (729.1 MB/sec) [smoker] verify md5/sha1 digests [smoker] download lucene-8.0.0.zip... [smoker] 83.4 MB in 0.08 sec (1101.0 MB/sec) [smoker] verify md5/sha1 digests [smoker] unpack lucene-8.0.0.tgz... [smoker] verify JAR metadata/identity/no javax.* or java.* classes... [smoker] test demo with 1.8... [smoker] got 6231 hits for query "lucene" [smoker] checkindex with 1.8... [smoker] check Lucene's javadoc JAR [smoker] unpack lucene-8.0.0.zip... [smoker] verify JAR metadata/identity/no javax.* or java.* classes... [smoker] test demo with 1.8... [smoker] got 6231 hits for query "lucene" [smoker] checkindex with 1.8... [smoker] check Lucene's javadoc JAR [smoker] unpack lucene-8.0.0-src.tgz... [smoker] make sure no JARs/WARs in src dist... [smoker] run "ant validate" [smoker] run tests w/ Java 8 and testArgs='-Dtests.slow=false'... 
[smoker] [smoker] command "export JAVA_HOME="/home/jenkins/tools/java/latest1.8" PATH="/home/jenkins/tools/java/latest1.8/bin:$PATH" JAVACMD="/home/jenkins/tools/java/latest1.8/bin/java"; ant clean test -Dtests.slow=false" failed: [smoker] Buildfile: /x1/jenkins/jenkins-slave/workspace/Lucene-Solr-SmokeRelease-master/lucene/build/smokeTestRelease/tmp/unpack/lucene-8.0.0/build.xml [smoker] [smoker] clean: [smoker][delete] Deleting directory /x1/jenkins/jenkins-slave/workspace/Lucene-Solr-SmokeRelease-master/lucene/build/smokeTestRelease/tmp/unpack/lucene-8.0.0/build [smoker] [smoker] ivy-availability-check: [smoker] [loadresource] Do not set property disallowed.ivy.jars.list as its length is 0. [smoker] [smoker] -ivy-fail-disallowed-ivy-version: [smoker] [smoker] ivy-fail: [smoker] [smoker] ivy-configure: [smoker] [ivy:configure] :: Apache Ivy 2.4.0 - 20141213170938 :: http://ant.apache.org/ivy/ :: [smoker] [ivy:configure] :: loading settings :: file = /x1/jenkins/jenkins-slave/workspace/Lucene-Solr-SmokeRelease-master/lucene/build/smokeTestRelease/tmp/unpack/lucene-8.0.0/top-level-ivy-settings.xml [smoker] [smoker] -clover.load: [smoker] [smoker] resolve-groovy: [smoker] [ivy:cachepath] :: resolving dependencies :: org.codehaus.groovy#groovy-all-caller;working [smoker] [ivy:cachepath] confs: [default] [smoker] [ivy:cachepath] found org.codehaus.groovy#groovy-all;2.4.12 in public [smoker] [ivy:cachepath] :: resolution report :: resolve 864ms :: artifacts dl 3ms [smoker] - [smoker] | |modules|| artifacts | [smoker] | conf | number| search|dwnlded|evicted|| number|dwnlded| [smoker] - [smoker] | default | 1 | 0 | 0 | 0 || 1 | 0 | [smoker] - [smoker] [smoker] -init-totals: [smoker] [smoker] test-core: [smoker] [smoker] -clover.disable: [smoker] [smoker] ivy-availability-check: [smoker] [loadresource] Do not set property disallowed.ivy.jars.list as its length is 0. 
[smoker] [smoker] -ivy-fail-disallowed-ivy-version: [smoker] [smoker] ivy-fail: [smoker] [smoker] ivy-configure: [smoker] [ivy:configure] :: loading settings :: file = /x1/jenkins/jenkins-slave/workspace/Lucene-Solr-SmokeRelease-master/lucene/build/smokeTestRelease/tmp/unpack/lucene-8.0.0/top-level-ivy-settings.xml [smoker] [smoker] -clover.load: [smoker] [smoker]
[jira] [Commented] (SOLR-11879) avoid creating a new Exception object for EOFException in FastinputStream
[ https://issues.apache.org/jira/browse/SOLR-11879?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel=16341809#comment-16341809 ] Noble Paul commented on SOLR-11879: --- right. I have attached a patch with that change > avoid creating a new Exception object for EOFException in FastinputStream > - > > Key: SOLR-11879 > URL: https://issues.apache.org/jira/browse/SOLR-11879 > Project: Solr > Issue Type: Improvement > Security Level: Public(Default Security Level. Issues are Public) > Environment: FastI >Reporter: Noble Paul >Assignee: Noble Paul >Priority: Trivial > Attachments: SOLR-11879.patch, SOLR-11879.patch, Screen Shot > 2018-01-24 at 7.26.16 PM.png > > > FastInputStream creates and throws a new EOFException, every time an end of > stream is encountered. This is wasteful as we never use the stack trace > anywhere -- This message was sent by Atlassian JIRA (v7.6.3#76005) - To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org For additional commands, e-mail: dev-h...@lucene.apache.org
[jira] [Updated] (SOLR-11879) avoid creating a new Exception object for EOFException in FastinputStream
[ https://issues.apache.org/jira/browse/SOLR-11879?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel ] Noble Paul updated SOLR-11879: -- Attachment: SOLR-11879.patch > avoid creating a new Exception object for EOFException in FastinputStream > - > > Key: SOLR-11879 > URL: https://issues.apache.org/jira/browse/SOLR-11879 > Project: Solr > Issue Type: Improvement > Security Level: Public(Default Security Level. Issues are Public) > Environment: FastI >Reporter: Noble Paul >Assignee: Noble Paul >Priority: Trivial > Attachments: SOLR-11879.patch, SOLR-11879.patch, Screen Shot > 2018-01-24 at 7.26.16 PM.png > > > FastInputStream creates and throws a new EOFException, every time an end of > stream is encountered. This is wasteful as we never use the stack trace > anywhere -- This message was sent by Atlassian JIRA (v7.6.3#76005) - To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org For additional commands, e-mail: dev-h...@lucene.apache.org
[jira] [Created] (SOLR-11919) V2 API support for the SystemInfoHandler
Varun Thacker created SOLR-11919: Summary: V2 API support for the SystemInfoHandler Key: SOLR-11919 URL: https://issues.apache.org/jira/browse/SOLR-11919 Project: Solr Issue Type: Improvement Security Level: Public (Default Security Level. Issues are Public) Reporter: Varun Thacker SystemInfoHandler does not have a V2 API. We should have a V2 equivalent for [http://localhost:8983/solr/admin/info/system] -- This message was sent by Atlassian JIRA (v7.6.3#76005) - To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org For additional commands, e-mail: dev-h...@lucene.apache.org
[JENKINS] Lucene-Solr-7.x-Windows (64bit/jdk1.8.0_144) - Build # 425 - Still Unstable!
Build: https://jenkins.thetaphi.de/job/Lucene-Solr-7.x-Windows/425/ Java: 64bit/jdk1.8.0_144 -XX:+UseCompressedOops -XX:+UseConcMarkSweepGC 14 tests failed. FAILED: junit.framework.TestSuite.org.apache.lucene.index.TestBackwardsCompatibility Error Message: Could not remove the following files (in the order of attempts): C:\Users\jenkins\workspace\Lucene-Solr-7.x-Windows\lucene\build\backward-codecs\test\J0\temp\lucene.index.TestBackwardsCompatibility_4FDD1E1270AFE325-001\6.2.0-cfs-002: java.nio.file.DirectoryNotEmptyException: C:\Users\jenkins\workspace\Lucene-Solr-7.x-Windows\lucene\build\backward-codecs\test\J0\temp\lucene.index.TestBackwardsCompatibility_4FDD1E1270AFE325-001\6.2.0-cfs-002 Stack Trace: java.io.IOException: Could not remove the following files (in the order of attempts): C:\Users\jenkins\workspace\Lucene-Solr-7.x-Windows\lucene\build\backward-codecs\test\J0\temp\lucene.index.TestBackwardsCompatibility_4FDD1E1270AFE325-001\6.2.0-cfs-002: java.nio.file.DirectoryNotEmptyException: C:\Users\jenkins\workspace\Lucene-Solr-7.x-Windows\lucene\build\backward-codecs\test\J0\temp\lucene.index.TestBackwardsCompatibility_4FDD1E1270AFE325-001\6.2.0-cfs-002 at __randomizedtesting.SeedInfo.seed([4FDD1E1270AFE325]:0) at org.apache.lucene.util.IOUtils.rm(IOUtils.java:329) at org.apache.lucene.util.TestRuleTemporaryFilesCleanup.afterAlways(TestRuleTemporaryFilesCleanup.java:216) at com.carrotsearch.randomizedtesting.rules.TestRuleAdapter$1.afterAlways(TestRuleAdapter.java:31) at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:43) at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36) at org.apache.lucene.util.TestRuleAssertionsRequired$1.evaluate(TestRuleAssertionsRequired.java:53) at org.apache.lucene.util.TestRuleMarkFailure$1.evaluate(TestRuleMarkFailure.java:47) at org.apache.lucene.util.TestRuleIgnoreAfterMaxFailures$1.evaluate(TestRuleIgnoreAfterMaxFailures.java:64) at 
org.apache.lucene.util.TestRuleIgnoreTestSuites$1.evaluate(TestRuleIgnoreTestSuites.java:54) at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36) at com.carrotsearch.randomizedtesting.ThreadLeakControl$StatementRunner.run(ThreadLeakControl.java:368) at java.lang.Thread.run(Thread.java:748) FAILED: junit.framework.TestSuite.org.apache.lucene.store.TestMultiMMap Error Message: Could not remove the following files (in the order of attempts): C:\Users\jenkins\workspace\Lucene-Solr-7.x-Windows\lucene\build\core\test\J1\temp\lucene.store.TestMultiMMap_FE44E13CFBABF78A-001\testSeekSliceZero-017: java.nio.file.DirectoryNotEmptyException: C:\Users\jenkins\workspace\Lucene-Solr-7.x-Windows\lucene\build\core\test\J1\temp\lucene.store.TestMultiMMap_FE44E13CFBABF78A-001\testSeekSliceZero-017 Stack Trace: java.io.IOException: Could not remove the following files (in the order of attempts): C:\Users\jenkins\workspace\Lucene-Solr-7.x-Windows\lucene\build\core\test\J1\temp\lucene.store.TestMultiMMap_FE44E13CFBABF78A-001\testSeekSliceZero-017: java.nio.file.DirectoryNotEmptyException: C:\Users\jenkins\workspace\Lucene-Solr-7.x-Windows\lucene\build\core\test\J1\temp\lucene.store.TestMultiMMap_FE44E13CFBABF78A-001\testSeekSliceZero-017 at __randomizedtesting.SeedInfo.seed([FE44E13CFBABF78A]:0) at org.apache.lucene.util.IOUtils.rm(IOUtils.java:329) at org.apache.lucene.util.TestRuleTemporaryFilesCleanup.afterAlways(TestRuleTemporaryFilesCleanup.java:216) at com.carrotsearch.randomizedtesting.rules.TestRuleAdapter$1.afterAlways(TestRuleAdapter.java:31) at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:43) at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36) at org.apache.lucene.util.TestRuleAssertionsRequired$1.evaluate(TestRuleAssertionsRequired.java:53) at org.apache.lucene.util.TestRuleMarkFailure$1.evaluate(TestRuleMarkFailure.java:47) at 
org.apache.lucene.util.TestRuleIgnoreAfterMaxFailures$1.evaluate(TestRuleIgnoreAfterMaxFailures.java:64) at org.apache.lucene.util.TestRuleIgnoreTestSuites$1.evaluate(TestRuleIgnoreTestSuites.java:54) at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36) at com.carrotsearch.randomizedtesting.ThreadLeakControl$StatementRunner.run(ThreadLeakControl.java:368) at java.lang.Thread.run(Thread.java:748) FAILED: junit.framework.TestSuite.org.apache.lucene.search.suggest.analyzing.AnalyzingSuggesterTest Error Message: Could not remove the following files (in the order of attempts):
[jira] [Created] (SOLR-11918) Document usage of SystemInfoHandler at the node level
Varun Thacker created SOLR-11918: Summary: Document usage of SystemInfoHandler at the node level Key: SOLR-11918 URL: https://issues.apache.org/jira/browse/SOLR-11918 Project: Solr Issue Type: Improvement Security Level: Public (Default Security Level. Issues are Public) Reporter: Varun Thacker [http://localhost:8983/solr/admin/info/system] gives us info about the node . It's useful for monitoring scripts to use some information from here. Currently it's not documented in the ref guide . Perhaps the best place would be a section under "Deployment and Operations" for best practices on monitoring a cluster. -- This message was sent by Atlassian JIRA (v7.6.3#76005) - To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org For additional commands, e-mail: dev-h...@lucene.apache.org
[jira] [Updated] (SOLR-11916) new SortableTextField using docValues built from the original string input
[ https://issues.apache.org/jira/browse/SOLR-11916?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel ] Hoss Man updated SOLR-11916: Description: I propose adding a new SortableTextField subclass that would functionally work the same as TextField except: * {{docValues="true|false"}} could be configured, with the default being "true" * The docValues would contain the original input values (just like StrField) for sorting (or faceting) ** By default, to protect users from excessively large docValues, only the first 1024 of each field value would be used – but this could be overridden with configuration. Consider the following sample configuration: {code:java} ... ... {code} Given a document with a title of "Solr In Action" Users could: * Search for individual (indexed) terms in the "title" field: {{q=title:solr}} * Sort documents by title ( {{sort=title asc}} ) such that this document's sort value would be "Solr In Action" If another document had a "title" value that was longer then 1024 chars, then the docValues would be built using only the first 1024 characters of the value (unless the user modified the configuration) This would be functionally equivalent to the following existing configuration - including the on disk index segments - except that the on disk DocValues would refer directly to the "title" field, reducing the total number of "field infos" in the index (which has a small impact on segment housekeeping and merge times) and end users would not need to sort on an alternate "title_string" field name - the original "title" field name would always be used directly. 
{code:java} {code} was: I propose adding a new SortableTextField subclass that would functionally work the same as TextField except: * {{docValues="true|false"}} could be configured, with the default being "true" * The docValues would contain the original input values (just like StrField) for sorting (or faceting) ** By default, to protect users from excessively large docValues, only the first 1024 of each field value would be used -- but this could be overridden with configuration. Consider the following sample configuration: {code} ... ... {code} Given a document with a title of "Solr In Action" Users could: * Search for individual (indexed) terms in the "title" field: {{q=title:solr}} * Sort documents by title ( {{sort=title asc}} ) such that this document's sort value would be "Solr In Action" If another document had a "title" value that was longer then 1024 chars, then the docValues would be built using only the first 1024 characters of the value (unless the user modified the configuration) This would be functionally equivalent to the following existing configuration - including the on disk index segments - except that the on disk DocValues would refer directly to the "title" field, reducing the total number of "field infos" in the index (which has a small impact on segment housekeeping and merge times) and end users would not need to sort on an alternate "title_string" field name - the original "title" field name would always be used directly. {code} {code} NOTE: I edited the issue description to update the example configuration from using {{maxChars="1024"}} to {{maxCharsForDocValues="1024"}} ... 
I forgot when creating this Jira that I had made that option a bit more verbose in the patch to avoid any risk that people might assume it limited the number of characters being *indexed* > new SortableTextField using docValues built from the original string input > -- > > Key: SOLR-11916 > URL: https://issues.apache.org/jira/browse/SOLR-11916 > Project: Solr > Issue Type: Improvement > Security Level: Public(Default Security Level. Issues are Public) >Reporter: Hoss Man >Assignee: Hoss Man >Priority: Major > Attachments: SOLR-11916.patch > > > I propose adding a new SortableTextField subclass that would functionally > work the same as TextField except: > * {{docValues="true|false"}} could be configured, with the default being > "true" > * The docValues would contain the original input values (just like StrField) > for sorting (or faceting) > ** By default, to protect users from excessively large docValues, only the > first 1024 of each field value would be used – but this could be overridden > with configuration. > > Consider the following sample configuration: > {code:java} > indexed="true" docValues="true" stored="true" multiValued="false"/> > > >... > > >... > > > {code} > Given a document with a title of "Solr In Action" > Users could: > * Search for individual (indexed) terms in the "title" field: > {{q=title:solr}} > * Sort documents by title (
[jira] [Commented] (SOLR-11916) new SortableTextField using docValues built from the original string input
[ https://issues.apache.org/jira/browse/SOLR-11916?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel=16341765#comment-16341765 ] Hoss Man commented on SOLR-11916: - Hmmm... good point. I thought that since this new type didn't go out of it's way to enable "useDocValuesAsStored" that it was a non-issue and the docValues would _never_ be used in place of a stored value (but even if that were true, we should definitely have a test proving it) Skimming the relevant code now I realize that as a FieldProperty, the schemaVersion is the only thing that drives the (default) value of USE_DOCVALUES_AS_STORED regardless of the FieldType impl – so you are absolutely correct, we need to do "something" in {{SortableTextField}} to account for this propery. {quote}Perhaps this fieldType overrides useDocValuesAsStored() to see if maxChars=-1 (no limit) so it can vary it's output based on that? {quote} Hmm ... My concern with that approach is that it might be confusing the users how explicitly set {{useDocValuesAsStored="true" stored="false"}} on a fieldType (or field) – perhaps w/o even being aware of the default maxChars safety valve – and then don't understand why they aren't getting any values back? One possibility would be be for {{SortableTextField.init}} to override the (implicit) default {{useDocValuesAsStored=(schemaVersion>1.6)}} with it's own default based on {{useDocValuesAsStored=(maxChars==-1)}} _and_ fail with a server error (on init) if a configuration includes an explicit {{useDocValuesAsStored=true maxChars="anything other then -1"}} ? 
Personally, my vote would be – at least initially – to just say "useDocValuesAsStored is not supported for SortableTextField", set the default appropriately & fail on init if anyone tries to explicitly set it to "true" but since FieldProperites can be set on both the {{fieldType}} and the {{field}} I don't think it would even be possible for a fieldType to *stop* someone from creating a {{ new SortableTextField using docValues built from the original string input > -- > > Key: SOLR-11916 > URL: https://issues.apache.org/jira/browse/SOLR-11916 > Project: Solr > Issue Type: Improvement > Security Level: Public(Default Security Level. Issues are Public) >Reporter: Hoss Man >Assignee: Hoss Man >Priority: Major > Attachments: SOLR-11916.patch > > > I propose adding a new SortableTextField subclass that would functionally > work the same as TextField except: > * {{docValues="true|false"}} could be configured, with the default being > "true" > * The docValues would contain the original input values (just like StrField) > for sorting (or faceting) > ** By default, to protect users from excessively large docValues, only the > first 1024 of each field value would be used -- but this could be overridden > with configuration. > > Consider the following sample configuration: > {code} > indexed="true" docValues="true" stored="true" multiValued="false"/> > > >... > > >... 
> > > {code} > Given a document with a title of "Solr In Action" > Users could: > * Search for individual (indexed) terms in the "title" field: {{q=title:solr}} > * Sort documents by title ( {{sort=title asc}} ) such that this document's > sort value would be "Solr In Action" > If another document had a "title" value that was longer then 1024 chars, then > the docValues would be built using only the first 1024 characters of the > value (unless the user modified the configuration) > This would be functionally equivalent to the following existing configuration > - including the on disk index segments - except that the on disk DocValues > would refer directly to the "title" field, reducing the total number of > "field infos" in the index (which has a small impact on segment housekeeping > and merge times) and end users would not need to sort on an alternate > "title_string" field name - the original "title" field name would always be > used directly. > {code} > indexed="true" docValues="true" stored="true" multiValued="false"/> > indexed="false" docValues="true" stored="false" multiValued="false"/> > > {code} -- This message was sent by Atlassian JIRA (v7.6.3#76005) - To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org For additional commands, e-mail: dev-h...@lucene.apache.org
[jira] [Commented] (SOLR-8362) Add docValues support for TextField
[ https://issues.apache.org/jira/browse/SOLR-8362?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel=16341721#comment-16341721 ] Hoss Man commented on SOLR-8362: FWIW, I've created 2 related issues that folks watching this Jira may want to look at: * SOLR-11916 - proposes (w/patch) a SortableTextField subclass that would provide a straightforward way for people to index & search analyzed text content while still sorting on the original string (via docValues * SOLR-11917 - a very broad scope "hypothetical roadmap" where I've posted a lot of brainstorming related to improving the "ease of use" of some common usecases I've seen in dealing with text fields & analyzers that typically involve a lot of copyField workarounds. A big part of that issue is discussing how to tackle configurability in terms of supporting docValues on TextField and what analysis should be used there. > Add docValues support for TextField > --- > > Key: SOLR-8362 > URL: https://issues.apache.org/jira/browse/SOLR-8362 > Project: Solr > Issue Type: Improvement >Reporter: Hoss Man >Priority: Major > > At the last lucene/solr revolution, Toke asked a question about why TextField > doesn't support docValues. The short answer is because no one ever added it, > but the longer answer was because we would have to think through carefully > the _intent_ of supporting docValues for a "tokenized" field like TextField, > and how to support various conflicting usecases where they could be handy. -- This message was sent by Atlassian JIRA (v7.6.3#76005) - To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org For additional commands, e-mail: dev-h...@lucene.apache.org
[jira] [Commented] (SOLR-11916) new SortableTextField using docValues built from the original string input
[ https://issues.apache.org/jira/browse/SOLR-11916?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel=16341722#comment-16341722 ] David Smiley commented on SOLR-11916: - Cool! {{maxChars}} raises some concerns that need to be documented and cared for in the code. We'd have a field with docValues, but it's value isn't necessarily a substitute for the "stored" version. We have code in places right now that will need to know about this exception to useDocValueAsStored. Perhaps this fieldType overrides useDocValuesAsStored() to see if maxChars=-1 (no limit) so it can vary it's output based on that? > new SortableTextField using docValues built from the original string input > -- > > Key: SOLR-11916 > URL: https://issues.apache.org/jira/browse/SOLR-11916 > Project: Solr > Issue Type: Improvement > Security Level: Public(Default Security Level. Issues are Public) >Reporter: Hoss Man >Assignee: Hoss Man >Priority: Major > Attachments: SOLR-11916.patch > > > I propose adding a new SortableTextField subclass that would functionally > work the same as TextField except: > * {{docValues="true|false"}} could be configured, with the default being > "true" > * The docValues would contain the original input values (just like StrField) > for sorting (or faceting) > ** By default, to protect users from excessively large docValues, only the > first 1024 of each field value would be used -- but this could be overridden > with configuration. > > Consider the following sample configuration: > {code} > indexed="true" docValues="true" stored="true" multiValued="false"/> > > >... > > >... 
> > > {code} > Given a document with a title of "Solr In Action" > Users could: > * Search for individual (indexed) terms in the "title" field: {{q=title:solr}} > * Sort documents by title ( {{sort=title asc}} ) such that this document's > sort value would be "Solr In Action" > If another document had a "title" value that was longer then 1024 chars, then > the docValues would be built using only the first 1024 characters of the > value (unless the user modified the configuration) > This would be functionally equivalent to the following existing configuration > - including the on disk index segments - except that the on disk DocValues > would refer directly to the "title" field, reducing the total number of > "field infos" in the index (which has a small impact on segment housekeeping > and merge times) and end users would not need to sort on an alternate > "title_string" field name - the original "title" field name would always be > used directly. > {code} > indexed="true" docValues="true" stored="true" multiValued="false"/> > indexed="false" docValues="true" stored="false" multiValued="false"/> > > {code} -- This message was sent by Atlassian JIRA (v7.6.3#76005) - To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org For additional commands, e-mail: dev-h...@lucene.apache.org
[JENKINS] Lucene-Solr-Tests-master - Build # 2277 - Still unstable
Build: https://builds.apache.org/job/Lucene-Solr-Tests-master/2277/ 2 tests failed. FAILED: org.apache.solr.cloud.autoscaling.TriggerIntegrationTest.testSetProperties Error Message: Expected 8 triggers but found: [x0, x1, x2, x3, x4, x5, x6] expected:<8> but was:<7> Stack Trace: java.lang.AssertionError: Expected 8 triggers but found: [x0, x1, x2, x3, x4, x5, x6] expected:<8> but was:<7> at __randomizedtesting.SeedInfo.seed([17AF1F5CCA8EC928:7CCBC81579A35CAC]:0) at org.junit.Assert.fail(Assert.java:93) at org.junit.Assert.failNotEquals(Assert.java:647) at org.junit.Assert.assertEquals(Assert.java:128) at org.junit.Assert.assertEquals(Assert.java:472) at org.apache.solr.cloud.autoscaling.TriggerIntegrationTest.testSetProperties(TriggerIntegrationTest.java:1322) at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62) at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.lang.reflect.Method.invoke(Method.java:498) at com.carrotsearch.randomizedtesting.RandomizedRunner.invoke(RandomizedRunner.java:1737) at com.carrotsearch.randomizedtesting.RandomizedRunner$8.evaluate(RandomizedRunner.java:934) at com.carrotsearch.randomizedtesting.RandomizedRunner$9.evaluate(RandomizedRunner.java:970) at com.carrotsearch.randomizedtesting.RandomizedRunner$10.evaluate(RandomizedRunner.java:984) at com.carrotsearch.randomizedtesting.rules.SystemPropertiesRestoreRule$1.evaluate(SystemPropertiesRestoreRule.java:57) at org.apache.lucene.util.TestRuleSetupTeardownChained$1.evaluate(TestRuleSetupTeardownChained.java:49) at org.apache.lucene.util.AbstractBeforeAfterRule$1.evaluate(AbstractBeforeAfterRule.java:45) at org.apache.lucene.util.TestRuleThreadAndTestName$1.evaluate(TestRuleThreadAndTestName.java:48) at org.apache.lucene.util.TestRuleIgnoreAfterMaxFailures$1.evaluate(TestRuleIgnoreAfterMaxFailures.java:64) at 
org.apache.lucene.util.TestRuleMarkFailure$1.evaluate(TestRuleMarkFailure.java:47) at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36) at com.carrotsearch.randomizedtesting.ThreadLeakControl$StatementRunner.run(ThreadLeakControl.java:368) at com.carrotsearch.randomizedtesting.ThreadLeakControl.forkTimeoutingTask(ThreadLeakControl.java:817) at com.carrotsearch.randomizedtesting.ThreadLeakControl$3.evaluate(ThreadLeakControl.java:468) at com.carrotsearch.randomizedtesting.RandomizedRunner.runSingleTest(RandomizedRunner.java:943) at com.carrotsearch.randomizedtesting.RandomizedRunner$5.evaluate(RandomizedRunner.java:829) at com.carrotsearch.randomizedtesting.RandomizedRunner$6.evaluate(RandomizedRunner.java:879) at com.carrotsearch.randomizedtesting.RandomizedRunner$7.evaluate(RandomizedRunner.java:890) at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36) at com.carrotsearch.randomizedtesting.rules.SystemPropertiesRestoreRule$1.evaluate(SystemPropertiesRestoreRule.java:57) at org.apache.lucene.util.AbstractBeforeAfterRule$1.evaluate(AbstractBeforeAfterRule.java:45) at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36) at org.apache.lucene.util.TestRuleStoreClassName$1.evaluate(TestRuleStoreClassName.java:41) at com.carrotsearch.randomizedtesting.rules.NoShadowingOrOverridesOnMethodsRule$1.evaluate(NoShadowingOrOverridesOnMethodsRule.java:40) at com.carrotsearch.randomizedtesting.rules.NoShadowingOrOverridesOnMethodsRule$1.evaluate(NoShadowingOrOverridesOnMethodsRule.java:40) at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36) at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36) at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36) at 
org.apache.lucene.util.TestRuleAssertionsRequired$1.evaluate(TestRuleAssertionsRequired.java:53) at org.apache.lucene.util.TestRuleMarkFailure$1.evaluate(TestRuleMarkFailure.java:47) at org.apache.lucene.util.TestRuleIgnoreAfterMaxFailures$1.evaluate(TestRuleIgnoreAfterMaxFailures.java:64) at org.apache.lucene.util.TestRuleIgnoreTestSuites$1.evaluate(TestRuleIgnoreTestSuites.java:54) at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36) at com.carrotsearch.randomizedtesting.ThreadLeakControl$StatementRunner.run(ThreadLeakControl.java:368) at java.lang.Thread.run(Thread.java:748) FAILED: org.apache.solr.cloud.autoscaling.TriggerIntegrationTest.testMetricTrigger Error
[jira] [Commented] (SOLR-11722) API to create a Time Routed Alias and first collection
[ https://issues.apache.org/jira/browse/SOLR-11722?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel=16341708#comment-16341708 ] David Smiley commented on SOLR-11722: - In retrospect... I wonder if it would make the overall API less busy if create-alias had these options instead of creating a new API command? We already have it combined at the CreateAliasCmd level. When it comes time to delete the whole thing, again, we just add logic to delete underlying collections to the existing alias deletion. > API to create a Time Routed Alias and first collection > -- > > Key: SOLR-11722 > URL: https://issues.apache.org/jira/browse/SOLR-11722 > Project: Solr > Issue Type: Sub-task > Security Level: Public(Default Security Level. Issues are Public) > Components: SolrCloud >Reporter: David Smiley >Assignee: David Smiley >Priority: Major > Fix For: 7.3 > > Attachments: SOLR-11722.patch, SOLR-11722.patch > > > This issue is about creating a single API command to create a "Time Routed > Alias" along with its first collection. Need to decide what endpoint URL it > is and parameters. > Perhaps in v2 it'd be {{/api/collections?command=create-routed-alias}} or > alternatively piggy-back off of command=create-alias but we add more options, > perhaps with a prefix like "router"? > Inputs: > * alias name > * misc collection creation metadata (e.g. config, numShards, ...) perhaps in > this context with a prefix like "collection." > * metadata for TimeRoutedAliasUpdateProcessor, currently: router.field > * date specifier for first collection; can include "date math". > We'll certainly add more options as future features unfold. > I believe the collection needs to be created first (referring to the alias > name via a core property), and then the alias pointing to it which demands > collections exist first. When figuring the collection name, you'll need to > reference the format in TimeRoutedAliasUpdateProcessor. 
-- This message was sent by Atlassian JIRA (v7.6.3#76005) - To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org For additional commands, e-mail: dev-h...@lucene.apache.org
[jira] [Commented] (SOLR-11917) A Potential Roadmap for robust multi-analyzer TextFields w/various options for configuring docValues
[ https://issues.apache.org/jira/browse/SOLR-11917?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel=16341693#comment-16341693 ] Hoss Man commented on SOLR-11917: - h2. *S2.2*: Support configuring fieldTypes with many arbitrarily named analyzers h3. *S2.2G*: Goal The initial goal i considered was based solely on SOLR-5053: * Advanced users may want to use arbitrary analyzers at query time – either by "name" when parsing a query, or based on special logic in custom plugins. But when considered in conjunction with some of the other Usecases discussed here, I realized there was a lot of potenial for general purpose improvements to the "plumbing" of IndexSchema to supporting multiple analyzers... * Multilingual "fields" _might_ benefit from having "analyzer per lang" configurable on a field type (see *S2.1.STRAW1* above) * It would allow us to define arbitrary "docValues" analyzer for TextFields (see *S1.3* below) h3. *S2.2BAD*: Bad Ideas – aka: NOT-Suggested Approach(es) For the sake of completeness, let's set aside for a moment the broader benefit of a robust solution to supporting arbitrary analyzers for TextField, and consider how we might tackle the "user wants to pick an analyzer by name at query time" with the least amount of development work (ie: "rapid prototype") ... *Bad Idea #1:* * subclass of TextField * at query time, getFieldQuery() looks for an "ftanalyzer" local param from QParser, ** if exists: use that to find some other FieldType whose Analyzer we use in place of our own * NOTE: see *S2.1.HITCH* above about SolrQueryParserBase/QueryBuilder in previous topic – also apply here except in simple cases ** Although in a super special case situation like this, maybe a limitation of "only works with 'field' QParser" would be fine? ... but in that case, see Bad Idea #2... 
*Bad Idea #2:* * assume this usecase/situation is so niche, that we don't need general query parser support * assume we can tell people the *must* use a specific QParser to do it * No need for a special TextField subclass – instead create a new 'ForceAnalyzerQParser' ** use it just like the "field" QParser, but it also takes in a "forceFieldType" localparam ** use the "forceFieldType" to pick a fieldType by name, and ask it for getFieldQuery(...) using the original field name * Example: {noformat} # query the 'title' field for "how now brown cow" using the analyzer from the 'nostopwords' q={!forceanalyzer f=title forceFieldType='nostopwords'}how+now+brown+cow {noformat} h3. *S2.2A*: Suggested Approach – aka: Good Idea AFAICT, This is what it would take to support arbitrary analyzers on TextFields "the right way"... * FieldType method deprecation/replacement ** deprecate/replace these overly specific methods in FieldType... {code:java} public final void setQueryAnalyzer(Analyzer analyzer) public void setIsExplicitQueryAnalyzer(boolean isExplicitQueryAnalyzer) public boolean isExplicitQueryAnalyzer() // public final void setIndexAnalyzer(Analyzer analyzer) public void setIsExplicitAnalyzer(boolean explicitAnalyzer) public boolean isExplicitAnalyzer() {code} ** With code like these... {code:java} /** For setting the (explicitly) configured analyzers, key is type, can throw an exception if a type is not allowed/supported, but must ultimately call super for getExplicitAnalyzers() to work Default impl calls changeExplicitAnalyzers() for validation */ public final void setExplicitAnalyzers(Map) throws SolrException /** exactly what was configured/passed to setExplicitAnalyzers */ public final Map getExplicitAnalyzers() /** can create & track any implicit /synthetic analyzers needed based on what's explicitly configured. can throw an exception if an explicitly configured type is not allowed/supported. 
See below for discussion of default impl */ public void changeExplicitAnalyzers(Map ) throws SolrException /* returns either an explicit analyzer configured with that type, or may return an alternate (or synthetic created) analyzer deemed suitable for that purpose, or null if nothing suitable. default impl only supports "index" and "query" */ public Analyzer getAnalyzer(String type) throws SolrException {code} * {{FieldTypePluginLoader.create}} refactoring... ** instead of looking for specific analyzers, call {{readAnalyzer}} on all {{"./analyzer"}} XML nodes *** if no {{type}} attribute, use {{type=null}} ** build up {{Map }} using everything found *** if, as the Map is being built, any key already exists, fail w/exception about the field type having multiple analyzers with the same type. ** delegate to {{FieldType.setExplicitAnalyzers()}} *** all the existing special case handling in this method should be refactored into {{FieldType.changeExplicitAnalyzers() ...}} {{HasImplicitIndexAnalyzer}} query
[jira] [Commented] (SOLR-11917) A Potential Roadmap for robust multi-analyzer TextFields w/various options for configuring docValues
[ https://issues.apache.org/jira/browse/SOLR-11917?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel=16341692#comment-16341692 ] Hoss Man commented on SOLR-11917: - h2. *S2.1*: Easy Multi-Language Querying (SOLR-6492) h3. *S2.1G*: Goal Simplified indexing & querying of text in diff languages w/o the query clients being _required_ to know about a lot of language specific variant field names. At index time we want things to be "easy" for clients wending documents, regardless of whether they already know the lang of each field value in advance, or if they want solr to do langauge detection. h3. *S2.1A*: Suggested Approach {panel:title=Refresher: Summary of Solr In Action (SIA) code linked to from} SOLR-6492 *What's included & how it works...* * custom update process & custom field type ** processor is subclass of existing lang detect update processor *** super class normally adds a field with languages in doc, or renames fields to include language (ie: text => text_de) ** field type is subclass of TextField *** goes out of it's way to override any Analyzer config with a custom one (details below) *** configured with a list of mappings from langid to other (existing) field types * Index Time: ** update processor delegates to super to detect languages but instead of (in addition to?) super class's behavior of adding a language field to doc, or renaming the field with suffix, the custom processor "decorates" the values with the detected language(s)... 
** for any field where the field type is our custom type: *** "decorate" each of the field values with either: the langs of the whole doc the langs of the field (after re-running lang detect on all values in just that field) the langs of the individual field value (after re-running lang detect on just that field value) ** other processors can then run as normal, and eventually the IndexSchema is asked to build up the IndexableFields for this doc, and it delegates to the (custom) field type for these "decorated" fields... ** field type's custom analyzer looks for these lang "decorations" on each field value *** for every lang found, go fetch the analyzer from the mapped field type it's configured with *** create a token stream that delegates to all the other analyzers & merges the resulting token streams *** all this custom delegation/merging tokenstream stuff is (optionally/wisely) wrapped in RemoveDuplicatesTokenFilter since there can be lots of dup tokens for similar languages. * Query Time: ** the query string provided by the user can be "decorated" with a list of languages ** the normal plumbing of TextField analyzes the query string, delegating to the various analyzers *** AFAICT: this means MultiTermPhraseQueries are frequently produced? * NOTE: as mentioned in Trey's LR talk for 2014, a "perk" of this solution (over using diff fields per languages) is that mixing languages in one field value can – in theory – still produce useful phrase queries, even if the non-correct analyzers butcher the terms in other languages such that a single phrase produced by either language analyzer wouldn't match the original string ** [https://www.youtube.com/watch?v=MQ6WtBw8T_U] ** BUT: it's not really clear if/how useful/important this is. 
_Does any one have any actual usecases for this???_ *The Fiddly / Awkward / Problematic Bits Of All This Existing Code* * language "decoration" is super hackish ** index time: *** the update processor prepends them as a string *** not a lot of easy improvements currently possible given the current SolrInputDocument / UpdateProcessor / DocumentBuilder structure / code paths fixing this "THE RIGHT WAY" would probably require some pretty big changes to all this code so SolrInputField could support arbitrary metadata (instead of just "boost" like it does today) and passing the SolrInputFields all the way to the FieldType's createFields method the hackish way to do this might be to follow in the footsteps of atomic update with "field value may be a map containing magic keys", but... * this would probably break Atomic Updates (unexpected keys in the Maps it thinks it owns) * this was already a super heinous API hack and hacks this heinous should not be reworded by being copied * Even if we did this, i'm not certain the FieldType's createFields() would get the full Map w/o a bunch of other changes in the middle – if we're going to have to change existing DocumentBuilder/IndexSchema code to make this work, let's not be heinous about it. ** query time: *** user must prefixing the terms _inside_ the query strings – _after_ the field name *** example: {{q=my_multi_lang_field:"en,es|Hello there compadre"}} *** fixing this in a sane way should be really straight forward... all of the "public Query getFoo(...)" methods a FieldType must implement take in the QParser
[jira] [Commented] (SOLR-11917) A Potential Roadmap for robust multi-analyzer TextFields w/various options for configuring docValues
[ https://issues.apache.org/jira/browse/SOLR-11917?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel=16341687#comment-16341687 ] Hoss Man commented on SOLR-11917: - h1. Some Concrete Thoughts On *S*olutions *NOTE:* While there is a one-to-one corrispondice in the naming/numbering of the *U*secases listed above and the proposed *S*olutions listed below, I have ordered the *S*olutions in the way that I think makes the most sense from an "explaining how to achieve things" standpoint. h2. *S1.1*: A 'SortableTextField' that builds docValues using the original text input h3. *S1.1G*: Goal A new SortableTextField subclass would be added that would functionally work the same as TextField except: * {{docValues="true|false"}} could be configured, with the default being "true" * The docValues would contain (a prefix of) the original input values (just like StrField) for sorting (or faceting) ** By default, to protect users from excessively large docValues, only the first 1024 of each field value would be used – but this could be overridden with configuration. h3. *S1.1E*: Example Usage Consider the following sample configuration: {code:java} ... ... 
{code} Given a document with a title of "Solr In Action" Users could: * Search for individual (indexed) terms in the "title" field: {{q=title:solr}} * Sort documents by title ( {{sort=title asc}} ) such that this document's sort value would be "Solr In Action" If another document had a "title" value that was longer then 1024 chars, then the docValues would be built using only the first 1024 characters of the value (unless the user modified the configuration) NOTE: This would be functionally equivalent to the following existing configuration - including the on disk index segments - except that the on disk DocValues would refer directly to the "title" field, reducing the total number of "field infos" in the index (which has a small impact on segment housekeeping and merge times) and end users would not need to sort on an alternate "title_string" field name - the original "title" field name would always be used directly. {code:java} {code} h3. *S1.1A*: Suggested Approach (SOLR-11916) While experimenting with a quick POC for this idea, I actually wound up building a {{SortableTextField}} that is feature complete. See patch in SOLR-11916. NOTE: If/when *S1.3A* is implemented, this SortableTextField could be refactored to be syntactic sugar for TextField w/ some added defaults – see below. h2. *S1.2*: A 'TermDocValuesTextField' that builds docValues using the post-analysis terms h3. *S1.2G*: Goal A new TermDocValuesTextField subclass would be added that would functionally work the same as TextField except: * {{docValues="true|false"}} could be configured, with the default being "true" * Instances of fields using this type would support faceting (or sorting), using DocValues build from the terms produced by the "index" analyzer ** NOTE: Sorting on this type of field would only make sense in some special circumstances depending on the analyzer used (ie: KeywordTokenizer) h3. *S1.2E*: Example Usage Consider the following sample configuration {code:java} ... 
{code} Given a document with an author of "Grainger, Trey" and keywords value of of "book lucene solr" Users could: * Search for individual (indexed) terms in the "keywords" field: q=keywords:book * Facet on the keywords field (facet.field=keywords) such that if this were the only document in the index, the facet counts would be "book=1, lucene=1, solr=1" * Sort documents by author (sort=title asc) such that this document's sort value would be "grainger, trey" NOTE: This should be functionally equivalent to users faceting on a "keywords" TextField (or sorting on an "author" TextField using KeywordTokenizer) today, except that the facet/sort values would come from DocValues (written at indexing time), and not the FieldCache (built on the fly at query time and held solely in RAM). h3. *S1.2A*: Suggested Approach * Add a new TermDocValuesTextField subclass of TextField * if docValues="true": ** Augment the configured "index" analyzer to record each resulting token from the stream in a Set ** When indexing, pre-analyze/buffer the token stream and use the recorded Set of tokens to build additional SortedSetDocValuesField instances in the underling indexed document * OPTIMIZATION?: We may be able to avoid the pre-analysis/buffering of the TokenStream and instead hook into the low level indexing code with a callback to generate the SortedSetDocValuesField instances on the fly as the DocumentsWriter reads from the (original) TokenStream ... needs experimentation/refactoring once we have some tests. NOTE: If/when *S1.3A* is implemented, this TermDocValuesTextField could be refactored to be syntactic sugar for TextField w/ some added
[jira] [Commented] (SOLR-11917) A Potential Roadmap for robust multi-analyzer TextFields w/various options for configuring docValues
[ https://issues.apache.org/jira/browse/SOLR-11917?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel=16341681#comment-16341681 ] Hoss Man commented on SOLR-11917: - h2. Hoss'ss High Level Thoughts on these Goals / *U*secases While it's certainly possible to tackle some of these objectives independently of the others, either from a standpoint of incremental feature delivery, or from a standpoint of "end user ease of use" there definitely seems to be some overlap here that's worth considering. In particular: * While there is certainly some non-trivial set of possible implementations that can satisfy both *U2.1* and *U2.2*, my gut impression is that no one implementation will really fit both usecases well in an easy to use/understand way. I'm also pretty confident that the "multi-language" use cases would be easier to solve/build in a "clean" (and easy for users to understand) approach more simply / quickly than any (non-silly) solutions that would support the "let me shoot my self in the foot if I want" objectives. * While I personally don't feel that the *U2.2* usecase is a particularly good idea, the overall "plumbing" involved in supporting this type of usecase would be very helpful towards supporting *U1.3* * Likewise: *U1.1* and *U1.2* should be easy to implement as new FieldTypes independent from the more complex needs of *U1.3*. But if *U1.3* was possible, then there would likely be potential for refactoring to reduce common code and simplify the implementations. > A Potential Roadmap for robust multi-analyzer TextFields w/various options > for configuring docValues > > > Key: SOLR-11917 > URL: https://issues.apache.org/jira/browse/SOLR-11917 > Project: Solr > Issue Type: Wish > Security Level: Public(Default Security Level. Issues are Public) >Reporter: Hoss Man >Assignee: Hoss Man >Priority: Major > > A while back, I was tasked at my day job to brainstorm & design some "smarter > field types" in Solr. 
In particular to think about: > # How to simplify some of the "special things" people have to know about > Solr behavior when creating their schemas > # How to reduce the number of situations where users have to copy/clone one > "logical field" into multiple "schema felds in order to meet diff use cases > The main result of this thought excercise is a handful of usecases/goals that > people seem to have - many of which are already tracked in existing jiras - > along with a high level design/roadmap of potential solutions for these goals > that can be implemented incrementally to leverage some common changes (and > what those changes might look like). > My intention is to use this jira as a place to share these ideas for broader > community discussion, and as a central linkage point for the related jiras. > (details to follow in a very looong comment) > > NOTE: I am not (at this point) personally committing to following through on > implementing every aspect of these ideas :) -- This message was sent by Atlassian JIRA (v7.6.3#76005) - To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org For additional commands, e-mail: dev-h...@lucene.apache.org
[jira] [Commented] (SOLR-11917) A Potential Roadmap for robust multi-analyzer TextFields w/various options for configuring docValues
[ https://issues.apache.org/jira/browse/SOLR-11917?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel=16341679#comment-16341679 ] Hoss Man commented on SOLR-11917: - _The following notes were compiled over many months and iteratively tweaked/revised – It's likely that in some cases my comments may be overlooking/ignorant-of comments/ideas/patches related to some of these concepts that were posted after I wrote them that i just haven't noticed since._ _Also: Jira says my notes are too long for one comment, so i have to break it up into sections_ h1. High Level Goals / *U*secases Talking to various customers about their pain points, and reading up on various jiras led me to a handful of Text/String related *U*secases that all seemed like they have solutions that could either overlap, or be in close proximity, when it came to implementation: * *U0*: "I want sane defaults when sorting on multivalued fields, not an error" ** Low hanging fruit already implemented for PrimitiveFieldType subclasses in SOLR-11854 * *U1*: (SOLR-8362) Add docValues support to TextField (or some new subclasses of TextField) – Because... ** *U1.1*: "I want to be able to (efficiently) sort on the original input of a TextField (using docValues)" ** *U1.2*: "I want to be able to (efficiently) facet on (docValues built from) the indexed terms of a TextField ** *U1.3*: "I want to be able to (efficiently) sort/facet on docValues built from analyzed terms using a completely diff analyzer than what i use for searching" *** Example: StandardAnalyzer for searching, but lowercased docValues for sorting. * *U2*: Choose Query Analysis Aspects At Query Time – Because... ** *U2.1*: "I want to be able to do multi-language indexing/querying easily so it only looks like one 'field' name." 
(SOLR-6492) ** *U2.2*: "I want to be able to have lots of arbitrary analyzers I pick between arbitrarily at query time and maybe shoot myself in the foot but it's ok i'm an expert and i have special needs." (SOLR-5053) *** NOTE: the description of SOLR-5053 does also list multi-lang as a motivation, but some of the examples – like "ignore synonyms" – are definitely broader scope then this. > A Potential Roadmap for robust multi-analyzer TextFields w/various options > for configuring docValues > > > Key: SOLR-11917 > URL: https://issues.apache.org/jira/browse/SOLR-11917 > Project: Solr > Issue Type: Wish > Security Level: Public(Default Security Level. Issues are Public) >Reporter: Hoss Man >Assignee: Hoss Man >Priority: Major > > A while back, I was tasked at my day job to brainstorm & design some "smarter > field types" in Solr. In particular to think about: > # How to simplify some of the "special things" people have to know about > Solr behavior when creating their schemas > # How to reduce the number of situations where users have to copy/clone one > "logical field" into multiple "schema felds in order to meet diff use cases > The main result of this thought excercise is a handful of usecases/goals that > people seem to have - many of which are already tracked in existing jiras - > along with a high level design/roadmap of potential solutions for these goals > that can be implemented incrementally to leverage some common changes (and > what those changes might look like). > My intention is to use this jira as a place to share these ideas for broader > community discussion, and as a central linkage point for the related jiras. 
> (details to follow in a very looong comment) > > NOTE: I am not (at this point) personally committing to following through on > implementing every aspect of these ideas :) -- This message was sent by Atlassian JIRA (v7.6.3#76005) - To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org For additional commands, e-mail: dev-h...@lucene.apache.org
[jira] [Created] (SOLR-11917) A Potential Roadmap for robust multi-analyzer TextFields w/various options for configuring docValues
Hoss Man created SOLR-11917: --- Summary: A Potential Roadmap for robust multi-analyzer TextFields w/various options for configuring docValues Key: SOLR-11917 URL: https://issues.apache.org/jira/browse/SOLR-11917 Project: Solr Issue Type: Wish Security Level: Public (Default Security Level. Issues are Public) Reporter: Hoss Man Assignee: Hoss Man A while back, I was tasked at my day job to brainstorm & design some "smarter field types" in Solr. In particular to think about: # How to simplify some of the "special things" people have to know about Solr behavior when creating their schemas # How to reduce the number of situations where users have to copy/clone one "logical field" into multiple "schema felds in order to meet diff use cases The main result of this thought excercise is a handful of usecases/goals that people seem to have - many of which are already tracked in existing jiras - along with a high level design/roadmap of potential solutions for these goals that can be implemented incrementally to leverage some common changes (and what those changes might look like). My intention is to use this jira as a place to share these ideas for broader community discussion, and as a central linkage point for the related jiras. (details to follow in a very looong comment) NOTE: I am not (at this point) personally committing to following through on implementing every aspect of these ideas :) -- This message was sent by Atlassian JIRA (v7.6.3#76005) - To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org For additional commands, e-mail: dev-h...@lucene.apache.org
[jira] [Updated] (SOLR-11916) new SortableTextField using docValues built from the original string input
[ https://issues.apache.org/jira/browse/SOLR-11916?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel ] Hoss Man updated SOLR-11916: Attachment: SOLR-11916.patch > new SortableTextField using docValues built from the original string input > -- > > Key: SOLR-11916 > URL: https://issues.apache.org/jira/browse/SOLR-11916 > Project: Solr > Issue Type: Improvement > Security Level: Public(Default Security Level. Issues are Public) >Reporter: Hoss Man >Assignee: Hoss Man >Priority: Major > Attachments: SOLR-11916.patch > > > I propose adding a new SortableTextField subclass that would functionally > work the same as TextField except: > * {{docValues="true|false"}} could be configured, with the default being > "true" > * The docValues would contain the original input values (just like StrField) > for sorting (or faceting) > ** By default, to protect users from excessively large docValues, only the > first 1024 of each field value would be used -- but this could be overridden > with configuration. > > Consider the following sample configuration: > {code} > indexed="true" docValues="true" stored="true" multiValued="false"/> > > >... > > >... 
> > > {code} > Given a document with a title of "Solr In Action" > Users could: > * Search for individual (indexed) terms in the "title" field: {{q=title:solr}} > * Sort documents by title ( {{sort=title asc}} ) such that this document's > sort value would be "Solr In Action" > If another document had a "title" value that was longer then 1024 chars, then > the docValues would be built using only the first 1024 characters of the > value (unless the user modified the configuration) > This would be functionally equivalent to the following existing configuration > - including the on disk index segments - except that the on disk DocValues > would refer directly to the "title" field, reducing the total number of > "field infos" in the index (which has a small impact on segment housekeeping > and merge times) and end users would not need to sort on an alternate > "title_string" field name - the original "title" field name would always be > used directly. > {code} > indexed="true" docValues="true" stored="true" multiValued="false"/> > indexed="false" docValues="true" stored="false" multiValued="false"/> > > {code} -- This message was sent by Atlassian JIRA (v7.6.3#76005) - To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org For additional commands, e-mail: dev-h...@lucene.apache.org
[jira] [Reopened] (SOLR-11782) LatchWatcher.await doesn’t protect against spurious wakeup
[ https://issues.apache.org/jira/browse/SOLR-11782?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel ] Tomás Fernández Löbbe reopened SOLR-11782: -- > LatchWatcher.await doesn’t protect against spurious wakeup > -- > > Key: SOLR-11782 > URL: https://issues.apache.org/jira/browse/SOLR-11782 > Project: Solr > Issue Type: Bug > Security Level: Public(Default Security Level. Issues are Public) >Reporter: Tomás Fernández Löbbe >Assignee: Tomás Fernández Löbbe >Priority: Minor > Fix For: master (8.0), 7.3 > > Attachments: SOLR-11782.patch, SOLR-11782.patch, SOLR-11782.patch > > > I noticed that {{LatchWatcher.await}} does: > {code} > public void await(long timeout) throws InterruptedException { > synchronized (lock) { > if (this.event != null) return; > lock.wait(timeout); > } > } > {code} > while the recommendation of lock.wait is to check the wait condition even > after the method returns in case of spurious wakeup. {{lock}} is a private > local field to which {{notifyAll}} is called only after a zk event is being > handled. I think we should check the {{await}} method to something like: > {code} > public void await(long timeout) throws InterruptedException { > assert timeout > 0; > long timeoutTime = System.currentTimeMillis() + timeout; > synchronized (lock) { > while (this.event == null) { > long nextTimeout = timeoutTime - System.currentTimeMillis(); > if (nextTimeout <= 0) { > return; > } > lock.wait(nextTimeout); > } > } > } > {code} -- This message was sent by Atlassian JIRA (v7.6.3#76005) - To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org For additional commands, e-mail: dev-h...@lucene.apache.org
[jira] [Commented] (SOLR-11782) LatchWatcher.await doesn’t protect against spurious wakeup
[ https://issues.apache.org/jira/browse/SOLR-11782?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel=16341665#comment-16341665 ] Tomás Fernández Löbbe commented on SOLR-11782: -- I see. I thought that could be the case, but wasn't sure, since the javadocs don't say that explicitly, and they also say: {code} This method is behaviorally equivalent to: {@code awaitNanos(unit.toNanos(time)) > 0} {code} This makes the await method simpler, I can just do: {code} public void await(long timeoutMs) throws InterruptedException { lock.lock(); try { if (this.event != null) { return; } eventReceived.await(timeoutMs, TimeUnit.MILLISECONDS); } finally { lock.unlock(); } } {code} > LatchWatcher.await doesn’t protect against spurious wakeup > -- > > Key: SOLR-11782 > URL: https://issues.apache.org/jira/browse/SOLR-11782 > Project: Solr > Issue Type: Bug > Security Level: Public(Default Security Level. Issues are Public) >Reporter: Tomás Fernández Löbbe >Assignee: Tomás Fernández Löbbe >Priority: Minor > Fix For: master (8.0), 7.3 > > Attachments: SOLR-11782.patch, SOLR-11782.patch, SOLR-11782.patch > > > I noticed that {{LatchWatcher.await}} does: > {code} > public void await(long timeout) throws InterruptedException { > synchronized (lock) { > if (this.event != null) return; > lock.wait(timeout); > } > } > {code} > while the recommendation of lock.wait is to check the wait condition even > after the method returns in case of spurious wakeup. {{lock}} is a private > local field to which {{notifyAll}} is called only after a zk event is being > handled. 
I think we should check the {{await}} method to something like: > {code} > public void await(long timeout) throws InterruptedException { > assert timeout > 0; > long timeoutTime = System.currentTimeMillis() + timeout; > synchronized (lock) { > while (this.event == null) { > long nextTimeout = timeoutTime - System.currentTimeMillis(); > if (nextTimeout <= 0) { > return; > } > lock.wait(nextTimeout); > } > } > } > {code} -- This message was sent by Atlassian JIRA (v7.6.3#76005) - To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org For additional commands, e-mail: dev-h...@lucene.apache.org
[jira] [Created] (SOLR-11916) new SortableTextField using docValues built from the original string input
Hoss Man created SOLR-11916: --- Summary: new SortableTextField using docValues built from the original string input Key: SOLR-11916 URL: https://issues.apache.org/jira/browse/SOLR-11916 Project: Solr Issue Type: Improvement Security Level: Public (Default Security Level. Issues are Public) Reporter: Hoss Man Assignee: Hoss Man I propose adding a new SortableTextField subclass that would functionally work the same as TextField except: * {{docValues="true|false"}} could be configured, with the default being "true" * The docValues would contain the original input values (just like StrField) for sorting (or faceting) ** By default, to protect users from excessively large docValues, only the first 1024 of each field value would be used -- but this could be overridden with configuration. Consider the following sample configuration: {code} ... ... {code} Given a document with a title of "Solr In Action" Users could: * Search for individual (indexed) terms in the "title" field: {{q=title:solr}} * Sort documents by title ( {{sort=title asc}} ) such that this document's sort value would be "Solr In Action" If another document had a "title" value that was longer then 1024 chars, then the docValues would be built using only the first 1024 characters of the value (unless the user modified the configuration) This would be functionally equivalent to the following existing configuration - including the on disk index segments - except that the on disk DocValues would refer directly to the "title" field, reducing the total number of "field infos" in the index (which has a small impact on segment housekeeping and merge times) and end users would not need to sort on an alternate "title_string" field name - the original "title" field name would always be used directly. {code} {code} -- This message was sent by Atlassian JIRA (v7.6.3#76005) - To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org For additional commands, e-mail: dev-h...@lucene.apache.org
[JENKINS] Lucene-Solr-master-Windows (64bit/jdk1.8.0_144) - Build # 7141 - Still Unstable!
Build: https://jenkins.thetaphi.de/job/Lucene-Solr-master-Windows/7141/ Java: 64bit/jdk1.8.0_144 -XX:+UseCompressedOops -XX:+UseParallelGC 11 tests failed. FAILED: junit.framework.TestSuite.org.apache.lucene.codecs.perfield.TestPerFieldPostingsFormat Error Message: Could not remove the following files (in the order of attempts): C:\Users\jenkins\workspace\Lucene-Solr-master-Windows\lucene\build\core\test\J0\temp\lucene.codecs.perfield.TestPerFieldPostingsFormat_9DCF183299E3534C-001\testPostingsFormat.testExact-003: java.nio.file.DirectoryNotEmptyException: C:\Users\jenkins\workspace\Lucene-Solr-master-Windows\lucene\build\core\test\J0\temp\lucene.codecs.perfield.TestPerFieldPostingsFormat_9DCF183299E3534C-001\testPostingsFormat.testExact-003 Stack Trace: java.io.IOException: Could not remove the following files (in the order of attempts): C:\Users\jenkins\workspace\Lucene-Solr-master-Windows\lucene\build\core\test\J0\temp\lucene.codecs.perfield.TestPerFieldPostingsFormat_9DCF183299E3534C-001\testPostingsFormat.testExact-003: java.nio.file.DirectoryNotEmptyException: C:\Users\jenkins\workspace\Lucene-Solr-master-Windows\lucene\build\core\test\J0\temp\lucene.codecs.perfield.TestPerFieldPostingsFormat_9DCF183299E3534C-001\testPostingsFormat.testExact-003 at __randomizedtesting.SeedInfo.seed([9DCF183299E3534C]:0) at org.apache.lucene.util.IOUtils.rm(IOUtils.java:329) at org.apache.lucene.util.TestRuleTemporaryFilesCleanup.afterAlways(TestRuleTemporaryFilesCleanup.java:216) at com.carrotsearch.randomizedtesting.rules.TestRuleAdapter$1.afterAlways(TestRuleAdapter.java:31) at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:43) at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36) at org.apache.lucene.util.TestRuleAssertionsRequired$1.evaluate(TestRuleAssertionsRequired.java:53) at org.apache.lucene.util.TestRuleMarkFailure$1.evaluate(TestRuleMarkFailure.java:47) at 
org.apache.lucene.util.TestRuleIgnoreAfterMaxFailures$1.evaluate(TestRuleIgnoreAfterMaxFailures.java:64) at org.apache.lucene.util.TestRuleIgnoreTestSuites$1.evaluate(TestRuleIgnoreTestSuites.java:54) at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36) at com.carrotsearch.randomizedtesting.ThreadLeakControl$StatementRunner.run(ThreadLeakControl.java:368) at java.lang.Thread.run(Thread.java:748) FAILED: junit.framework.TestSuite.org.apache.lucene.index.TestDemoParallelLeafReader Error Message: Could not remove the following files (in the order of attempts): C:\Users\jenkins\workspace\Lucene-Solr-master-Windows\lucene\build\core\test\J0\temp\lucene.index.TestDemoParallelLeafReader_9DCF183299E3534C-001\tempDir-005\index: java.nio.file.DirectoryNotEmptyException: C:\Users\jenkins\workspace\Lucene-Solr-master-Windows\lucene\build\core\test\J0\temp\lucene.index.TestDemoParallelLeafReader_9DCF183299E3534C-001\tempDir-005\index C:\Users\jenkins\workspace\Lucene-Solr-master-Windows\lucene\build\core\test\J0\temp\lucene.index.TestDemoParallelLeafReader_9DCF183299E3534C-001\tempDir-005: java.nio.file.DirectoryNotEmptyException: C:\Users\jenkins\workspace\Lucene-Solr-master-Windows\lucene\build\core\test\J0\temp\lucene.index.TestDemoParallelLeafReader_9DCF183299E3534C-001\tempDir-005 Stack Trace: java.io.IOException: Could not remove the following files (in the order of attempts): C:\Users\jenkins\workspace\Lucene-Solr-master-Windows\lucene\build\core\test\J0\temp\lucene.index.TestDemoParallelLeafReader_9DCF183299E3534C-001\tempDir-005\index: java.nio.file.DirectoryNotEmptyException: C:\Users\jenkins\workspace\Lucene-Solr-master-Windows\lucene\build\core\test\J0\temp\lucene.index.TestDemoParallelLeafReader_9DCF183299E3534C-001\tempDir-005\index C:\Users\jenkins\workspace\Lucene-Solr-master-Windows\lucene\build\core\test\J0\temp\lucene.index.TestDemoParallelLeafReader_9DCF183299E3534C-001\tempDir-005: 
java.nio.file.DirectoryNotEmptyException: C:\Users\jenkins\workspace\Lucene-Solr-master-Windows\lucene\build\core\test\J0\temp\lucene.index.TestDemoParallelLeafReader_9DCF183299E3534C-001\tempDir-005 at __randomizedtesting.SeedInfo.seed([9DCF183299E3534C]:0) at org.apache.lucene.util.IOUtils.rm(IOUtils.java:329) at org.apache.lucene.util.TestRuleTemporaryFilesCleanup.afterAlways(TestRuleTemporaryFilesCleanup.java:216) at com.carrotsearch.randomizedtesting.rules.TestRuleAdapter$1.afterAlways(TestRuleAdapter.java:31) at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:43) at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36) at org.apache.lucene.util.TestRuleAssertionsRequired$1.evaluate(TestRuleAssertionsRequired.java:53) at
[jira] [Commented] (SOLR-11066) Implement a scheduled trigger
[ https://issues.apache.org/jira/browse/SOLR-11066?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel=16341645#comment-16341645 ] David Smiley commented on SOLR-11066: - Shouldn't startTime end in a 'Z', as is standard in Solr? Why use a custom SimpleDateFormat, which by the way is an old API? Simply parse using DateMathParser.parse(str).toInstant(), and an Instant has a toString() of the standard ISO-8601 with 'Z' DEFAULT_GRACE_TIME_MS could be more clearly defined as: {{TimeUnit.MINUTES.toMillis(15)}} > Implement a scheduled trigger > - > > Key: SOLR-11066 > URL: https://issues.apache.org/jira/browse/SOLR-11066 > Project: Solr > Issue Type: Sub-task > Security Level: Public(Default Security Level. Issues are Public) > Components: AutoScaling, SolrCloud >Reporter: Shalin Shekhar Mangar >Assignee: Shalin Shekhar Mangar >Priority: Major > Fix For: master (8.0), 7.3 > > Attachments: SOLR-11066.patch > > > Implement a trigger that runs on a fixed interval say every 1 hour or every > 24 hours starting at midnight etc. -- This message was sent by Atlassian JIRA (v7.6.3#76005) - To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org For additional commands, e-mail: dev-h...@lucene.apache.org
[jira] [Commented] (SOLR-11915) SolrCloud should log values it has for user-configurable settings, including zkClientTimeout
[ https://issues.apache.org/jira/browse/SOLR-11915?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel=16341642#comment-16341642 ] Shawn Heisey commented on SOLR-11915: - Patch currently includes "CloudConfig: " on each logged line. Because the default log settings include the class name, this could be redundant. > SolrCloud should log values it has for user-configurable settings, including > zkClientTimeout > > > Key: SOLR-11915 > URL: https://issues.apache.org/jira/browse/SOLR-11915 > Project: Solr > Issue Type: Improvement > Security Level: Public(Default Security Level. Issues are Public) > Components: SolrCloud >Affects Versions: 7.2.1 >Reporter: Shawn Heisey >Priority: Minor > Attachments: SOLR-11915.patch > > -- This message was sent by Atlassian JIRA (v7.6.3#76005) - To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org For additional commands, e-mail: dev-h...@lucene.apache.org
[jira] [Updated] (SOLR-11915) SolrCloud should log values it has for user-configurable settings, including zkClientTimeout
[ https://issues.apache.org/jira/browse/SOLR-11915?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel ] Shawn Heisey updated SOLR-11915: Attachment: SOLR-11915.patch > SolrCloud should log values it has for user-configurable settings, including > zkClientTimeout > > > Key: SOLR-11915 > URL: https://issues.apache.org/jira/browse/SOLR-11915 > Project: Solr > Issue Type: Improvement > Security Level: Public(Default Security Level. Issues are Public) > Components: SolrCloud >Affects Versions: 7.2.1 >Reporter: Shawn Heisey >Priority: Minor > Attachments: SOLR-11915.patch > > -- This message was sent by Atlassian JIRA (v7.6.3#76005) - To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org For additional commands, e-mail: dev-h...@lucene.apache.org
[jira] [Created] (SOLR-11915) SolrCloud should log values it has for user-configurable settings, including zkClientTimeout
Shawn Heisey created SOLR-11915: --- Summary: SolrCloud should log values it has for user-configurable settings, including zkClientTimeout Key: SOLR-11915 URL: https://issues.apache.org/jira/browse/SOLR-11915 Project: Solr Issue Type: Improvement Security Level: Public (Default Security Level. Issues are Public) Components: SolrCloud Affects Versions: 7.2.1 Reporter: Shawn Heisey -- This message was sent by Atlassian JIRA (v7.6.3#76005) - To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org For additional commands, e-mail: dev-h...@lucene.apache.org
[jira] [Updated] (SOLR-11914) Remove/move questionable SolrParams methods
[ https://issues.apache.org/jira/browse/SOLR-11914?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel ] David Smiley updated SOLR-11914: Priority: Minor (was: Major) > Remove/move questionable SolrParams methods > --- > > Key: SOLR-11914 > URL: https://issues.apache.org/jira/browse/SOLR-11914 > Project: Solr > Issue Type: Improvement > Security Level: Public(Default Security Level. Issues are Public) > Components: SolrJ >Reporter: David Smiley >Priority: Minor > Labels: newdev > > {{MapgetAll(Map sink, Collection > params)}} > Is only used by the CollectionsHandler, and has particular rules about how it > handles multi-valued data that make it not very generic, and thus I think > doesn't belong here. Furthermore the existence of this method is confusing > in that it gives the user another choice against it use versus toMap (there > are two overloaded variants). > {{SolrParams toFilteredSolrParams(List names)}} > Is only called in one place, and something about it bothers me, perhaps just > the name or that it ought to be a view maybe. > {{static Map toMap(NamedList params)}} > Isn't used and I don't like it; it doesn't even involve a SolrParams! Legacy > of 2006. > {{static Map toMultiMap(NamedList params)}} > It doesn't even involve a SolrParams! Legacy of 2006 with some updates since. > Used in some places. Perhaps should be moved to NamedList as an instance > method. -- This message was sent by Atlassian JIRA (v7.6.3#76005) - To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org For additional commands, e-mail: dev-h...@lucene.apache.org
[jira] [Commented] (SOLR-11066) Implement a scheduled trigger
[ https://issues.apache.org/jira/browse/SOLR-11066?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel=16341625#comment-16341625 ] Shalin Shekhar Mangar commented on SOLR-11066: -- This patch is a first cut of the feature. Integration tests are still pending. The configuration of the trigger supports the following parameter: {code} { "name": "scheduled_trigger_1", "type": "scheduled", "startTime": "2018-01-26T21:00:00", "every": "+1DAY", "graceTime": "6" } {code} # {{startTime}} is a date in the format {{-MM-dd'T'HH:mm:ss}}. If not specified then it will default to the trigger's creation time. # {{every}} is a string which follows Solr's date math parser syntax e.g. {{+1DAY}}, {{+12HOURS}} etc. (note date math supports subtraction but using such expression will ensure that the trigger never produces an event) # {{graceTime}} is the number of milliseconds after the next scheduled time within which the trigger will fire an event (if it was not able to fire at the exact right time). It defaults to 15 minutes. The trigger itself is quite simple. It applies the {{every}} date math expression on the {{startTime}} or the last fire time to derive the next scheduled time and if current time is greater than next scheduled time but within {{graceTime}} milliseconds then an event is generated. > Implement a scheduled trigger > - > > Key: SOLR-11066 > URL: https://issues.apache.org/jira/browse/SOLR-11066 > Project: Solr > Issue Type: Sub-task > Security Level: Public(Default Security Level. Issues are Public) > Components: AutoScaling, SolrCloud >Reporter: Shalin Shekhar Mangar >Assignee: Shalin Shekhar Mangar >Priority: Major > Fix For: master (8.0), 7.3 > > Attachments: SOLR-11066.patch > > > Implement a trigger that runs on a fixed interval say every 1 hour or every > 24 hours starting at midnight etc. 
-- This message was sent by Atlassian JIRA (v7.6.3#76005) - To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org For additional commands, e-mail: dev-h...@lucene.apache.org
[jira] [Created] (SOLR-11914) Remove/move questionable SolrParams methods
David Smiley created SOLR-11914: --- Summary: Remove/move questionable SolrParams methods Key: SOLR-11914 URL: https://issues.apache.org/jira/browse/SOLR-11914 Project: Solr Issue Type: Improvement Security Level: Public (Default Security Level. Issues are Public) Components: SolrJ Reporter: David Smiley {{MapgetAll(Map sink, Collection params)}} Is only used by the CollectionsHandler, and has particular rules about how it handles multi-valued data that make it not very generic, and thus I think doesn't belong here. Furthermore the existence of this method is confusing in that it gives the user another choice against it use versus toMap (there are two overloaded variants). {{SolrParams toFilteredSolrParams(List names)}} Is only called in one place, and something about it bothers me, perhaps just the name or that it ought to be a view maybe. {{static Map toMap(NamedList params)}} Isn't used and I don't like it; it doesn't even involve a SolrParams! Legacy of 2006. {{static Map toMultiMap(NamedList params)}} It doesn't even involve a SolrParams! Legacy of 2006 with some updates since. Used in some places. Perhaps should be moved to NamedList as an instance method. -- This message was sent by Atlassian JIRA (v7.6.3#76005) - To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org For additional commands, e-mail: dev-h...@lucene.apache.org
[jira] [Updated] (SOLR-11066) Implement a scheduled trigger
[ https://issues.apache.org/jira/browse/SOLR-11066?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel ] Shalin Shekhar Mangar updated SOLR-11066: - Attachment: SOLR-11066.patch > Implement a scheduled trigger > - > > Key: SOLR-11066 > URL: https://issues.apache.org/jira/browse/SOLR-11066 > Project: Solr > Issue Type: Sub-task > Security Level: Public(Default Security Level. Issues are Public) > Components: AutoScaling, SolrCloud >Reporter: Shalin Shekhar Mangar >Assignee: Shalin Shekhar Mangar >Priority: Major > Fix For: master (8.0), 7.3 > > Attachments: SOLR-11066.patch > > > Implement a trigger that runs on a fixed interval say every 1 hour or every > 24 hours starting at midnight etc. -- This message was sent by Atlassian JIRA (v7.6.3#76005) - To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org For additional commands, e-mail: dev-h...@lucene.apache.org
[jira] [Commented] (SOLR-11782) LatchWatcher.await doesn’t protect against spurious wakeup
[ https://issues.apache.org/jira/browse/SOLR-11782?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel=16341577#comment-16341577 ] Dawid Weiss commented on SOLR-11782: That method already protects against spurious wakeups, so no need to compute anything? "Causes the current thread to wait until it is signalled or interrupted, or the specified waiting time elapses." > LatchWatcher.await doesn’t protect against spurious wakeup > -- > > Key: SOLR-11782 > URL: https://issues.apache.org/jira/browse/SOLR-11782 > Project: Solr > Issue Type: Bug > Security Level: Public(Default Security Level. Issues are Public) >Reporter: Tomás Fernández Löbbe >Assignee: Tomás Fernández Löbbe >Priority: Minor > Fix For: master (8.0), 7.3 > > Attachments: SOLR-11782.patch, SOLR-11782.patch, SOLR-11782.patch > > > I noticed that {{LatchWatcher.await}} does: > {code} > public void await(long timeout) throws InterruptedException { > synchronized (lock) { > if (this.event != null) return; > lock.wait(timeout); > } > } > {code} > while the recommendation of lock.wait is to check the wait condition even > after the method returns in case of spurious wakeup. {{lock}} is a private > local field to which {{notifyAll}} is called only after a zk event is being > handled. I think we should check the {{await}} method to something like: > {code} > public void await(long timeout) throws InterruptedException { > assert timeout > 0; > long timeoutTime = System.currentTimeMillis() + timeout; > synchronized (lock) { > while (this.event == null) { > long nextTimeout = timeoutTime - System.currentTimeMillis(); > if (nextTimeout <= 0) { > return; > } > lock.wait(nextTimeout); > } > } > } > {code} -- This message was sent by Atlassian JIRA (v7.6.3#76005) - To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org For additional commands, e-mail: dev-h...@lucene.apache.org
[jira] [Created] (SOLR-11913) SolrParams ought to implement Iterable<Map.Entry<String,String[]>>
David Smiley created SOLR-11913: --- Summary: SolrParams ought to implement Iterable> Key: SOLR-11913 URL: https://issues.apache.org/jira/browse/SOLR-11913 Project: Solr Issue Type: Improvement Security Level: Public (Default Security Level. Issues are Public) Reporter: David Smiley SolrJ ought to implement {{Iterable >}} so that it's easier to iterate on it, either using Java 5 for-each style, or Java 8 streams. The implementation on ModifiableSolrParams can delegate through to the underlying LinkedHashMap entry set. The default impl can produce a Map.Entry with a getValue that calls through to getParams. -- This message was sent by Atlassian JIRA (v7.6.3#76005) - To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org For additional commands, e-mail: dev-h...@lucene.apache.org
[JENKINS] Lucene-Solr-master-Linux (64bit/jdk-9.0.1) - Build # 21352 - Failure!
Build: https://jenkins.thetaphi.de/job/Lucene-Solr-master-Linux/21352/ Java: 64bit/jdk-9.0.1 -XX:-UseCompressedOops -XX:+UseG1GC 5 tests failed. FAILED: junit.framework.TestSuite.org.apache.solr.cloud.BasicZkTest Error Message: SolrCore.getOpenCount()==2 Stack Trace: java.lang.RuntimeException: SolrCore.getOpenCount()==2 at __randomizedtesting.SeedInfo.seed([C5ABA40195487ADB]:0) at org.apache.solr.util.TestHarness.close(TestHarness.java:379) at org.apache.solr.SolrTestCaseJ4.deleteCore(SolrTestCaseJ4.java:792) at org.apache.solr.cloud.AbstractZkTestCase.azt_afterClass(AbstractZkTestCase.java:147) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:564) at com.carrotsearch.randomizedtesting.RandomizedRunner.invoke(RandomizedRunner.java:1737) at com.carrotsearch.randomizedtesting.RandomizedRunner$7.evaluate(RandomizedRunner.java:897) at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36) at com.carrotsearch.randomizedtesting.rules.SystemPropertiesRestoreRule$1.evaluate(SystemPropertiesRestoreRule.java:57) at org.apache.lucene.util.AbstractBeforeAfterRule$1.evaluate(AbstractBeforeAfterRule.java:45) at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36) at org.apache.lucene.util.TestRuleStoreClassName$1.evaluate(TestRuleStoreClassName.java:41) at com.carrotsearch.randomizedtesting.rules.NoShadowingOrOverridesOnMethodsRule$1.evaluate(NoShadowingOrOverridesOnMethodsRule.java:40) at com.carrotsearch.randomizedtesting.rules.NoShadowingOrOverridesOnMethodsRule$1.evaluate(NoShadowingOrOverridesOnMethodsRule.java:40) at 
com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36) at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36) at org.apache.lucene.util.TestRuleAssertionsRequired$1.evaluate(TestRuleAssertionsRequired.java:53) at org.apache.lucene.util.TestRuleMarkFailure$1.evaluate(TestRuleMarkFailure.java:47) at org.apache.lucene.util.TestRuleIgnoreAfterMaxFailures$1.evaluate(TestRuleIgnoreAfterMaxFailures.java:64) at org.apache.lucene.util.TestRuleIgnoreTestSuites$1.evaluate(TestRuleIgnoreTestSuites.java:54) at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36) at com.carrotsearch.randomizedtesting.ThreadLeakControl$StatementRunner.run(ThreadLeakControl.java:368) at java.base/java.lang.Thread.run(Thread.java:844) FAILED: junit.framework.TestSuite.org.apache.solr.cloud.BasicZkTest Error Message: SolrCore.getOpenCount()==2 Stack Trace: java.lang.RuntimeException: SolrCore.getOpenCount()==2 at __randomizedtesting.SeedInfo.seed([C5ABA40195487ADB]:0) at org.apache.solr.util.TestHarness.close(TestHarness.java:379) at org.apache.solr.SolrTestCaseJ4.deleteCore(SolrTestCaseJ4.java:792) at org.apache.solr.SolrTestCaseJ4.teardownTestCases(SolrTestCaseJ4.java:288) at jdk.internal.reflect.GeneratedMethodAccessor131.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:564) at com.carrotsearch.randomizedtesting.RandomizedRunner.invoke(RandomizedRunner.java:1737) at com.carrotsearch.randomizedtesting.RandomizedRunner$7.evaluate(RandomizedRunner.java:897) at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36) at com.carrotsearch.randomizedtesting.rules.SystemPropertiesRestoreRule$1.evaluate(SystemPropertiesRestoreRule.java:57) at 
org.apache.lucene.util.AbstractBeforeAfterRule$1.evaluate(AbstractBeforeAfterRule.java:45) at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36) at org.apache.lucene.util.TestRuleStoreClassName$1.evaluate(TestRuleStoreClassName.java:41) at com.carrotsearch.randomizedtesting.rules.NoShadowingOrOverridesOnMethodsRule$1.evaluate(NoShadowingOrOverridesOnMethodsRule.java:40) at com.carrotsearch.randomizedtesting.rules.NoShadowingOrOverridesOnMethodsRule$1.evaluate(NoShadowingOrOverridesOnMethodsRule.java:40) at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36) at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36) at
[jira] [Commented] (SOLR-11891) BinaryResponseWriter fetches unnecessary fields
[ https://issues.apache.org/jira/browse/SOLR-11891?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel=16341547#comment-16341547 ] David Smiley commented on SOLR-11891: - bq. Is there any performance implications to enable docValues for the uniqueKey field, i.e retrieve via docValues vs stored field? Good ones :-) Retrieval should be much faster. You'd have to re-index changing the schema in such a way. And there is some extra storage for docValues of any field... but in the scheme of things it's not a big deal. The optimization I'm referring to is SOLR-8344 which landed in 7.1. The "id" field in the default configset ought to be docValues but it is not... I ought to file a JIRA for it. > BinaryResponseWriter fetches unnecessary fields > --- > > Key: SOLR-11891 > URL: https://issues.apache.org/jira/browse/SOLR-11891 > Project: Solr > Issue Type: Improvement > Security Level: Public(Default Security Level. Issues are Public) > Components: Response Writers >Affects Versions: 5.4, 6.4.2, 6.6.2 >Reporter: wei wang >Priority: Major > > We observe that solr query time increases significantly with the number of > rows requested, even all we retrieve for each document is just fl=id,score. > Debugged a bit and see that most of the increased time was spent in > BinaryResponseWriter, converting lucene document into SolrDocument. Inside > convertLuceneDocToSolrDoc(): > [https://github.com/apache/lucene-solr/blob/df874432b9a17b547acb24a01d3491839e6a6b69/solr/core/src/java/org/apache/solr/response/DocsStreamer.java#L182] > > I am a bit puzzled why we need to iterate through all the fields in the > document. Why can’t we just iterate through the requested field list? > [https://github.com/apache/lucene-solr/blob/df874432b9a17b547acb24a01d3491839e6a6b69/solr/core/src/java/org/apache/solr/response/DocsStreamer.java#L156] > > e.g. 
when pass in the field list as > sdoc = convertLuceneDocToSolrDoc(doc, rctx.getSearcher().getSchema(), fnames) > and just iterate through fnames, there is a significant performance boost in > our case. -- This message was sent by Atlassian JIRA (v7.6.3#76005) - To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org For additional commands, e-mail: dev-h...@lucene.apache.org
[jira] [Created] (SOLR-11912) TriggerIntegrationTest fails a lot, reproducibly
Steve Rowe created SOLR-11912: - Summary: TriggerIntegrationTest fails a lot, reproducibly Key: SOLR-11912 URL: https://issues.apache.org/jira/browse/SOLR-11912 Project: Solr Issue Type: Bug Security Level: Public (Default Security Level. Issues are Public) Reporter: Steve Rowe Multiple tests in this suite are not just flaky, but are failing reproducibly. >From Hoss'ss report for that 24-hours >[http://fucit.org/solr-jenkins-reports/reports/24hours-method-failures.csv]: {noformat} org.apache.solr.cloud.autoscaling.TriggerIntegrationTest,testCooldown,thetaphi/Lucene-Solr-master-Linux/21346/ org.apache.solr.cloud.autoscaling.TriggerIntegrationTest,testEventFromRestoredState,apache/Lucene-Solr-NightlyTests-7.x/131/ org.apache.solr.cloud.autoscaling.TriggerIntegrationTest,testEventFromRestoredState,sarowe/Lucene-Solr-tests-master/14874/ org.apache.solr.cloud.autoscaling.TriggerIntegrationTest,testEventFromRestoredState,thetaphi/Lucene-Solr-7.x-Solaris/412/ org.apache.solr.cloud.autoscaling.TriggerIntegrationTest,testEventFromRestoredState,thetaphi/Lucene-Solr-master-MacOSX/4408/ org.apache.solr.cloud.autoscaling.TriggerIntegrationTest,testListeners,thetaphi/Lucene-Solr-master-Windows/7140/ org.apache.solr.cloud.autoscaling.TriggerIntegrationTest,testMetricTrigger,apache/Lucene-Solr-Tests-7.x/334/ org.apache.solr.cloud.autoscaling.TriggerIntegrationTest,testMetricTrigger,sarowe/Lucene-Solr-tests-7.x/2526/ org.apache.solr.cloud.autoscaling.TriggerIntegrationTest,testMetricTrigger,thetaphi/Lucene-Solr-7.x-Linux/1243/ org.apache.solr.cloud.autoscaling.TriggerIntegrationTest,testMetricTrigger,thetaphi/Lucene-Solr-7.x-Windows/424/ org.apache.solr.cloud.autoscaling.TriggerIntegrationTest,testMetricTrigger,thetaphi/Lucene-Solr-master-Linux/21344/ org.apache.solr.cloud.autoscaling.TriggerIntegrationTest,testMetricTrigger,thetaphi/Lucene-Solr-master-Linux/21345/ org.apache.solr.cloud.autoscaling.TriggerIntegrationTest,testMetricTrigger,thetaphi/Lucene-Solr-master-Linux/21350/ 
org.apache.solr.cloud.autoscaling.TriggerIntegrationTest,testNodeAddedTrigger,thetaphi/Lucene-Solr-master-Windows/7139/ org.apache.solr.cloud.autoscaling.TriggerIntegrationTest,testNodeAddedTriggerRestoreState,apache/Lucene-Solr-Tests-7.x/334/ org.apache.solr.cloud.autoscaling.TriggerIntegrationTest,testNodeAddedTriggerRestoreState,thetaphi/Lucene-Solr-7.x-Solaris/412/ org.apache.solr.cloud.autoscaling.TriggerIntegrationTest,testNodeAddedTriggerRestoreState,thetaphi/Lucene-Solr-master-MacOSX/4408/ org.apache.solr.cloud.autoscaling.TriggerIntegrationTest,testNodeLostTrigger,thetaphi/Lucene-Solr-7.x-Solaris/413/ org.apache.solr.cloud.autoscaling.TriggerIntegrationTest,testNodeLostTrigger,thetaphi/Lucene-Solr-master-Linux/21351/ org.apache.solr.cloud.autoscaling.TriggerIntegrationTest,testNodeLostTriggerRestoreState,thetaphi/Lucene-Solr-master-MacOSX/440 {noformat} -- This message was sent by Atlassian JIRA (v7.6.3#76005) - To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org For additional commands, e-mail: dev-h...@lucene.apache.org
[jira] [Updated] (SOLR-11912) TriggerIntegrationTest fails a lot, reproducibly
[ https://issues.apache.org/jira/browse/SOLR-11912?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel ] Steve Rowe updated SOLR-11912: -- Description: Multiple tests in this suite are not just flaky, but are failing reproducibly. >From Hoss'ss report for the last 24 hours >[http://fucit.org/solr-jenkins-reports/reports/24hours-method-failures.csv]: {noformat} org.apache.solr.cloud.autoscaling.TriggerIntegrationTest,testCooldown,thetaphi/Lucene-Solr-master-Linux/21346/ org.apache.solr.cloud.autoscaling.TriggerIntegrationTest,testEventFromRestoredState,apache/Lucene-Solr-NightlyTests-7.x/131/ org.apache.solr.cloud.autoscaling.TriggerIntegrationTest,testEventFromRestoredState,sarowe/Lucene-Solr-tests-master/14874/ org.apache.solr.cloud.autoscaling.TriggerIntegrationTest,testEventFromRestoredState,thetaphi/Lucene-Solr-7.x-Solaris/412/ org.apache.solr.cloud.autoscaling.TriggerIntegrationTest,testEventFromRestoredState,thetaphi/Lucene-Solr-master-MacOSX/4408/ org.apache.solr.cloud.autoscaling.TriggerIntegrationTest,testListeners,thetaphi/Lucene-Solr-master-Windows/7140/ org.apache.solr.cloud.autoscaling.TriggerIntegrationTest,testMetricTrigger,apache/Lucene-Solr-Tests-7.x/334/ org.apache.solr.cloud.autoscaling.TriggerIntegrationTest,testMetricTrigger,sarowe/Lucene-Solr-tests-7.x/2526/ org.apache.solr.cloud.autoscaling.TriggerIntegrationTest,testMetricTrigger,thetaphi/Lucene-Solr-7.x-Linux/1243/ org.apache.solr.cloud.autoscaling.TriggerIntegrationTest,testMetricTrigger,thetaphi/Lucene-Solr-7.x-Windows/424/ org.apache.solr.cloud.autoscaling.TriggerIntegrationTest,testMetricTrigger,thetaphi/Lucene-Solr-master-Linux/21344/ org.apache.solr.cloud.autoscaling.TriggerIntegrationTest,testMetricTrigger,thetaphi/Lucene-Solr-master-Linux/21345/ org.apache.solr.cloud.autoscaling.TriggerIntegrationTest,testMetricTrigger,thetaphi/Lucene-Solr-master-Linux/21350/ 
org.apache.solr.cloud.autoscaling.TriggerIntegrationTest,testNodeAddedTrigger,thetaphi/Lucene-Solr-master-Windows/7139/ org.apache.solr.cloud.autoscaling.TriggerIntegrationTest,testNodeAddedTriggerRestoreState,apache/Lucene-Solr-Tests-7.x/334/ org.apache.solr.cloud.autoscaling.TriggerIntegrationTest,testNodeAddedTriggerRestoreState,thetaphi/Lucene-Solr-7.x-Solaris/412/ org.apache.solr.cloud.autoscaling.TriggerIntegrationTest,testNodeAddedTriggerRestoreState,thetaphi/Lucene-Solr-master-MacOSX/4408/ org.apache.solr.cloud.autoscaling.TriggerIntegrationTest,testNodeLostTrigger,thetaphi/Lucene-Solr-7.x-Solaris/413/ org.apache.solr.cloud.autoscaling.TriggerIntegrationTest,testNodeLostTrigger,thetaphi/Lucene-Solr-master-Linux/21351/ org.apache.solr.cloud.autoscaling.TriggerIntegrationTest,testNodeLostTriggerRestoreState,thetaphi/Lucene-Solr-master-MacOSX/440 {noformat} was: Multiple tests in this suite are not just flaky, but are failing reproducibly. >From Hoss'ss report for that 24-hours >[http://fucit.org/solr-jenkins-reports/reports/24hours-method-failures.csv]: {noformat} org.apache.solr.cloud.autoscaling.TriggerIntegrationTest,testCooldown,thetaphi/Lucene-Solr-master-Linux/21346/ org.apache.solr.cloud.autoscaling.TriggerIntegrationTest,testEventFromRestoredState,apache/Lucene-Solr-NightlyTests-7.x/131/ org.apache.solr.cloud.autoscaling.TriggerIntegrationTest,testEventFromRestoredState,sarowe/Lucene-Solr-tests-master/14874/ org.apache.solr.cloud.autoscaling.TriggerIntegrationTest,testEventFromRestoredState,thetaphi/Lucene-Solr-7.x-Solaris/412/ org.apache.solr.cloud.autoscaling.TriggerIntegrationTest,testEventFromRestoredState,thetaphi/Lucene-Solr-master-MacOSX/4408/ org.apache.solr.cloud.autoscaling.TriggerIntegrationTest,testListeners,thetaphi/Lucene-Solr-master-Windows/7140/ org.apache.solr.cloud.autoscaling.TriggerIntegrationTest,testMetricTrigger,apache/Lucene-Solr-Tests-7.x/334/ 
org.apache.solr.cloud.autoscaling.TriggerIntegrationTest,testMetricTrigger,sarowe/Lucene-Solr-tests-7.x/2526/ org.apache.solr.cloud.autoscaling.TriggerIntegrationTest,testMetricTrigger,thetaphi/Lucene-Solr-7.x-Linux/1243/ org.apache.solr.cloud.autoscaling.TriggerIntegrationTest,testMetricTrigger,thetaphi/Lucene-Solr-7.x-Windows/424/ org.apache.solr.cloud.autoscaling.TriggerIntegrationTest,testMetricTrigger,thetaphi/Lucene-Solr-master-Linux/21344/ org.apache.solr.cloud.autoscaling.TriggerIntegrationTest,testMetricTrigger,thetaphi/Lucene-Solr-master-Linux/21345/ org.apache.solr.cloud.autoscaling.TriggerIntegrationTest,testMetricTrigger,thetaphi/Lucene-Solr-master-Linux/21350/ org.apache.solr.cloud.autoscaling.TriggerIntegrationTest,testNodeAddedTrigger,thetaphi/Lucene-Solr-master-Windows/7139/ org.apache.solr.cloud.autoscaling.TriggerIntegrationTest,testNodeAddedTriggerRestoreState,apache/Lucene-Solr-Tests-7.x/334/ org.apache.solr.cloud.autoscaling.TriggerIntegrationTest,testNodeAddedTriggerRestoreState,thetaphi/Lucene-Solr-7.x-Solaris/412/
[jira] [Commented] (SOLR-8090) Admin Interface Fails WCAG Contrast Ratio Tests
[ https://issues.apache.org/jira/browse/SOLR-8090?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel=16341528#comment-16341528 ] Cassandra Targett commented on SOLR-8090: - I forgot about this issue, but agree the contrast is really hard to read. I changed it to #8D8D8D on the logging screen in SOLR-11895, but reviewing that choice in the context of this test, it's only a marginally better choice than what exists today. Using #4D4D4D would be dark enough to pass the test but still be lighter than #000 to achieve the original goal of de-emphasizing some text. That said, there are some things that we just shouldn't de-emphasize at all (like the Collection/Core menus in the left drop-down), so I would propose just not trying to do it in those places. I'll put a patch up for this in a day or so, with some before/after screenshots. > Admin Interface Fails WCAG Contrast Ratio Tests > --- > > Key: SOLR-8090 > URL: https://issues.apache.org/jira/browse/SOLR-8090 > Project: Solr > Issue Type: Improvement > Components: Admin UI >Affects Versions: 5.3 >Reporter: Mike Mallett >Priority: Major > Attachments: Solr-5.3.1-AdminUI-Contrast-Examples.png > > > The CSS for the admin UI in 5.3.1 has the following definition which affects > help text in a variety of places: > #core-selector #has-no-cores span > { > color: #c0c0c0; > display: block; > } > This is displayed on a white background, making the text with colour #c0c0c0 > next to impossible to read. This fails WCAG accessability guidelines and can > be checked using this Contract Checker: > http://webaim.org/resources/contrastchecker/ > Please adjust the CSS for the admin UI to display text in a more highly > contrasted colour. Thanks. -- This message was sent by Atlassian JIRA (v7.6.3#76005) - To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org For additional commands, e-mail: dev-h...@lucene.apache.org
[JENKINS] Lucene-Solr-NightlyTests-master - Build # 1460 - Still unstable
Build: https://builds.apache.org/job/Lucene-Solr-NightlyTests-master/1460/ 3 tests failed. FAILED: org.apache.solr.cloud.hdfs.StressHdfsTest.test Error Message: Error from server at http://127.0.0.1:38055/fa_gum/e: ADDREPLICA failed to create replica Stack Trace: org.apache.solr.client.solrj.impl.HttpSolrClient$RemoteSolrException: Error from server at http://127.0.0.1:38055/fa_gum/e: ADDREPLICA failed to create replica at __randomizedtesting.SeedInfo.seed([217C396308F0001F:A92806B9A60C6DE7]:0) at org.apache.solr.client.solrj.impl.HttpSolrClient.executeMethod(HttpSolrClient.java:643) at org.apache.solr.client.solrj.impl.HttpSolrClient.request(HttpSolrClient.java:255) at org.apache.solr.client.solrj.impl.HttpSolrClient.request(HttpSolrClient.java:244) at org.apache.solr.client.solrj.impl.LBHttpSolrClient.doRequest(LBHttpSolrClient.java:483) at org.apache.solr.client.solrj.impl.LBHttpSolrClient.request(LBHttpSolrClient.java:413) at org.apache.solr.client.solrj.impl.CloudSolrClient.sendRequest(CloudSolrClient.java:1104) at org.apache.solr.client.solrj.impl.CloudSolrClient.requestWithRetryOnStaleState(CloudSolrClient.java:884) at org.apache.solr.client.solrj.impl.CloudSolrClient.request(CloudSolrClient.java:817) at org.apache.solr.client.solrj.SolrRequest.process(SolrRequest.java:194) at org.apache.solr.client.solrj.SolrRequest.process(SolrRequest.java:211) at org.apache.solr.cloud.AbstractFullDistribZkTestBase.createJettys(AbstractFullDistribZkTestBase.java:425) at org.apache.solr.cloud.AbstractFullDistribZkTestBase.createServers(AbstractFullDistribZkTestBase.java:341) at org.apache.solr.BaseDistributedSearchTestCase$ShardsRepeatRule$ShardsFixedStatement.callStatement(BaseDistributedSearchTestCase.java:991) at org.apache.solr.BaseDistributedSearchTestCase$ShardsRepeatRule$ShardsStatement.evaluate(BaseDistributedSearchTestCase.java:968) at com.carrotsearch.randomizedtesting.rules.SystemPropertiesRestoreRule$1.evaluate(SystemPropertiesRestoreRule.java:57) at 
org.apache.lucene.util.TestRuleSetupTeardownChained$1.evaluate(TestRuleSetupTeardownChained.java:49) at org.apache.lucene.util.AbstractBeforeAfterRule$1.evaluate(AbstractBeforeAfterRule.java:45) at org.apache.lucene.util.TestRuleThreadAndTestName$1.evaluate(TestRuleThreadAndTestName.java:48) at org.apache.lucene.util.TestRuleIgnoreAfterMaxFailures$1.evaluate(TestRuleIgnoreAfterMaxFailures.java:64) at org.apache.lucene.util.TestRuleMarkFailure$1.evaluate(TestRuleMarkFailure.java:47) at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36) at com.carrotsearch.randomizedtesting.ThreadLeakControl$StatementRunner.run(ThreadLeakControl.java:368) at com.carrotsearch.randomizedtesting.ThreadLeakControl.forkTimeoutingTask(ThreadLeakControl.java:817) at com.carrotsearch.randomizedtesting.ThreadLeakControl$3.evaluate(ThreadLeakControl.java:468) at com.carrotsearch.randomizedtesting.RandomizedRunner.runSingleTest(RandomizedRunner.java:943) at com.carrotsearch.randomizedtesting.RandomizedRunner$5.evaluate(RandomizedRunner.java:829) at com.carrotsearch.randomizedtesting.RandomizedRunner$6.evaluate(RandomizedRunner.java:879) at com.carrotsearch.randomizedtesting.RandomizedRunner$7.evaluate(RandomizedRunner.java:890) at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36) at com.carrotsearch.randomizedtesting.rules.SystemPropertiesRestoreRule$1.evaluate(SystemPropertiesRestoreRule.java:57) at org.apache.lucene.util.AbstractBeforeAfterRule$1.evaluate(AbstractBeforeAfterRule.java:45) at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36) at org.apache.lucene.util.TestRuleStoreClassName$1.evaluate(TestRuleStoreClassName.java:41) at com.carrotsearch.randomizedtesting.rules.NoShadowingOrOverridesOnMethodsRule$1.evaluate(NoShadowingOrOverridesOnMethodsRule.java:40) at 
com.carrotsearch.randomizedtesting.rules.NoShadowingOrOverridesOnMethodsRule$1.evaluate(NoShadowingOrOverridesOnMethodsRule.java:40) at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36) at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36) at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36) at org.apache.lucene.util.TestRuleAssertionsRequired$1.evaluate(TestRuleAssertionsRequired.java:53) at org.apache.lucene.util.TestRuleMarkFailure$1.evaluate(TestRuleMarkFailure.java:47) at org.apache.lucene.util.TestRuleIgnoreAfterMaxFailures$1.evaluate(TestRuleIgnoreAfterMaxFailures.java:64) at
[JENKINS] Lucene-Solr-7.x-Solaris (64bit/jdk1.8.0) - Build # 414 - Still Unstable!
Build: https://jenkins.thetaphi.de/job/Lucene-Solr-7.x-Solaris/414/ Java: 64bit/jdk1.8.0 -XX:+UseCompressedOops -XX:+UseConcMarkSweepGC 2 tests failed. FAILED: org.apache.solr.cloud.autoscaling.TriggerIntegrationTest.testCooldown Error Message: Stack Trace: java.lang.AssertionError at __randomizedtesting.SeedInfo.seed([A7318FFE63EC6385:968FE21A1D461677]:0) at org.junit.Assert.fail(Assert.java:92) at org.junit.Assert.assertTrue(Assert.java:43) at org.junit.Assert.assertTrue(Assert.java:54) at org.apache.solr.cloud.autoscaling.TriggerIntegrationTest.testCooldown(TriggerIntegrationTest.java:1177) at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62) at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.lang.reflect.Method.invoke(Method.java:498) at com.carrotsearch.randomizedtesting.RandomizedRunner.invoke(RandomizedRunner.java:1737) at com.carrotsearch.randomizedtesting.RandomizedRunner$8.evaluate(RandomizedRunner.java:934) at com.carrotsearch.randomizedtesting.RandomizedRunner$9.evaluate(RandomizedRunner.java:970) at com.carrotsearch.randomizedtesting.RandomizedRunner$10.evaluate(RandomizedRunner.java:984) at com.carrotsearch.randomizedtesting.rules.SystemPropertiesRestoreRule$1.evaluate(SystemPropertiesRestoreRule.java:57) at org.apache.lucene.util.TestRuleSetupTeardownChained$1.evaluate(TestRuleSetupTeardownChained.java:49) at org.apache.lucene.util.AbstractBeforeAfterRule$1.evaluate(AbstractBeforeAfterRule.java:45) at org.apache.lucene.util.TestRuleThreadAndTestName$1.evaluate(TestRuleThreadAndTestName.java:48) at org.apache.lucene.util.TestRuleIgnoreAfterMaxFailures$1.evaluate(TestRuleIgnoreAfterMaxFailures.java:64) at org.apache.lucene.util.TestRuleMarkFailure$1.evaluate(TestRuleMarkFailure.java:47) at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36) at 
com.carrotsearch.randomizedtesting.ThreadLeakControl$StatementRunner.run(ThreadLeakControl.java:368) at com.carrotsearch.randomizedtesting.ThreadLeakControl.forkTimeoutingTask(ThreadLeakControl.java:817) at com.carrotsearch.randomizedtesting.ThreadLeakControl$3.evaluate(ThreadLeakControl.java:468) at com.carrotsearch.randomizedtesting.RandomizedRunner.runSingleTest(RandomizedRunner.java:943) at com.carrotsearch.randomizedtesting.RandomizedRunner$5.evaluate(RandomizedRunner.java:829) at com.carrotsearch.randomizedtesting.RandomizedRunner$6.evaluate(RandomizedRunner.java:879) at com.carrotsearch.randomizedtesting.RandomizedRunner$7.evaluate(RandomizedRunner.java:890) at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36) at com.carrotsearch.randomizedtesting.rules.SystemPropertiesRestoreRule$1.evaluate(SystemPropertiesRestoreRule.java:57) at org.apache.lucene.util.AbstractBeforeAfterRule$1.evaluate(AbstractBeforeAfterRule.java:45) at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36) at org.apache.lucene.util.TestRuleStoreClassName$1.evaluate(TestRuleStoreClassName.java:41) at com.carrotsearch.randomizedtesting.rules.NoShadowingOrOverridesOnMethodsRule$1.evaluate(NoShadowingOrOverridesOnMethodsRule.java:40) at com.carrotsearch.randomizedtesting.rules.NoShadowingOrOverridesOnMethodsRule$1.evaluate(NoShadowingOrOverridesOnMethodsRule.java:40) at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36) at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36) at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36) at org.apache.lucene.util.TestRuleAssertionsRequired$1.evaluate(TestRuleAssertionsRequired.java:53) at org.apache.lucene.util.TestRuleMarkFailure$1.evaluate(TestRuleMarkFailure.java:47) at 
org.apache.lucene.util.TestRuleIgnoreAfterMaxFailures$1.evaluate(TestRuleIgnoreAfterMaxFailures.java:64) at org.apache.lucene.util.TestRuleIgnoreTestSuites$1.evaluate(TestRuleIgnoreTestSuites.java:54) at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36) at com.carrotsearch.randomizedtesting.ThreadLeakControl$StatementRunner.run(ThreadLeakControl.java:368) at java.lang.Thread.run(Thread.java:748) FAILED: org.apache.solr.client.solrj.io.stream.StreamExpressionTest.testDistributions Error Message: Stack Trace: java.lang.AssertionError at __randomizedtesting.SeedInfo.seed([806090A8020763D1:3F9FD102DCFD834D]:0) at
[jira] [Commented] (SOLR-6057) Duplicate background-color in #content #analysis #analysis-result .match (analysis.css)
[ https://issues.apache.org/jira/browse/SOLR-6057?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel=16341486#comment-16341486 ] Cassandra Targett commented on SOLR-6057: - I've attached a patch to finally fix the duplication and change the color. Instead of using green, I opted to change the color to [#F0D9C3|https://www.hexcolortool.com/#F0D9C3] - a tannish color (to me) - because it looks mostly the same for those with any of the various types of color blindness (see screenshots attached: {{Analysis-UI-deuteranopia.png}} is from Sim Daltonism, an app that mimics what some users may see when looking at your application). I also took the liberty of darkening the gray used for the textual elements, which I did in SOLR-11895 for the Logging screen. If there are no objections, we can finally get this one closed for 7.3. > Duplicate background-color in #content #analysis #analysis-result .match > (analysis.css) > --- > > Key: SOLR-6057 > URL: https://issues.apache.org/jira/browse/SOLR-6057 > Project: Solr > Issue Type: Bug > Components: Admin UI >Reporter: Al Krinker >Priority: Trivial > Attachments: Analysis-UI-before.png, Analysis-UI-deuteranopia.png, > Analysis-UI-newColor.png, SOLR-6057.patch > > > Inside of solr/webapp/web/css/styles/analysis.css, you can find #content > #analysis #analysis-result .match element with following content: > #content #analysis #analysis-result .match > { > background-color: #e9eff7; > background-color: #f2f2ff; > } > background-color listed twice. > Also, it was very hard for me to see the highlight. Recommend to change it to > background-color: #FF; -- This message was sent by Atlassian JIRA (v7.6.3#76005) - To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org For additional commands, e-mail: dev-h...@lucene.apache.org
Re: ***UNCHECKED*** Re: [JENKINS-EA] Lucene-Solr-7.x-Linux (64bit/jdk-10-ea+41) - Build # 1250 - Still Failing!
Uwe managed to reproduce it and reported it: https://bugs.openjdk.java.net/browse/JDK-8196296 Thanks Uwe! On Fri, Jan 26, 2018 at 10:33 AM, Robert Muir wrote: > there it is with latest JVM, again from analysis/icu. I think we > should report a bug. > > On Fri, Jan 26, 2018 at 10:30 AM, Policeman Jenkins Server > wrote: >> Build: https://jenkins.thetaphi.de/job/Lucene-Solr-7.x-Linux/1250/ >> Java: 64bit/jdk-10-ea+41 -XX:-UseCompressedOops -XX:+UseG1GC >> >> All tests passed >> >> Build Log: >> [...truncated 3327 lines...] >>[junit4] JVM J1: stdout was not empty, see: >> /home/jenkins/workspace/Lucene-Solr-7.x-Linux/lucene/build/analysis/icu/test/temp/junit4-J1-20180126_153005_25116767526503423538987.sysout >>[junit4] >>> JVM J1 emitted unexpected output (verbatim) >>[junit4] Default case invoked for: >>[junit4]opcode = 0, "Node" >>[junit4] # >>[junit4] # A fatal error has been detected by the Java Runtime >> Environment: >>[junit4] # >>[junit4] # SIGSEGV (0xb) at pc=0x7f988181ea35, pid=15712, tid=15792 >>[junit4] # >>[junit4] # JRE version: OpenJDK Runtime Environment (10.0+41) (build >> 10-ea+41) >>[junit4] # Java VM: OpenJDK 64-Bit Server VM (10-ea+41, mixed mode, >> tiered, g1 gc, linux-amd64) >>[junit4] # Problematic frame: >>[junit4] # V [libjvm.so+0xb31a35] Node::add_req(Node*)+0xb5 >>[junit4] # >>[junit4] # No core dump will be written. Core dumps have been disabled. 
>> To enable core dumping, try "ulimit -c unlimited" before starting Java again >>[junit4] # >>[junit4] # An error report file with more information is saved as: >>[junit4] # >> /home/jenkins/workspace/Lucene-Solr-7.x-Linux/lucene/build/analysis/icu/test/J1/hs_err_pid15712.log >>[junit4] # >>[junit4] # Compiler replay data is saved as: >>[junit4] # >> /home/jenkins/workspace/Lucene-Solr-7.x-Linux/lucene/build/analysis/icu/test/J1/replay_pid15712.log >>[junit4] # >>[junit4] # If you would like to submit a bug report, please visit: >>[junit4] # http://bugreport.java.com/bugreport/crash.jsp >>[junit4] # >>[junit4] <<< JVM J1: EOF >> >> [...truncated 3 lines...] >>[junit4] ERROR: JVM J1 ended with an exception, command line: >> /home/jenkins/tools/java/64bit/jdk-10-ea+41/bin/java -XX:-UseCompressedOops >> -XX:+UseG1GC -XX:+HeapDumpOnOutOfMemoryError >> -XX:HeapDumpPath=/home/jenkins/workspace/Lucene-Solr-7.x-Linux/heapdumps -ea >> -esa --illegal-access=deny -Dtests.prefix=tests >> -Dtests.seed=F19E4EFC03FEFEC8 -Xmx512M -Dtests.iters= -Dtests.verbose=false >> -Dtests.infostream=false -Dtests.codec=random -Dtests.postingsformat=random >> -Dtests.docvaluesformat=random -Dtests.locale=random -Dtests.timezone=random >> -Dtests.directory=random -Dtests.linedocsfile=europarl.lines.txt.gz >> -Dtests.luceneMatchVersion=7.3.0 -Dtests.cleanthreads=perMethod >> -Djava.util.logging.config.file=/home/jenkins/workspace/Lucene-Solr-7.x-Linux/lucene/tools/junit4/logging.properties >> -Dtests.nightly=false -Dtests.weekly=false -Dtests.monster=false >> -Dtests.slow=true -Dtests.asserts=true -Dtests.multiplier=3 -DtempDir=./temp >> -Djava.io.tmpdir=./temp >> -Djunit4.tempDir=/home/jenkins/workspace/Lucene-Solr-7.x-Linux/lucene/build/analysis/icu/test/temp >> -Dcommon.dir=/home/jenkins/workspace/Lucene-Solr-7.x-Linux/lucene >> -Dclover.db.dir=/home/jenkins/workspace/Lucene-Solr-7.x-Linux/lucene/build/clover/db >> >> 
-Djava.security.policy=/home/jenkins/workspace/Lucene-Solr-7.x-Linux/lucene/tools/junit4/tests.policy >> -Dtests.LUCENE_VERSION=7.3.0 -Djetty.testMode=1 -Djetty.insecurerandom=1 >> -Dsolr.directoryFactory=org.apache.solr.core.MockDirectoryFactory >> -Djava.awt.headless=true -Djdk.map.althashing.threshold=0 >> -Dtests.src.home=/home/jenkins/workspace/Lucene-Solr-7.x-Linux >> -Djava.security.egd=file:/dev/./urandom >> -Djunit4.childvm.cwd=/home/jenkins/workspace/Lucene-Solr-7.x-Linux/lucene/build/analysis/icu/test/J1 >> -Djunit4.childvm.id=1 -Djunit4.childvm.count=3 -Dfile.encoding=UTF-8 >> -Djava.security.manager=org.apache.lucene.util.TestSecurityManager >> -Dtests.filterstacks=true -Dtests.leaveTemporary=false -classpath >>
[jira] [Updated] (SOLR-6057) Duplicate background-color in #content #analysis #analysis-result .match (analysis.css)
[ https://issues.apache.org/jira/browse/SOLR-6057?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel ] Cassandra Targett updated SOLR-6057: Attachment: Analysis-UI-newColor.png Analysis-UI-deuteranopia.png Analysis-UI-before.png > Duplicate background-color in #content #analysis #analysis-result .match > (analysis.css) > --- > > Key: SOLR-6057 > URL: https://issues.apache.org/jira/browse/SOLR-6057 > Project: Solr > Issue Type: Bug > Components: Admin UI >Reporter: Al Krinker >Priority: Trivial > Attachments: Analysis-UI-before.png, Analysis-UI-deuteranopia.png, > Analysis-UI-newColor.png, SOLR-6057.patch > > > Inside of solr/webapp/web/css/styles/analysis.css, you can find #content > #analysis #analysis-result .match element with following content: > #content #analysis #analysis-result .match > { > background-color: #e9eff7; > background-color: #f2f2ff; > } > background-color listed twice. > Also, it was very hard for me to see the highlight. Recommend to change it to > background-color: #FF; -- This message was sent by Atlassian JIRA (v7.6.3#76005) - To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org For additional commands, e-mail: dev-h...@lucene.apache.org
[jira] [Updated] (SOLR-6057) Duplicate background-color in #content #analysis #analysis-result .match (analysis.css)
[ https://issues.apache.org/jira/browse/SOLR-6057?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel ] Cassandra Targett updated SOLR-6057: Attachment: SOLR-6057.patch > Duplicate background-color in #content #analysis #analysis-result .match > (analysis.css) > --- > > Key: SOLR-6057 > URL: https://issues.apache.org/jira/browse/SOLR-6057 > Project: Solr > Issue Type: Bug > Components: Admin UI >Reporter: Al Krinker >Priority: Trivial > Attachments: Analysis-UI-before.png, Analysis-UI-deuteranopia.png, > Analysis-UI-newColor.png, SOLR-6057.patch > > > Inside of solr/webapp/web/css/styles/analysis.css, you can find #content > #analysis #analysis-result .match element with following content: > #content #analysis #analysis-result .match > { > background-color: #e9eff7; > background-color: #f2f2ff; > } > background-color listed twice. > Also, it was very hard for me to see the highlight. Recommend to change it to > background-color: #FF; -- This message was sent by Atlassian JIRA (v7.6.3#76005) - To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org For additional commands, e-mail: dev-h...@lucene.apache.org
[jira] [Updated] (SOLR-11911) TestLargeCluster.testSearchRate() failure
[ https://issues.apache.org/jira/browse/SOLR-11911?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel ] Steve Rowe updated SOLR-11911: -- Description: My Jenkins found a branch_7x seed that reproduced 4/5 times for me: {noformat} Checking out Revision af9706cb89335a5aa04f9bcae0c2558a61803b50 (refs/remotes/origin/branch_7x) [...] [junit4] 2> NOTE: reproduce with: ant test -Dtestcase=TestLargeCluster -Dtests.method=testSearchRate -Dtests.seed=2D7724685882A83D -Dtests.slow=true -Dtests.locale=be-BY -Dtests.timezone=Africa/Ouagadougou -Dtests.asserts=true -Dtests.file.encoding=UTF-8 [junit4] FAILURE 1.24s J0 | TestLargeCluster.testSearchRate <<< [junit4]> Throwable #1: java.lang.AssertionError: The trigger did not fire at all [junit4]>at __randomizedtesting.SeedInfo.seed([2D7724685882A83D:703F3AE197440E72]:0) [junit4]>at org.apache.solr.cloud.autoscaling.sim.TestLargeCluster.testSearchRate(TestLargeCluster.java:547) [junit4]>at java.lang.Thread.run(Thread.java:748) [...] [junit4] 2> NOTE: test params are: codec=CheapBastard, sim=RandomSimilarity(queryNorm=true): {}, locale=be-BY, timezone=Africa/Ouagadougou [junit4] 2> NOTE: Linux 4.1.0-custom2-amd64 amd64/Oracle Corporation 1.8.0_151 (64-bit)/cpus=16,threads=1,free=388243840,total=502267904 {noformat} > TestLargeCluster.testSearchRate() failure > - > > Key: SOLR-11911 > URL: https://issues.apache.org/jira/browse/SOLR-11911 > Project: Solr > Issue Type: Bug > Security Level: Public(Default Security Level. Issues are Public) >Reporter: Steve Rowe >Priority: Major > > My Jenkins found a branch_7x seed that reproduced 4/5 times for me: > {noformat} > Checking out Revision af9706cb89335a5aa04f9bcae0c2558a61803b50 > (refs/remotes/origin/branch_7x) > [...] 
>[junit4] 2> NOTE: reproduce with: ant test -Dtestcase=TestLargeCluster > -Dtests.method=testSearchRate -Dtests.seed=2D7724685882A83D -Dtests.slow=true > -Dtests.locale=be-BY -Dtests.timezone=Africa/Ouagadougou -Dtests.asserts=true > -Dtests.file.encoding=UTF-8 >[junit4] FAILURE 1.24s J0 | TestLargeCluster.testSearchRate <<< >[junit4]> Throwable #1: java.lang.AssertionError: The trigger did not > fire at all >[junit4]> at > __randomizedtesting.SeedInfo.seed([2D7724685882A83D:703F3AE197440E72]:0) >[junit4]> at > org.apache.solr.cloud.autoscaling.sim.TestLargeCluster.testSearchRate(TestLargeCluster.java:547) >[junit4]> at java.lang.Thread.run(Thread.java:748) > [...] >[junit4] 2> NOTE: test params are: codec=CheapBastard, > sim=RandomSimilarity(queryNorm=true): {}, locale=be-BY, > timezone=Africa/Ouagadougou >[junit4] 2> NOTE: Linux 4.1.0-custom2-amd64 amd64/Oracle Corporation > 1.8.0_151 (64-bit)/cpus=16,threads=1,free=388243840,total=502267904 > {noformat} -- This message was sent by Atlassian JIRA (v7.6.3#76005) - To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org For additional commands, e-mail: dev-h...@lucene.apache.org
[jira] [Updated] (SOLR-11911) TestLargeCluster.testSearchRate() failure
[ https://issues.apache.org/jira/browse/SOLR-11911?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel ] Steve Rowe updated SOLR-11911: -- Environment: (was: My Jenkins found a branch_7x seed that reproduced 4/5 times for me: {noformat} Checking out Revision af9706cb89335a5aa04f9bcae0c2558a61803b50 (refs/remotes/origin/branch_7x) [...] [junit4] 2> NOTE: reproduce with: ant test -Dtestcase=TestLargeCluster -Dtests.method=testSearchRate -Dtests.seed=2D7724685882A83D -Dtests.slow=true -Dtests.locale=be-BY -Dtests.timezone=Africa/Ouagadougou -Dtests.asserts=true -Dtests.file.encoding=UTF-8 [junit4] FAILURE 1.24s J0 | TestLargeCluster.testSearchRate <<< [junit4]> Throwable #1: java.lang.AssertionError: The trigger did not fire at all [junit4]>at __randomizedtesting.SeedInfo.seed([2D7724685882A83D:703F3AE197440E72]:0) [junit4]>at org.apache.solr.cloud.autoscaling.sim.TestLargeCluster.testSearchRate(TestLargeCluster.java:547) [junit4]>at java.lang.Thread.run(Thread.java:748) [...] [junit4] 2> NOTE: test params are: codec=CheapBastard, sim=RandomSimilarity(queryNorm=true): {}, locale=be-BY, timezone=Africa/Ouagadougou [junit4] 2> NOTE: Linux 4.1.0-custom2-amd64 amd64/Oracle Corporation 1.8.0_151 (64-bit)/cpus=16,threads=1,free=388243840,total=502267904 {noformat}) > TestLargeCluster.testSearchRate() failure > - > > Key: SOLR-11911 > URL: https://issues.apache.org/jira/browse/SOLR-11911 > Project: Solr > Issue Type: Bug > Security Level: Public(Default Security Level. Issues are Public) >Reporter: Steve Rowe >Priority: Major > -- This message was sent by Atlassian JIRA (v7.6.3#76005) - To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org For additional commands, e-mail: dev-h...@lucene.apache.org
[jira] [Created] (SOLR-11911) TestLargeCluster.testSearchRate() failure
Steve Rowe created SOLR-11911: - Summary: TestLargeCluster.testSearchRate() failure Key: SOLR-11911 URL: https://issues.apache.org/jira/browse/SOLR-11911 Project: Solr Issue Type: Bug Security Level: Public (Default Security Level. Issues are Public) Environment: My Jenkins found a branch_7x seed that reproduced 4/5 times for me: {noformat} Checking out Revision af9706cb89335a5aa04f9bcae0c2558a61803b50 (refs/remotes/origin/branch_7x) [...] [junit4] 2> NOTE: reproduce with: ant test -Dtestcase=TestLargeCluster -Dtests.method=testSearchRate -Dtests.seed=2D7724685882A83D -Dtests.slow=true -Dtests.locale=be-BY -Dtests.timezone=Africa/Ouagadougou -Dtests.asserts=true -Dtests.file.encoding=UTF-8 [junit4] FAILURE 1.24s J0 | TestLargeCluster.testSearchRate <<< [junit4]> Throwable #1: java.lang.AssertionError: The trigger did not fire at all [junit4]>at __randomizedtesting.SeedInfo.seed([2D7724685882A83D:703F3AE197440E72]:0) [junit4]>at org.apache.solr.cloud.autoscaling.sim.TestLargeCluster.testSearchRate(TestLargeCluster.java:547) [junit4]>at java.lang.Thread.run(Thread.java:748) [...] [junit4] 2> NOTE: test params are: codec=CheapBastard, sim=RandomSimilarity(queryNorm=true): {}, locale=be-BY, timezone=Africa/Ouagadougou [junit4] 2> NOTE: Linux 4.1.0-custom2-amd64 amd64/Oracle Corporation 1.8.0_151 (64-bit)/cpus=16,threads=1,free=388243840,total=502267904 {noformat} Reporter: Steve Rowe -- This message was sent by Atlassian JIRA (v7.6.3#76005) - To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org For additional commands, e-mail: dev-h...@lucene.apache.org
[jira] [Commented] (SOLR-11891) BinaryResponseWriter fetches unnecessary fields
[ https://issues.apache.org/jira/browse/SOLR-11891?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel=16341423#comment-16341423 ] wei wang commented on SOLR-11891: - Thanks all. {quote}[~weiwang19], by the way, you ought to enable docValues for your uniqueKey defined field. Recent versions of Solr are smart enough to avoid the stored document altogether if all requested fields are docValues. {quote} Is there any performance implications to enable docValues for the uniqueKey field, i.e retrieve via docValues vs stored field? > BinaryResponseWriter fetches unnecessary fields > --- > > Key: SOLR-11891 > URL: https://issues.apache.org/jira/browse/SOLR-11891 > Project: Solr > Issue Type: Improvement > Security Level: Public(Default Security Level. Issues are Public) > Components: Response Writers >Affects Versions: 5.4, 6.4.2, 6.6.2 >Reporter: wei wang >Priority: Major > > We observe that solr query time increases significantly with the number of > rows requested, even all we retrieve for each document is just fl=id,score. > Debugged a bit and see that most of the increased time was spent in > BinaryResponseWriter, converting lucene document into SolrDocument. Inside > convertLuceneDocToSolrDoc(): > [https://github.com/apache/lucene-solr/blob/df874432b9a17b547acb24a01d3491839e6a6b69/solr/core/src/java/org/apache/solr/response/DocsStreamer.java#L182] > > I am a bit puzzled why we need to iterate through all the fields in the > document. Why can’t we just iterate through the requested field list? > [https://github.com/apache/lucene-solr/blob/df874432b9a17b547acb24a01d3491839e6a6b69/solr/core/src/java/org/apache/solr/response/DocsStreamer.java#L156] > > e.g. when pass in the field list as > sdoc = convertLuceneDocToSolrDoc(doc, rctx.getSearcher().getSchema(), fnames) > and just iterate through fnames, there is a significant performance boost in > our case. 
-- This message was sent by Atlassian JIRA (v7.6.3#76005) - To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org For additional commands, e-mail: dev-h...@lucene.apache.org
[jira] [Assigned] (SOLR-11782) LatchWatcher.await doesn’t protect against spurious wakeup
[ https://issues.apache.org/jira/browse/SOLR-11782?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel ] Tomás Fernández Löbbe reassigned SOLR-11782: Resolution: Fixed Assignee: Tomás Fernández Löbbe Fix Version/s: 7.3 master (8.0) > LatchWatcher.await doesn’t protect against spurious wakeup > -- > > Key: SOLR-11782 > URL: https://issues.apache.org/jira/browse/SOLR-11782 > Project: Solr > Issue Type: Bug > Security Level: Public(Default Security Level. Issues are Public) >Reporter: Tomás Fernández Löbbe >Assignee: Tomás Fernández Löbbe >Priority: Minor > Fix For: master (8.0), 7.3 > > Attachments: SOLR-11782.patch, SOLR-11782.patch, SOLR-11782.patch > > > I noticed that {{LatchWatcher.await}} does: > {code} > public void await(long timeout) throws InterruptedException { > synchronized (lock) { > if (this.event != null) return; > lock.wait(timeout); > } > } > {code} > while the recommendation of lock.wait is to check the wait condition even > after the method returns in case of spurious wakeup. {{lock}} is a private > local field to which {{notifyAll}} is called only after a zk event is being > handled. I think we should check the {{await}} method to something like: > {code} > public void await(long timeout) throws InterruptedException { > assert timeout > 0; > long timeoutTime = System.currentTimeMillis() + timeout; > synchronized (lock) { > while (this.event == null) { > long nextTimeout = timeoutTime - System.currentTimeMillis(); > if (nextTimeout <= 0) { > return; > } > lock.wait(nextTimeout); > } > } > } > {code} -- This message was sent by Atlassian JIRA (v7.6.3#76005) - To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org For additional commands, e-mail: dev-h...@lucene.apache.org
[jira] [Commented] (SOLR-11782) LatchWatcher.await doesn’t protect against spurious wakeup
[ https://issues.apache.org/jira/browse/SOLR-11782?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel=16341409#comment-16341409 ] ASF subversion and git services commented on SOLR-11782: Commit a29c2915c79641fc117cc56044d07134f26e8fd7 in lucene-solr's branch refs/heads/branch_7x from [~tomasflobbe] [ https://git-wip-us.apache.org/repos/asf?p=lucene-solr.git;h=a29c291 ] SOLR-11782: Refactor LatchWatcher.await to protect against spurious wakeup > LatchWatcher.await doesn’t protect against spurious wakeup > -- > > Key: SOLR-11782 > URL: https://issues.apache.org/jira/browse/SOLR-11782 > Project: Solr > Issue Type: Bug > Security Level: Public(Default Security Level. Issues are Public) >Reporter: Tomás Fernández Löbbe >Priority: Minor > Attachments: SOLR-11782.patch, SOLR-11782.patch, SOLR-11782.patch > > > I noticed that {{LatchWatcher.await}} does: > {code} > public void await(long timeout) throws InterruptedException { > synchronized (lock) { > if (this.event != null) return; > lock.wait(timeout); > } > } > {code} > while the recommendation of lock.wait is to check the wait condition even > after the method returns in case of spurious wakeup. {{lock}} is a private > local field to which {{notifyAll}} is called only after a zk event is being > handled. I think we should check the {{await}} method to something like: > {code} > public void await(long timeout) throws InterruptedException { > assert timeout > 0; > long timeoutTime = System.currentTimeMillis() + timeout; > synchronized (lock) { > while (this.event == null) { > long nextTimeout = timeoutTime - System.currentTimeMillis(); > if (nextTimeout <= 0) { > return; > } > lock.wait(nextTimeout); > } > } > } > {code} -- This message was sent by Atlassian JIRA (v7.6.3#76005) - To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org For additional commands, e-mail: dev-h...@lucene.apache.org
[jira] [Commented] (SOLR-11782) LatchWatcher.await doesn’t protect against spurious wakeup
[ https://issues.apache.org/jira/browse/SOLR-11782?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel=16341407#comment-16341407 ] ASF subversion and git services commented on SOLR-11782: Commit 56f3f6d9484dd353ac50d47717c872ca9dac16ea in lucene-solr's branch refs/heads/master from [~tomasflobbe] [ https://git-wip-us.apache.org/repos/asf?p=lucene-solr.git;h=56f3f6d ] SOLR-11782: Refactor LatchWatcher.await to protect against spurious wakeup > LatchWatcher.await doesn’t protect against spurious wakeup > -- > > Key: SOLR-11782 > URL: https://issues.apache.org/jira/browse/SOLR-11782 > Project: Solr > Issue Type: Bug > Security Level: Public(Default Security Level. Issues are Public) >Reporter: Tomás Fernández Löbbe >Priority: Minor > Attachments: SOLR-11782.patch, SOLR-11782.patch, SOLR-11782.patch > > > I noticed that {{LatchWatcher.await}} does: > {code} > public void await(long timeout) throws InterruptedException { > synchronized (lock) { > if (this.event != null) return; > lock.wait(timeout); > } > } > {code} > while the recommendation of lock.wait is to check the wait condition even > after the method returns in case of spurious wakeup. {{lock}} is a private > local field to which {{notifyAll}} is called only after a zk event is being > handled. I think we should check the {{await}} method to something like: > {code} > public void await(long timeout) throws InterruptedException { > assert timeout > 0; > long timeoutTime = System.currentTimeMillis() + timeout; > synchronized (lock) { > while (this.event == null) { > long nextTimeout = timeoutTime - System.currentTimeMillis(); > if (nextTimeout <= 0) { > return; > } > lock.wait(nextTimeout); > } > } > } > {code} -- This message was sent by Atlassian JIRA (v7.6.3#76005) - To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org For additional commands, e-mail: dev-h...@lucene.apache.org
[jira] [Resolved] (SOLR-11895) Fix "no events available" message on logging tab
[ https://issues.apache.org/jira/browse/SOLR-11895?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel ] Cassandra Targett resolved SOLR-11895. -- Resolution: Fixed Assignee: Cassandra Targett Fix Version/s: 7.3 master (8.0) Committed my patch for a simple fix. > Fix "no events available" message on logging tab > > > Key: SOLR-11895 > URL: https://issues.apache.org/jira/browse/SOLR-11895 > Project: Solr > Issue Type: Bug > Security Level: Public(Default Security Level. Issues are Public) > Components: Admin UI >Reporter: Shawn Heisey >Assignee: Cassandra Targett >Priority: Major > Fix For: master (8.0), 7.3 > > Attachments: SOLR-11895.patch, logging-after-noEvent.png, > logging-after-withError.png, logging-before.png > > > A properly running Solr server with a very clean configuration may have > absolutely no messages logged at WARN or ERROR. When this happens, clicking > on the logging tab produces a screen with no messages and a spinning cursor, > which looks like the page is hung. > It has come to my attention that there is a "No events available" message > already in the UI, but it doesn't work. I can think of two ways to fix the > problem: > * Fix the detection in the UI so that the "No events available" message > actually works. Also, the spinner could be removed. The spinner is a big > reason that the page looks broken. > * Solr could log a "startup complete" message at WARN so that there is > always something to display. -- This message was sent by Atlassian JIRA (v7.6.3#76005) - To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org For additional commands, e-mail: dev-h...@lucene.apache.org
[jira] [Commented] (SOLR-11895) Fix "no events available" message on logging tab
[ https://issues.apache.org/jira/browse/SOLR-11895?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel=16341400#comment-16341400 ] ASF subversion and git services commented on SOLR-11895: Commit 952cd5981c938159e279386dd1370ad3b7798029 in lucene-solr's branch refs/heads/branch_7x from [~ctargett] [ https://git-wip-us.apache.org/repos/asf?p=lucene-solr.git;h=952cd59 ] SOLR-11895: Fix Logging UI page to show "No Events"; remove loading spinner > Fix "no events available" message on logging tab > > > Key: SOLR-11895 > URL: https://issues.apache.org/jira/browse/SOLR-11895 > Project: Solr > Issue Type: Bug > Security Level: Public(Default Security Level. Issues are Public) > Components: Admin UI >Reporter: Shawn Heisey >Priority: Major > Attachments: SOLR-11895.patch, logging-after-noEvent.png, > logging-after-withError.png, logging-before.png > > > A properly running Solr server with a very clean configuration may have > absolutely no messages logged at WARN or ERROR. When this happens, clicking > on the logging tab produces a screen with no messages and a spinning cursor, > which looks like the page is hung. > It has come to my attention that there is a "No events available" message > already in the UI, but it doesn't work. I can think of two ways to fix the > problem: > * Fix the detection in the UI so that the "No events available" message > actually works. Also, the spinner could be removed. The spinner is a big > reason that the page looks broken. > * Solr could log a "startup complete" message at WARN so that there is > always something to display. -- This message was sent by Atlassian JIRA (v7.6.3#76005) - To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org For additional commands, e-mail: dev-h...@lucene.apache.org
[jira] [Commented] (SOLR-11895) Fix "no events available" message on logging tab
[ https://issues.apache.org/jira/browse/SOLR-11895?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel=16341399#comment-16341399 ] ASF subversion and git services commented on SOLR-11895: Commit 3856ae2d853aedd69a07142fe4ca9f53069bccbb in lucene-solr's branch refs/heads/master from [~ctargett] [ https://git-wip-us.apache.org/repos/asf?p=lucene-solr.git;h=3856ae2 ] SOLR-11895: Fix Logging UI page to show "No Events"; remove loading spinner > Fix "no events available" message on logging tab > > > Key: SOLR-11895 > URL: https://issues.apache.org/jira/browse/SOLR-11895 > Project: Solr > Issue Type: Bug > Security Level: Public(Default Security Level. Issues are Public) > Components: Admin UI >Reporter: Shawn Heisey >Priority: Major > Attachments: SOLR-11895.patch, logging-after-noEvent.png, > logging-after-withError.png, logging-before.png > > > A properly running Solr server with a very clean configuration may have > absolutely no messages logged at WARN or ERROR. When this happens, clicking > on the logging tab produces a screen with no messages and a spinning cursor, > which looks like the page is hung. > It has come to my attention that there is a "No events available" message > already in the UI, but it doesn't work. I can think of two ways to fix the > problem: > * Fix the detection in the UI so that the "No events available" message > actually works. Also, the spinner could be removed. The spinner is a big > reason that the page looks broken. > * Solr could log a "startup complete" message at WARN so that there is > always something to display. -- This message was sent by Atlassian JIRA (v7.6.3#76005) - To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org For additional commands, e-mail: dev-h...@lucene.apache.org
[jira] [Resolved] (SOLR-11722) API to create a Time Routed Alias and first collection
[ https://issues.apache.org/jira/browse/SOLR-11722?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel ] David Smiley resolved SOLR-11722. - Resolution: Fixed Assignee: David Smiley Fix Version/s: 7.3 > API to create a Time Routed Alias and first collection > -- > > Key: SOLR-11722 > URL: https://issues.apache.org/jira/browse/SOLR-11722 > Project: Solr > Issue Type: Sub-task > Security Level: Public(Default Security Level. Issues are Public) > Components: SolrCloud >Reporter: David Smiley >Assignee: David Smiley >Priority: Major > Fix For: 7.3 > > Attachments: SOLR-11722.patch, SOLR-11722.patch > > > This issue is about creating a single API command to create a "Time Routed > Alias" along with its first collection. Need to decide what endpoint URL it > is and parameters. > Perhaps in v2 it'd be {{/api/collections?command=create-routed-alias}} or > alternatively piggy-back off of command=create-alias but we add more options, > perhaps with a prefix like "router"? > Inputs: > * alias name > * misc collection creation metadata (e.g. config, numShards, ...) perhaps in > this context with a prefix like "collection." > * metadata for TimeRoutedAliasUpdateProcessor, currently: router.field > * date specifier for first collection; can include "date math". > We'll certainly add more options as future features unfold. > I believe the collection needs to be created first (referring to the alias > name via a core property), and then the alias pointing to it which demands > collections exist first. When figuring the collection name, you'll need to > reference the format in TimeRoutedAliasUpdateProcessor. -- This message was sent by Atlassian JIRA (v7.6.3#76005) - To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org For additional commands, e-mail: dev-h...@lucene.apache.org
[jira] [Commented] (SOLR-11745) SolrCore doesn't log core if too many closes called on it
[ https://issues.apache.org/jira/browse/SOLR-11745?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel=16341342#comment-16341342 ] Tomás Fernández Löbbe commented on SOLR-11745: -- I believe logging "this" may be intentional, to track a particular instance of SolrCore. Different SolrCore can have the same name (i.e., a core reload) but they won't have the same ID. The name is also useful, but I believe it's included in the "logid", and also in the MDC context already. > SolrCore doesn't log core if too many closes called on it > - > > Key: SOLR-11745 > URL: https://issues.apache.org/jira/browse/SOLR-11745 > Project: Solr > Issue Type: Bug > Security Level: Public(Default Security Level. Issues are Public) > Components: logging >Affects Versions: 7.1 >Reporter: Jeff Miller >Priority: Trivial > Original Estimate: 5m > Remaining Estimate: 5m > > log.error("Too many close [count:{}] on {}. Please report this > exception to solr-u...@lucene.apache.org", count, this ); > Calling this just prints > org.apache.solr.core.SolrCore@4812a0d7 > Suggest changing to getName -- This message was sent by Atlassian JIRA (v7.6.3#76005) - To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org For additional commands, e-mail: dev-h...@lucene.apache.org
[JENKINS] Lucene-Solr-master-Solaris (64bit/jdk1.8.0) - Build # 1648 - Still Unstable!
Build: https://jenkins.thetaphi.de/job/Lucene-Solr-master-Solaris/1648/ Java: 64bit/jdk1.8.0 -XX:-UseCompressedOops -XX:+UseSerialGC 3 tests failed. FAILED: org.apache.solr.cloud.MoveReplicaHDFSTest.testFailedMove Error Message: No live SolrServers available to handle this request:[http://127.0.0.1:44329/solr/MoveReplicaHDFSTest_failed_coll_true, http://127.0.0.1:52786/solr/MoveReplicaHDFSTest_failed_coll_true] Stack Trace: org.apache.solr.client.solrj.SolrServerException: No live SolrServers available to handle this request:[http://127.0.0.1:44329/solr/MoveReplicaHDFSTest_failed_coll_true, http://127.0.0.1:52786/solr/MoveReplicaHDFSTest_failed_coll_true] at __randomizedtesting.SeedInfo.seed([7D4AEC96A8F8D441:D7873F641F2B0191]:0) at org.apache.solr.client.solrj.impl.LBHttpSolrClient.request(LBHttpSolrClient.java:462) at org.apache.solr.client.solrj.impl.CloudSolrClient.sendRequest(CloudSolrClient.java:1104) at org.apache.solr.client.solrj.impl.CloudSolrClient.requestWithRetryOnStaleState(CloudSolrClient.java:884) at org.apache.solr.client.solrj.impl.CloudSolrClient.requestWithRetryOnStaleState(CloudSolrClient.java:991) at org.apache.solr.client.solrj.impl.CloudSolrClient.requestWithRetryOnStaleState(CloudSolrClient.java:991) at org.apache.solr.client.solrj.impl.CloudSolrClient.request(CloudSolrClient.java:817) at org.apache.solr.client.solrj.SolrRequest.process(SolrRequest.java:194) at org.apache.solr.client.solrj.SolrClient.query(SolrClient.java:942) at org.apache.solr.cloud.MoveReplicaTest.testFailedMove(MoveReplicaTest.java:309) at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62) at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.lang.reflect.Method.invoke(Method.java:498) at com.carrotsearch.randomizedtesting.RandomizedRunner.invoke(RandomizedRunner.java:1737) at 
com.carrotsearch.randomizedtesting.RandomizedRunner$8.evaluate(RandomizedRunner.java:934) at com.carrotsearch.randomizedtesting.RandomizedRunner$9.evaluate(RandomizedRunner.java:970) at com.carrotsearch.randomizedtesting.RandomizedRunner$10.evaluate(RandomizedRunner.java:984) at com.carrotsearch.randomizedtesting.rules.SystemPropertiesRestoreRule$1.evaluate(SystemPropertiesRestoreRule.java:57) at org.apache.lucene.util.TestRuleSetupTeardownChained$1.evaluate(TestRuleSetupTeardownChained.java:49) at org.apache.lucene.util.AbstractBeforeAfterRule$1.evaluate(AbstractBeforeAfterRule.java:45) at org.apache.lucene.util.TestRuleThreadAndTestName$1.evaluate(TestRuleThreadAndTestName.java:48) at org.apache.lucene.util.TestRuleIgnoreAfterMaxFailures$1.evaluate(TestRuleIgnoreAfterMaxFailures.java:64) at org.apache.lucene.util.TestRuleMarkFailure$1.evaluate(TestRuleMarkFailure.java:47) at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36) at com.carrotsearch.randomizedtesting.ThreadLeakControl$StatementRunner.run(ThreadLeakControl.java:368) at com.carrotsearch.randomizedtesting.ThreadLeakControl.forkTimeoutingTask(ThreadLeakControl.java:817) at com.carrotsearch.randomizedtesting.ThreadLeakControl$3.evaluate(ThreadLeakControl.java:468) at com.carrotsearch.randomizedtesting.RandomizedRunner.runSingleTest(RandomizedRunner.java:943) at com.carrotsearch.randomizedtesting.RandomizedRunner$5.evaluate(RandomizedRunner.java:829) at com.carrotsearch.randomizedtesting.RandomizedRunner$6.evaluate(RandomizedRunner.java:879) at com.carrotsearch.randomizedtesting.RandomizedRunner$7.evaluate(RandomizedRunner.java:890) at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36) at com.carrotsearch.randomizedtesting.rules.SystemPropertiesRestoreRule$1.evaluate(SystemPropertiesRestoreRule.java:57) at org.apache.lucene.util.AbstractBeforeAfterRule$1.evaluate(AbstractBeforeAfterRule.java:45) at 
com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36) at org.apache.lucene.util.TestRuleStoreClassName$1.evaluate(TestRuleStoreClassName.java:41) at com.carrotsearch.randomizedtesting.rules.NoShadowingOrOverridesOnMethodsRule$1.evaluate(NoShadowingOrOverridesOnMethodsRule.java:40) at com.carrotsearch.randomizedtesting.rules.NoShadowingOrOverridesOnMethodsRule$1.evaluate(NoShadowingOrOverridesOnMethodsRule.java:40) at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36) at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36) at
[jira] [Commented] (SOLR-11722) API to create a Time Routed Alias and first collection
[ https://issues.apache.org/jira/browse/SOLR-11722?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel=16341292#comment-16341292 ] ASF subversion and git services commented on SOLR-11722: Commit 9cf9b8cc496568262fbc3bafeb14280986d7bce4 in lucene-solr's branch refs/heads/branch_7x from [~dsmiley] [ https://git-wip-us.apache.org/repos/asf?p=lucene-solr.git;h=9cf9b8c ] SOLR-11722: Improve the v2/v1 API mapping, including a bug. Wrapped getParameterNamesIterator failed to consider attrToParams. (committing this separately from rest) (cherry picked from commit d8e9ab8) > API to create a Time Routed Alias and first collection > -- > > Key: SOLR-11722 > URL: https://issues.apache.org/jira/browse/SOLR-11722 > Project: Solr > Issue Type: Sub-task > Security Level: Public(Default Security Level. Issues are Public) > Components: SolrCloud >Reporter: David Smiley >Priority: Major > Attachments: SOLR-11722.patch, SOLR-11722.patch > > > This issue is about creating a single API command to create a "Time Routed > Alias" along with its first collection. Need to decide what endpoint URL it > is and parameters. > Perhaps in v2 it'd be {{/api/collections?command=create-routed-alias}} or > alternatively piggy-back off of command=create-alias but we add more options, > perhaps with a prefix like "router"? > Inputs: > * alias name > * misc collection creation metadata (e.g. config, numShards, ...) perhaps in > this context with a prefix like "collection." > * metadata for TimeRoutedAliasUpdateProcessor, currently: router.field > * date specifier for first collection; can include "date math". > We'll certainly add more options as future features unfold. > I believe the collection needs to be created first (referring to the alias > name via a core property), and then the alias pointing to it which demands > collections exist first. When figuring the collection name, you'll need to > reference the format in TimeRoutedAliasUpdateProcessor. 
-- This message was sent by Atlassian JIRA (v7.6.3#76005) - To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org For additional commands, e-mail: dev-h...@lucene.apache.org
[jira] [Commented] (SOLR-11722) API to create a Time Routed Alias and first collection
[ https://issues.apache.org/jira/browse/SOLR-11722?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel=16341293#comment-16341293 ] ASF subversion and git services commented on SOLR-11722: Commit 8ecbf8197042fdab6c5fd86bfc7373c545efd054 in lucene-solr's branch refs/heads/branch_7x from [~dsmiley] [ https://git-wip-us.apache.org/repos/asf?p=lucene-solr.git;h=8ecbf81 ] SOLR-11722: New CREATEROUTEDALIAS cluster command for time routed aliases. (a refactoring of some of the related parts will follow in next commit) (cherry picked from commit a1828a5) > API to create a Time Routed Alias and first collection > -- > > Key: SOLR-11722 > URL: https://issues.apache.org/jira/browse/SOLR-11722 > Project: Solr > Issue Type: Sub-task > Security Level: Public(Default Security Level. Issues are Public) > Components: SolrCloud >Reporter: David Smiley >Priority: Major > Attachments: SOLR-11722.patch, SOLR-11722.patch > > > This issue is about creating a single API command to create a "Time Routed > Alias" along with its first collection. Need to decide what endpoint URL it > is and parameters. > Perhaps in v2 it'd be {{/api/collections?command=create-routed-alias}} or > alternatively piggy-back off of command=create-alias but we add more options, > perhaps with a prefix like "router"? > Inputs: > * alias name > * misc collection creation metadata (e.g. config, numShards, ...) perhaps in > this context with a prefix like "collection." > * metadata for TimeRoutedAliasUpdateProcessor, currently: router.field > * date specifier for first collection; can include "date math". > We'll certainly add more options as future features unfold. > I believe the collection needs to be created first (referring to the alias > name via a core property), and then the alias pointing to it which demands > collections exist first. When figuring the collection name, you'll need to > reference the format in TimeRoutedAliasUpdateProcessor. 
-- This message was sent by Atlassian JIRA (v7.6.3#76005) - To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org For additional commands, e-mail: dev-h...@lucene.apache.org
[jira] [Commented] (SOLR-11722) API to create a Time Routed Alias and first collection
[ https://issues.apache.org/jira/browse/SOLR-11722?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel=16341295#comment-16341295 ] ASF subversion and git services commented on SOLR-11722: Commit ae1f380056e726904defd8c58b8ce26b8cfde338 in lucene-solr's branch refs/heads/branch_7x from [~dsmiley] [ https://git-wip-us.apache.org/repos/asf?p=lucene-solr.git;h=ae1f380 ] SOLR-11722: CHANGES.txt (cherry picked from commit 3c9829e) > API to create a Time Routed Alias and first collection > -- > > Key: SOLR-11722 > URL: https://issues.apache.org/jira/browse/SOLR-11722 > Project: Solr > Issue Type: Sub-task > Security Level: Public(Default Security Level. Issues are Public) > Components: SolrCloud >Reporter: David Smiley >Priority: Major > Attachments: SOLR-11722.patch, SOLR-11722.patch > > > This issue is about creating a single API command to create a "Time Routed > Alias" along with its first collection. Need to decide what endpoint URL it > is and parameters. > Perhaps in v2 it'd be {{/api/collections?command=create-routed-alias}} or > alternatively piggy-back off of command=create-alias but we add more options, > perhaps with a prefix like "router"? > Inputs: > * alias name > * misc collection creation metadata (e.g. config, numShards, ...) perhaps in > this context with a prefix like "collection." > * metadata for TimeRoutedAliasUpdateProcessor, currently: router.field > * date specifier for first collection; can include "date math". > We'll certainly add more options as future features unfold. > I believe the collection needs to be created first (referring to the alias > name via a core property), and then the alias pointing to it which demands > collections exist first. When figuring the collection name, you'll need to > reference the format in TimeRoutedAliasUpdateProcessor. 
-- This message was sent by Atlassian JIRA (v7.6.3#76005) - To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org For additional commands, e-mail: dev-h...@lucene.apache.org
[jira] [Commented] (SOLR-11722) API to create a Time Routed Alias and first collection
[ https://issues.apache.org/jira/browse/SOLR-11722?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel=16341294#comment-16341294 ] ASF subversion and git services commented on SOLR-11722: Commit 5fd8c21c56bbb934ecfe6a33dd2971f278f365b3 in lucene-solr's branch refs/heads/branch_7x from [~dsmiley] [ https://git-wip-us.apache.org/repos/asf?p=lucene-solr.git;h=5fd8c21 ] SOLR-11722: Refactor out a TimeRoutedAlias class from various parts. Also allowed TRA's to be tolerant of pre-existing collections. (cherry picked from commit b0d244f) > API to create a Time Routed Alias and first collection > -- > > Key: SOLR-11722 > URL: https://issues.apache.org/jira/browse/SOLR-11722 > Project: Solr > Issue Type: Sub-task > Security Level: Public(Default Security Level. Issues are Public) > Components: SolrCloud >Reporter: David Smiley >Priority: Major > Attachments: SOLR-11722.patch, SOLR-11722.patch > > > This issue is about creating a single API command to create a "Time Routed > Alias" along with its first collection. Need to decide what endpoint URL it > is and parameters. > Perhaps in v2 it'd be {{/api/collections?command=create-routed-alias}} or > alternatively piggy-back off of command=create-alias but we add more options, > perhaps with a prefix like "router"? > Inputs: > * alias name > * misc collection creation metadata (e.g. config, numShards, ...) perhaps in > this context with a prefix like "collection." > * metadata for TimeRoutedAliasUpdateProcessor, currently: router.field > * date specifier for first collection; can include "date math". > We'll certainly add more options as future features unfold. > I believe the collection needs to be created first (referring to the alias > name via a core property), and then the alias pointing to it which demands > collections exist first. When figuring the collection name, you'll need to > reference the format in TimeRoutedAliasUpdateProcessor. 
-- This message was sent by Atlassian JIRA (v7.6.3#76005) - To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org For additional commands, e-mail: dev-h...@lucene.apache.org
[jira] [Assigned] (SOLR-8327) SolrDispatchFilter is not caching new state format, which results in live fetch from ZK per request if node does not contain core from collection
[ https://issues.apache.org/jira/browse/SOLR-8327?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel ] Ishan Chattopadhyaya reassigned SOLR-8327: -- Assignee: Ishan Chattopadhyaya (was: Varun Thacker) > SolrDispatchFilter is not caching new state format, which results in live > fetch from ZK per request if node does not contain core from collection > - > > Key: SOLR-8327 > URL: https://issues.apache.org/jira/browse/SOLR-8327 > Project: Solr > Issue Type: Bug > Components: SolrCloud >Affects Versions: 5.3 >Reporter: Jessica Cheng Mallet >Assignee: Ishan Chattopadhyaya >Priority: Major > Labels: solrcloud > Attachments: SOLR-8327.patch > > > While perf testing with non-solrj client (request can be sent to any solr > node), we noticed a huge amount of data from Zookeeper in our tcpdump (~1G > for 20 second dump). From the thread dump, we noticed this: > java.lang.Object.wait (Native Method) > java.lang.Object.wait (Object.java:503) > org.apache.zookeeper.ClientCnxn.submitRequest (ClientCnxn.java:1309) > org.apache.zookeeper.ZooKeeper.getData (ZooKeeper.java:1152) > org.apache.solr.common.cloud.SolrZkClient$7.execute (SolrZkClient.java:345) > org.apache.solr.common.cloud.SolrZkClient$7.execute (SolrZkClient.java:342) > org.apache.solr.common.cloud.ZkCmdExecutor.retryOperation > (ZkCmdExecutor.java:61) > org.apache.solr.common.cloud.SolrZkClient.getData (SolrZkClient.java:342) > org.apache.solr.common.cloud.ZkStateReader.getCollectionLive > (ZkStateReader.java:841) > org.apache.solr.common.cloud.ZkStateReader$7.get (ZkStateReader.java:515) > org.apache.solr.common.cloud.ClusterState.getCollectionOrNull > (ClusterState.java:175) > org.apache.solr.common.cloud.ClusterState.getLeader (ClusterState.java:98) > org.apache.solr.servlet.HttpSolrCall.getCoreByCollection > (HttpSolrCall.java:784) > org.apache.solr.servlet.HttpSolrCall.init (HttpSolrCall.java:272) > org.apache.solr.servlet.HttpSolrCall.call (HttpSolrCall.java:417) > 
org.apache.solr.servlet.SolrDispatchFilter.doFilter > (SolrDispatchFilter.java:210) > org.apache.solr.servlet.SolrDispatchFilter.doFilter > (SolrDispatchFilter.java:179) > org.eclipse.jetty.servlet.ServletHandler$CachedChain.doFilter > (ServletHandler.java:1652) > org.eclipse.jetty.servlet.ServletHandler.doHandle (ServletHandler.java:585) > org.eclipse.jetty.server.handler.ScopedHandler.handle (ScopedHandler.java:143) > org.eclipse.jetty.security.SecurityHandler.handle (SecurityHandler.java:577) > org.eclipse.jetty.server.session.SessionHandler.doHandle > (SessionHandler.java:223) > org.eclipse.jetty.server.handler.ContextHandler.doHandle > (ContextHandler.java:1127) > org.eclipse.jetty.servlet.ServletHandler.doScope (ServletHandler.java:515) > org.eclipse.jetty.server.session.SessionHandler.doScope > (SessionHandler.java:185) > org.eclipse.jetty.server.handler.ContextHandler.doScope > (ContextHandler.java:1061) > org.eclipse.jetty.server.handler.ScopedHandler.handle (ScopedHandler.java:141) > org.eclipse.jetty.server.handler.ContextHandlerCollection.handle > (ContextHandlerCollection.java:215) > org.eclipse.jetty.server.handler.HandlerCollection.handle > (HandlerCollection.java:110) > org.eclipse.jetty.server.handler.HandlerWrapper.handle > (HandlerWrapper.java:97) > org.eclipse.jetty.server.Server.handle (Server.java:499) > org.eclipse.jetty.server.HttpChannel.handle (HttpChannel.java:310) > org.eclipse.jetty.server.HttpConnection.onFillable (HttpConnection.java:257) > org.eclipse.jetty.io.AbstractConnection$2.run (AbstractConnection.java:540) > org.eclipse.jetty.util.thread.QueuedThreadPool.runJob > (QueuedThreadPool.java:635) > org.eclipse.jetty.util.thread.QueuedThreadPool$3.run > (QueuedThreadPool.java:555) > java.lang.Thread.run (Thread.java:745) > Looks like SolrDispatchFilter doesn't have caching similar to the > collectionStateCache in CloudSolrClient, so if the node doesn't know about a > collection in the new state format, it just live-fetch it from 
Zookeeper on > every request. -- This message was sent by Atlassian JIRA (v7.6.3#76005) - To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org For additional commands, e-mail: dev-h...@lucene.apache.org
***UNCHECKED*** [JENKINS] Lucene-Solr-master-Linux (32bit/jdk1.8.0_144) - Build # 21351 - Still Unstable!
Build: https://jenkins.thetaphi.de/job/Lucene-Solr-master-Linux/21351/ Java: 32bit/jdk1.8.0_144 -server -XX:+UseConcMarkSweepGC 1 tests failed. FAILED: org.apache.solr.cloud.autoscaling.TriggerIntegrationTest.testNodeLostTrigger Error Message: Stack Trace: java.lang.AssertionError at __randomizedtesting.SeedInfo.seed([838C9FF039B89AE1:877236E20372187B]:0) at org.junit.Assert.fail(Assert.java:92) at org.junit.Assert.assertTrue(Assert.java:43) at org.junit.Assert.assertTrue(Assert.java:54) at org.apache.solr.cloud.autoscaling.TriggerIntegrationTest.testNodeLostTrigger(TriggerIntegrationTest.java:521) at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62) at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.lang.reflect.Method.invoke(Method.java:498) at com.carrotsearch.randomizedtesting.RandomizedRunner.invoke(RandomizedRunner.java:1737) at com.carrotsearch.randomizedtesting.RandomizedRunner$8.evaluate(RandomizedRunner.java:934) at com.carrotsearch.randomizedtesting.RandomizedRunner$9.evaluate(RandomizedRunner.java:970) at com.carrotsearch.randomizedtesting.RandomizedRunner$10.evaluate(RandomizedRunner.java:984) at com.carrotsearch.randomizedtesting.rules.SystemPropertiesRestoreRule$1.evaluate(SystemPropertiesRestoreRule.java:57) at org.apache.lucene.util.TestRuleSetupTeardownChained$1.evaluate(TestRuleSetupTeardownChained.java:49) at org.apache.lucene.util.AbstractBeforeAfterRule$1.evaluate(AbstractBeforeAfterRule.java:45) at org.apache.lucene.util.TestRuleThreadAndTestName$1.evaluate(TestRuleThreadAndTestName.java:48) at org.apache.lucene.util.TestRuleIgnoreAfterMaxFailures$1.evaluate(TestRuleIgnoreAfterMaxFailures.java:64) at org.apache.lucene.util.TestRuleMarkFailure$1.evaluate(TestRuleMarkFailure.java:47) at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36) at 
com.carrotsearch.randomizedtesting.ThreadLeakControl$StatementRunner.run(ThreadLeakControl.java:368) at com.carrotsearch.randomizedtesting.ThreadLeakControl.forkTimeoutingTask(ThreadLeakControl.java:817) at com.carrotsearch.randomizedtesting.ThreadLeakControl$3.evaluate(ThreadLeakControl.java:468) at com.carrotsearch.randomizedtesting.RandomizedRunner.runSingleTest(RandomizedRunner.java:943) at com.carrotsearch.randomizedtesting.RandomizedRunner$5.evaluate(RandomizedRunner.java:829) at com.carrotsearch.randomizedtesting.RandomizedRunner$6.evaluate(RandomizedRunner.java:879) at com.carrotsearch.randomizedtesting.RandomizedRunner$7.evaluate(RandomizedRunner.java:890) at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36) at com.carrotsearch.randomizedtesting.rules.SystemPropertiesRestoreRule$1.evaluate(SystemPropertiesRestoreRule.java:57) at org.apache.lucene.util.AbstractBeforeAfterRule$1.evaluate(AbstractBeforeAfterRule.java:45) at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36) at org.apache.lucene.util.TestRuleStoreClassName$1.evaluate(TestRuleStoreClassName.java:41) at com.carrotsearch.randomizedtesting.rules.NoShadowingOrOverridesOnMethodsRule$1.evaluate(NoShadowingOrOverridesOnMethodsRule.java:40) at com.carrotsearch.randomizedtesting.rules.NoShadowingOrOverridesOnMethodsRule$1.evaluate(NoShadowingOrOverridesOnMethodsRule.java:40) at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36) at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36) at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36) at org.apache.lucene.util.TestRuleAssertionsRequired$1.evaluate(TestRuleAssertionsRequired.java:53) at org.apache.lucene.util.TestRuleMarkFailure$1.evaluate(TestRuleMarkFailure.java:47) at 
org.apache.lucene.util.TestRuleIgnoreAfterMaxFailures$1.evaluate(TestRuleIgnoreAfterMaxFailures.java:64) at org.apache.lucene.util.TestRuleIgnoreTestSuites$1.evaluate(TestRuleIgnoreTestSuites.java:54) at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36) at com.carrotsearch.randomizedtesting.ThreadLeakControl$StatementRunner.run(ThreadLeakControl.java:368) at java.lang.Thread.run(Thread.java:748) Build Log: [...truncated 13492 lines...] [junit4] Suite: org.apache.solr.cloud.autoscaling.TriggerIntegrationTest [junit4] 2> Creating dataDir:
[jira] [Commented] (SOLR-8327) SolrDispatchFilter is not caching new state format, which results in live fetch from ZK per request if node does not contain core from collection
[ https://issues.apache.org/jira/browse/SOLR-8327?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel=16341284#comment-16341284 ] Ishan Chattopadhyaya commented on SOLR-8327: bq. The following code snippet would fetch the state and parse it irrespective of whether the state is updated. It should download the changed state only if the znode version is changed I couldn't find a way to do this without two calls to ZK. Noble/John, unless there's an easy optimization that can be done here, do you think we should just go with the current patch and optimize later (say, using the smart caching technique)? > SolrDispatchFilter is not caching new state format, which results in live > fetch from ZK per request if node does not contain core from collection > - > > Key: SOLR-8327 > URL: https://issues.apache.org/jira/browse/SOLR-8327 > Project: Solr > Issue Type: Bug > Components: SolrCloud >Affects Versions: 5.3 >Reporter: Jessica Cheng Mallet >Assignee: Varun Thacker >Priority: Major > Labels: solrcloud > Attachments: SOLR-8327.patch > > > While perf testing with non-solrj client (request can be sent to any solr > node), we noticed a huge amount of data from Zookeeper in our tcpdump (~1G > for 20 second dump). 
From the thread dump, we noticed this: > java.lang.Object.wait (Native Method) > java.lang.Object.wait (Object.java:503) > org.apache.zookeeper.ClientCnxn.submitRequest (ClientCnxn.java:1309) > org.apache.zookeeper.ZooKeeper.getData (ZooKeeper.java:1152) > org.apache.solr.common.cloud.SolrZkClient$7.execute (SolrZkClient.java:345) > org.apache.solr.common.cloud.SolrZkClient$7.execute (SolrZkClient.java:342) > org.apache.solr.common.cloud.ZkCmdExecutor.retryOperation > (ZkCmdExecutor.java:61) > org.apache.solr.common.cloud.SolrZkClient.getData (SolrZkClient.java:342) > org.apache.solr.common.cloud.ZkStateReader.getCollectionLive > (ZkStateReader.java:841) > org.apache.solr.common.cloud.ZkStateReader$7.get (ZkStateReader.java:515) > org.apache.solr.common.cloud.ClusterState.getCollectionOrNull > (ClusterState.java:175) > org.apache.solr.common.cloud.ClusterState.getLeader (ClusterState.java:98) > org.apache.solr.servlet.HttpSolrCall.getCoreByCollection > (HttpSolrCall.java:784) > org.apache.solr.servlet.HttpSolrCall.init (HttpSolrCall.java:272) > org.apache.solr.servlet.HttpSolrCall.call (HttpSolrCall.java:417) > org.apache.solr.servlet.SolrDispatchFilter.doFilter > (SolrDispatchFilter.java:210) > org.apache.solr.servlet.SolrDispatchFilter.doFilter > (SolrDispatchFilter.java:179) > org.eclipse.jetty.servlet.ServletHandler$CachedChain.doFilter > (ServletHandler.java:1652) > org.eclipse.jetty.servlet.ServletHandler.doHandle (ServletHandler.java:585) > org.eclipse.jetty.server.handler.ScopedHandler.handle (ScopedHandler.java:143) > org.eclipse.jetty.security.SecurityHandler.handle (SecurityHandler.java:577) > org.eclipse.jetty.server.session.SessionHandler.doHandle > (SessionHandler.java:223) > org.eclipse.jetty.server.handler.ContextHandler.doHandle > (ContextHandler.java:1127) > org.eclipse.jetty.servlet.ServletHandler.doScope (ServletHandler.java:515) > org.eclipse.jetty.server.session.SessionHandler.doScope > (SessionHandler.java:185) > 
org.eclipse.jetty.server.handler.ContextHandler.doScope > (ContextHandler.java:1061) > org.eclipse.jetty.server.handler.ScopedHandler.handle (ScopedHandler.java:141) > org.eclipse.jetty.server.handler.ContextHandlerCollection.handle > (ContextHandlerCollection.java:215) > org.eclipse.jetty.server.handler.HandlerCollection.handle > (HandlerCollection.java:110) > org.eclipse.jetty.server.handler.HandlerWrapper.handle > (HandlerWrapper.java:97) > org.eclipse.jetty.server.Server.handle (Server.java:499) > org.eclipse.jetty.server.HttpChannel.handle (HttpChannel.java:310) > org.eclipse.jetty.server.HttpConnection.onFillable (HttpConnection.java:257) > org.eclipse.jetty.io.AbstractConnection$2.run (AbstractConnection.java:540) > org.eclipse.jetty.util.thread.QueuedThreadPool.runJob > (QueuedThreadPool.java:635) > org.eclipse.jetty.util.thread.QueuedThreadPool$3.run > (QueuedThreadPool.java:555) > java.lang.Thread.run (Thread.java:745) > Looks like SolrDispatchFilter doesn't have caching similar to the > collectionStateCache in CloudSolrClient, so if the node doesn't know about a > collection in the new state format, it just live-fetch it from Zookeeper on > every request. -- This message was sent by Atlassian JIRA (v7.6.3#76005) - To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org For additional commands, e-mail: dev-h...@lucene.apache.org
[jira] [Resolved] (SOLR-11910) Solr 6.6 doesn't release physical memory
[ https://issues.apache.org/jira/browse/SOLR-11910?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel ] Erick Erickson resolved SOLR-11910. --- Resolution: Not A Problem Please raise this question on the user's list at solr-u...@lucene.apache.org, see: (http://lucene.apache.org/solr/community.html#mailing-lists-irc) there are a _lot_ more people watching that list who may be able to help. If it's determined that this really is a code issue in Solr and not a configuration/usage problem, we can raise a new JIRA or reopen this one. You're probably seeing this, and if so it's entirely normal behavior: [http://blog.thetaphi.de/2012/07/use-lucenes-mmapdirectory-on-64bit.html] > Solr 6.6 doesn't release physical memory > > > Key: SOLR-11910 > URL: https://issues.apache.org/jira/browse/SOLR-11910 > Project: Solr > Issue Type: Bug > Security Level: Public(Default Security Level. Issues are Public) > Components: SolrJ >Affects Versions: 6.6.2 > Environment: Centos7. JDK 1.8, Solr 6.6 > EC2 > 1 Cores > 15gb ram > 30gb EBS >Reporter: Gabriel Luiz Pereira >Priority: Critical > > We are using 14 cores in solr with a machine that had 2 cores and 15gb ram, > JVM heap set to 4gb. Solr is consuming physical memory until it's exhausted > and doesn't release it. I've searched in forums and issues and found a suggestion to > change MMapDirectory to NIOFSDirectory but it did not work. Is it possible to > resolve? -- This message was sent by Atlassian JIRA (v7.6.3#76005) - To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org For additional commands, e-mail: dev-h...@lucene.apache.org
[jira] [Commented] (SOLR-11722) API to create a Time Routed Alias and first collection
[ https://issues.apache.org/jira/browse/SOLR-11722?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel=16341267#comment-16341267 ] David Smiley commented on SOLR-11722: - The 3rd commit refactored out a TimeRoutedAlias class from some commonly needed code (including parameter validation). This functionality is all a bit more tidy now. Another change of note is that I made the TRA tolerant of the possibility that the collection may already exist. It's not likely but if so it's adopted. [~noble.paul] FYI you may find the commit [https://git-wip-us.apache.org/repos/asf?p=lucene-solr.git;h=d8e9ab8] interesting as it relates to the API v2/v1 mapping, and fixed a bug which we discovered while working on the feature here. > API to create a Time Routed Alias and first collection > -- > > Key: SOLR-11722 > URL: https://issues.apache.org/jira/browse/SOLR-11722 > Project: Solr > Issue Type: Sub-task > Security Level: Public(Default Security Level. Issues are Public) > Components: SolrCloud >Reporter: David Smiley >Priority: Major > Attachments: SOLR-11722.patch, SOLR-11722.patch > > > This issue is about creating a single API command to create a "Time Routed > Alias" along with its first collection. Need to decide what endpoint URL it > is and parameters. > Perhaps in v2 it'd be {{/api/collections?command=create-routed-alias}} or > alternatively piggy-back off of command=create-alias but we add more options, > perhaps with a prefix like "router"? > Inputs: > * alias name > * misc collection creation metadata (e.g. config, numShards, ...) perhaps in > this context with a prefix like "collection." > * metadata for TimeRoutedAliasUpdateProcessor, currently: router.field > * date specifier for first collection; can include "date math". > We'll certainly add more options as future features unfold. 
> I believe the collection needs to be created first (referring to the alias > name via a core property), and then the alias pointing to it which demands > collections exist first. When figuring the collection name, you'll need to > reference the format in TimeRoutedAliasUpdateProcessor. -- This message was sent by Atlassian JIRA (v7.6.3#76005) - To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org For additional commands, e-mail: dev-h...@lucene.apache.org
[jira] [Commented] (SOLR-11722) API to create a Time Routed Alias and first collection
[ https://issues.apache.org/jira/browse/SOLR-11722?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel=16341262#comment-16341262 ] ASF subversion and git services commented on SOLR-11722: Commit 3c9829efad47d92497afa60ce557ee12fab0934a in lucene-solr's branch refs/heads/master from [~dsmiley] [ https://git-wip-us.apache.org/repos/asf?p=lucene-solr.git;h=3c9829e ] SOLR-11722: CHANGES.txt > API to create a Time Routed Alias and first collection > -- > > Key: SOLR-11722 > URL: https://issues.apache.org/jira/browse/SOLR-11722 > Project: Solr > Issue Type: Sub-task > Security Level: Public(Default Security Level. Issues are Public) > Components: SolrCloud >Reporter: David Smiley >Priority: Major > Attachments: SOLR-11722.patch, SOLR-11722.patch > > > This issue is about creating a single API command to create a "Time Routed > Alias" along with its first collection. Need to decide what endpoint URL it > is and parameters. > Perhaps in v2 it'd be {{/api/collections?command=create-routed-alias}} or > alternatively piggy-back off of command=create-alias but we add more options, > perhaps with a prefix like "router"? > Inputs: > * alias name > * misc collection creation metadata (e.g. config, numShards, ...) perhaps in > this context with a prefix like "collection." > * metadata for TimeRoutedAliasUpdateProcessor, currently: router.field > * date specifier for first collection; can include "date math". > We'll certainly add more options as future features unfold. > I believe the collection needs to be created first (referring to the alias > name via a core property), and then the alias pointing to it which demands > collections exist first. When figuring the collection name, you'll need to > reference the format in TimeRoutedAliasUpdateProcessor. -- This message was sent by Atlassian JIRA (v7.6.3#76005) - To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org For additional commands, e-mail: dev-h...@lucene.apache.org
***UNCHECKED*** [jira] [Commented] (SOLR-11828) Solr tests fail on Fedora 26, 27
[ https://issues.apache.org/jira/browse/SOLR-11828?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel=16341256#comment-16341256 ] Ishan Chattopadhyaya commented on SOLR-11828: - I kept Fedora 26 updated, and, to my surprise, the tests are passing fine now :-) I upgraded to 27, and the same story. Perhaps some latest update fixed this. > Solr tests fail on Fedora 26, 27 > > > Key: SOLR-11828 > URL: https://issues.apache.org/jira/browse/SOLR-11828 > Project: Solr > Issue Type: Bug > Security Level: Public(Default Security Level. Issues are Public) >Reporter: Ishan Chattopadhyaya >Priority: Major > > This may be a non-Solr issue, but I am not fully sure. I see tons of test > failures on Fedora 26 and 27, but everything is fine on Fedora 25. This is > the case even when the same kernel version was used for both 25 and 26 > (passed on 25, failed on 26). Reasons of failure seem to be ZK connection > loss. Using docker container for Fedora 25 seems to work. > Filing a JIRA just so that someone can investigate and also so that someone > avoids using Solr on production on these distributions, until a fix is found. > BTW, [~gus_heck] reported that he saw similar issues with Ubuntu 17.04: > http://lucene.472066.n3.nabble.com/6-6-2-Release-tp4358534p4358682.html > Here's some discussion: > Ishan's initial post (I mistook this to be a kernel issue at first): > http://lucene.472066.n3.nabble.com/6-6-2-Release-tp4358534p4358603.html > Uwe's post: > http://lucene.472066.n3.nabble.com/6-6-2-Release-tp4358534p4358712.html -- This message was sent by Atlassian JIRA (v7.6.3#76005) - To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org For additional commands, e-mail: dev-h...@lucene.apache.org
[jira] [Resolved] (SOLR-11828) Solr tests fail on Fedora 26, 27
[ https://issues.apache.org/jira/browse/SOLR-11828?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel ] Ishan Chattopadhyaya resolved SOLR-11828. - Resolution: Cannot Reproduce Assignee: Ishan Chattopadhyaya > Solr tests fail on Fedora 26, 27 > > > Key: SOLR-11828 > URL: https://issues.apache.org/jira/browse/SOLR-11828 > Project: Solr > Issue Type: Bug > Security Level: Public(Default Security Level. Issues are Public) >Reporter: Ishan Chattopadhyaya >Assignee: Ishan Chattopadhyaya >Priority: Major > > This may be a non-Solr issue, but I am not fully sure. I see tons of test > failures on Fedora 26 and 27, but everything is fine on Fedora 25. This is > the case even when the same kernel version was used for both 25 and 26 > (passed on 25, failed on 26). Reasons of failure seem to be ZK connection > loss. Using docker container for Fedora 25 seems to work. > Filing a JIRA just so that someone can investigate and also so that someone > avoids using Solr on production on these distributions, until a fix is found. > BTW, [~gus_heck] reported that he saw similar issues with Ubuntu 17.04: > http://lucene.472066.n3.nabble.com/6-6-2-Release-tp4358534p4358682.html > Here's some discussion: > Ishan's initial post (I mistook this to be a kernel issue at first): > http://lucene.472066.n3.nabble.com/6-6-2-Release-tp4358534p4358603.html > Uwe's post: > http://lucene.472066.n3.nabble.com/6-6-2-Release-tp4358534p4358712.html -- This message was sent by Atlassian JIRA (v7.6.3#76005) - To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org For additional commands, e-mail: dev-h...@lucene.apache.org
[jira] [Commented] (SOLR-11722) API to create a Time Routed Alias and first collection
[ https://issues.apache.org/jira/browse/SOLR-11722?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel=16341248#comment-16341248 ] ASF subversion and git services commented on SOLR-11722: Commit d8e9ab8785e638ecf07eed43055aa02332eb7862 in lucene-solr's branch refs/heads/master from [~dsmiley] [ https://git-wip-us.apache.org/repos/asf?p=lucene-solr.git;h=d8e9ab8 ] SOLR-11722: Improve the v2/v1 API mapping, including a bug. Wrapped getParameterNamesIterator failed to consider attrToParams. (committing this separately from rest) > API to create a Time Routed Alias and first collection > -- > > Key: SOLR-11722 > URL: https://issues.apache.org/jira/browse/SOLR-11722 > Project: Solr > Issue Type: Sub-task > Security Level: Public(Default Security Level. Issues are Public) > Components: SolrCloud >Reporter: David Smiley >Priority: Major > Attachments: SOLR-11722.patch, SOLR-11722.patch > > > This issue is about creating a single API command to create a "Time Routed > Alias" along with its first collection. Need to decide what endpoint URL it > is and parameters. > Perhaps in v2 it'd be {{/api/collections?command=create-routed-alias}} or > alternatively piggy-back off of command=create-alias but we add more options, > perhaps with a prefix like "router"? > Inputs: > * alias name > * misc collection creation metadata (e.g. config, numShards, ...) perhaps in > this context with a prefix like "collection." > * metadata for TimeRoutedAliasUpdateProcessor, currently: router.field > * date specifier for first collection; can include "date math". > We'll certainly add more options as future features unfold. > I believe the collection needs to be created first (referring to the alias > name via a core property), and then the alias pointing to it which demands > collections exist first. When figuring the collection name, you'll need to > reference the format in TimeRoutedAliasUpdateProcessor. 
-- This message was sent by Atlassian JIRA (v7.6.3#76005) - To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org For additional commands, e-mail: dev-h...@lucene.apache.org
[jira] [Commented] (SOLR-11722) API to create a Time Routed Alias and first collection
[ https://issues.apache.org/jira/browse/SOLR-11722?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel=16341250#comment-16341250 ] ASF subversion and git services commented on SOLR-11722: Commit b0d244f656b5f0030bfda97aaf5e6e7ad085325c in lucene-solr's branch refs/heads/master from [~dsmiley] [ https://git-wip-us.apache.org/repos/asf?p=lucene-solr.git;h=b0d244f ] SOLR-11722: Refactor out a TimeRoutedAlias class from various parts. Also allowed TRA's to be tolerant of pre-existing collections. > API to create a Time Routed Alias and first collection > -- > > Key: SOLR-11722 > URL: https://issues.apache.org/jira/browse/SOLR-11722 > Project: Solr > Issue Type: Sub-task > Security Level: Public(Default Security Level. Issues are Public) > Components: SolrCloud >Reporter: David Smiley >Priority: Major > Attachments: SOLR-11722.patch, SOLR-11722.patch > > > This issue is about creating a single API command to create a "Time Routed > Alias" along with its first collection. Need to decide what endpoint URL it > is and parameters. > Perhaps in v2 it'd be {{/api/collections?command=create-routed-alias}} or > alternatively piggy-back off of command=create-alias but we add more options, > perhaps with a prefix like "router"? > Inputs: > * alias name > * misc collection creation metadata (e.g. config, numShards, ...) perhaps in > this context with a prefix like "collection." > * metadata for TimeRoutedAliasUpdateProcessor, currently: router.field > * date specifier for first collection; can include "date math". > We'll certainly add more options as future features unfold. > I believe the collection needs to be created first (referring to the alias > name via a core property), and then the alias pointing to it which demands > collections exist first. When figuring the collection name, you'll need to > reference the format in TimeRoutedAliasUpdateProcessor. 
-- This message was sent by Atlassian JIRA (v7.6.3#76005) - To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org For additional commands, e-mail: dev-h...@lucene.apache.org
[jira] [Commented] (SOLR-11722) API to create a Time Routed Alias and first collection
[ https://issues.apache.org/jira/browse/SOLR-11722?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel=16341249#comment-16341249 ] ASF subversion and git services commented on SOLR-11722: Commit a1828a5664983b8e26177537d233a78d2c0c33cd in lucene-solr's branch refs/heads/master from [~dsmiley] [ https://git-wip-us.apache.org/repos/asf?p=lucene-solr.git;h=a1828a5 ] SOLR-11722: New CREATEROUTEDALIAS cluster command for time routed aliases. (a refactoring of some of the related parts will follow in next commit) > API to create a Time Routed Alias and first collection > -- > > Key: SOLR-11722 > URL: https://issues.apache.org/jira/browse/SOLR-11722 > Project: Solr > Issue Type: Sub-task > Security Level: Public(Default Security Level. Issues are Public) > Components: SolrCloud >Reporter: David Smiley >Priority: Major > Attachments: SOLR-11722.patch, SOLR-11722.patch > > > This issue is about creating a single API command to create a "Time Routed > Alias" along with its first collection. Need to decide what endpoint URL it > is and parameters. > Perhaps in v2 it'd be {{/api/collections?command=create-routed-alias}} or > alternatively piggy-back off of command=create-alias but we add more options, > perhaps with a prefix like "router"? > Inputs: > * alias name > * misc collection creation metadata (e.g. config, numShards, ...) perhaps in > this context with a prefix like "collection." > * metadata for TimeRoutedAliasUpdateProcessor, currently: router.field > * date specifier for first collection; can include "date math". > We'll certainly add more options as future features unfold. > I believe the collection needs to be created first (referring to the alias > name via a core property), and then the alias pointing to it which demands > collections exist first. When figuring the collection name, you'll need to > reference the format in TimeRoutedAliasUpdateProcessor. 
-- This message was sent by Atlassian JIRA (v7.6.3#76005) - To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org For additional commands, e-mail: dev-h...@lucene.apache.org
[jira] [Commented] (SOLR-11766) Ref Guide: redesign Streaming Expression reference pages
[ https://issues.apache.org/jira/browse/SOLR-11766?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel=16341225#comment-16341225 ] Cassandra Targett commented on SOLR-11766: -- An interesting idea, Joel, and I totally understand why you're going that way. A few counter-points, or maybe just additional things to consider. bq. Streaming Expressions tend to hold up the release of the main reference guide. At the risk of sounding flippant or disrespectful, there's a really easy way to solve that: do your doc updates at the same time you add a new expression. The "wait until the RC has been cut then start doc updates" approach is the only thing holding up merging the Ref Guide release with the main artifact release (and I'll add it is not in the slightest pleasant for me, the one who is nearly always the RM). Sure, one way around it is to release separately, but a more direct way is to just do them at the same time. bq. If they were in their own guide they could release later At some point we need to split up the Ref Guide - it's massive (1,200 pages in PDF) and grows more untenable with each release - but there are a number of factors to consider: # Unless all the separate PDFs are released at the same time, they require VOTE threads for each release. # Even if we changed to consider the HTML as the official format, we would still need a VOTE for each release. That means instead of going to a single release process for Lucene/Solr + Ref Guide, we'd still have at least 2 and possibly 3 or more release processes. The current Ref Guide release takes ~1 week of my time full-time every time - it's not a hit the button and walk away kind of thing if you want it done with any quality. IOW, since I'm the one doing it 99.9% of the time, I don't have time for more than a single release cycle no matter how many PDFs are generated. 
# I've gone through the exercise in the past of splitting a single massive PDF into multiple separate PDFs, and one of the most hellish aspects of it was trying to handle links to sections that end up in different PDF files (like, I want to link to details about fields while describing the types of fields expressions would support). You can't. And since we have both an HTML and PDF format, links that work for the HTML version won't work for the PDF, so there would be additional issues to straighten out to make that work properly. bq. Right now the Streaming Expression documentation is being wedged into an existing format I infer from this comment that you have ideas but think they may not possible. Beyond splitting the pages into sub-sections, I don't recall other ideas you've shared that were rejected due to format limitations...but perhaps I'm forgetting something? We have two real limitations, IMO: 1) the fact that the official release artifact is the PDF, which is very book-like and does not support many interactive features we may want to include; and 2) our imaginations and skills. Regarding the first limitation, some users want a PDF. We would still be asked to create one even if it was not the "official" format that we vote on. I don't see it going away entirely, so that limitation will always be around. For the second - The HTML version is incredibly flexible. We can lay out those pages however we want. We can make the Streaming Expression pages look totally different than other pages by providing a layout template for them and telling each page to use it. We can add javascript this or that to do all kinds of fun things. But we need ideas first (I've shared those I've had), and then possibly someone with deeper skills than mine to make it happen. 
If we do come up with some great ideas, we may need to do a couple of things to make the same info appear properly in the PDF, but nearly anything you can think of for online information design is within our grasp if we have a) the ideas and the ability to implement them, and b) the willingness to maintain it release-to-release as new expressions are added. It's not that hard to make things work properly in the PDF, you just need to be cognizant of the need for it. At the very least, I think we should move Streaming Expressions out of the "Searching" section and make it a top-level section - the scope of what it can do is way beyond "Searching" now and it deserves it - I'll do that in the patch I'm working on with the other changes I've been making. But before we go ahead and make it a standalone Guide on its own, I'd like to hear your point of view on the other issues I've raised here. > Ref Guide: redesign Streaming Expression reference pages > > > Key: SOLR-11766 > URL: https://issues.apache.org/jira/browse/SOLR-11766 > Project: Solr > Issue Type: Improvement > Security Level: Public(Default
***UNCHECKED*** [jira] [Comment Edited] (SOLR-10307) Provide SSL/TLS keystore password a more secure way
[ https://issues.apache.org/jira/browse/SOLR-10307?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel=16341189#comment-16341189 ] Michael Suzuki edited comment on SOLR-10307 at 1/26/18 3:57 PM: [~manokovacs] I noticed the following line of code in SSLConfigurations.java: {code:java} if (isEmpty(System.getProperty(SysProps.SSL_TRUST_STORE_PASSWORD)) && !(isEmpty(clientTruststorePassword) && isEmpty(truststorePassword))) {{code} Why do we check for SysProps.SSL_TRUST_STORE_PASSWORD, when that is populated the SSL fails to start correctly. To recreate the issue start solr with ssl and pass the following: {code} -Djavax.net.ssl.keyStorePassword=yourpassword. {code} As the System.getProperty(SysProps.SSL_TRUST_STORE_PASSWORD is not empty it will skip the block of code and as a result it is unaware of the password and defaults to secret as per the jetty-ssl.xml was (Author: michaelsuzuki): [~manokovacs] I noticed the following line of code in SSLConfigurations.java: {code:java} if (isEmpty(System.getProperty(SysProps.SSL_TRUST_STORE_PASSWORD)) && !(isEmpty(clientTruststorePassword) && isEmpty(truststorePassword))) {{code} Why do we check for SysProps.SSL_TRUST_STORE_PASSWORD, when that is populated the SSL fails to start correctly. To recreate the issue start solr with ssl and pass the following: {code} -Djavax.net.ssl.keyStorePassword=yourpassword. {code} As the System.getProperty(SysProps.SSL_TRUST_STORE_PASSWORD is not empty it will skip the block of code, as a result it is unaware of the password and defaults to secret as per the jetty-ssl.xml > Provide SSL/TLS keystore password a more secure way > --- > > Key: SOLR-10307 > URL: https://issues.apache.org/jira/browse/SOLR-10307 > Project: Solr > Issue Type: Improvement > Security Level: Public(Default Security Level. 
Issues are Public) > Components: security >Reporter: Mano Kovacs >Assignee: Mark Miller >Priority: Major > Fix For: 6.7, 7.0 > > Attachments: SOLR-10307.2.patch, SOLR-10307.patch, SOLR-10307.patch, > SOLR-10307.patch > > > Currently the only way to pass server and client side SSL keytstore and > truststore passwords is to set specific environment variables that will be > passed as system properties, through command line parameter. > First option is to pass passwords through environment variables which gives a > better level of protection. Second option would be to use hadoop credential > provider interface to access credential store. -- This message was sent by Atlassian JIRA (v7.6.3#76005) - To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org For additional commands, e-mail: dev-h...@lucene.apache.org
[jira] [Commented] (SOLR-10307) Provide SSL/TLS keystore password a more secure way
[ https://issues.apache.org/jira/browse/SOLR-10307?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel=16341189#comment-16341189 ] Michael Suzuki commented on SOLR-10307: --- [~manokovacs] I noticed the following line of code in SSLConfigurations.java: {code:java} if (isEmpty(System.getProperty(SysProps.SSL_TRUST_STORE_PASSWORD)) && !(isEmpty(clientTruststorePassword) && isEmpty(truststorePassword))) {{code} Why do we check for SysProps.SSL_TRUST_STORE_PASSWORD, when that is populated the SSL fails to start correctly. To recreate the issue start solr with ssl and pass the following: {code} -Djavax.net.ssl.keyStorePassword=yourpassword. {code} As the System.getProperty(SysProps.SSL_TRUST_STORE_PASSWORD is not empty it will skip the block of code, as a result it is unaware of the password and defaults to secret as per the jetty-ssl.xml > Provide SSL/TLS keystore password a more secure way > --- > > Key: SOLR-10307 > URL: https://issues.apache.org/jira/browse/SOLR-10307 > Project: Solr > Issue Type: Improvement > Security Level: Public(Default Security Level. Issues are Public) > Components: security >Reporter: Mano Kovacs >Assignee: Mark Miller >Priority: Major > Fix For: 6.7, 7.0 > > Attachments: SOLR-10307.2.patch, SOLR-10307.patch, SOLR-10307.patch, > SOLR-10307.patch > > > Currently the only way to pass server and client side SSL keytstore and > truststore passwords is to set specific environment variables that will be > passed as system properties, through command line parameter. > First option is to pass passwords through environment variables which gives a > better level of protection. Second option would be to use hadoop credential > provider interface to access credential store. -- This message was sent by Atlassian JIRA (v7.6.3#76005) - To unsubscribe, e-mail: dev-unsubscr...@lucene.apache.org For additional commands, e-mail: dev-h...@lucene.apache.org
***UNCHECKED*** [JENKINS] Lucene-Solr-7.x-MacOSX (64bit/jdk-9) - Build # 422 - Unstable!
Build: https://jenkins.thetaphi.de/job/Lucene-Solr-7.x-MacOSX/422/ Java: 64bit/jdk-9 -XX:+UseCompressedOops -XX:+UseParallelGC 2 tests failed. FAILED: org.apache.solr.cloud.FullSolrCloudDistribCmdsTest.test Error Message: Could not find collection:collection2 Stack Trace: java.lang.AssertionError: Could not find collection:collection2 at __randomizedtesting.SeedInfo.seed([BEA269D0808D5A8F:36F6560A2E713777]:0) at org.junit.Assert.fail(Assert.java:93) at org.junit.Assert.assertTrue(Assert.java:43) at org.junit.Assert.assertNotNull(Assert.java:526) at org.apache.solr.cloud.AbstractDistribZkTestBase.waitForRecoveriesToFinish(AbstractDistribZkTestBase.java:155) at org.apache.solr.cloud.AbstractDistribZkTestBase.waitForRecoveriesToFinish(AbstractDistribZkTestBase.java:140) at org.apache.solr.cloud.AbstractDistribZkTestBase.waitForRecoveriesToFinish(AbstractDistribZkTestBase.java:135) at org.apache.solr.cloud.AbstractFullDistribZkTestBase.waitForRecoveriesToFinish(AbstractFullDistribZkTestBase.java:915) at org.apache.solr.cloud.FullSolrCloudDistribCmdsTest.testIndexingBatchPerRequestWithHttpSolrClient(FullSolrCloudDistribCmdsTest.java:612) at org.apache.solr.cloud.FullSolrCloudDistribCmdsTest.test(FullSolrCloudDistribCmdsTest.java:152) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:564) at com.carrotsearch.randomizedtesting.RandomizedRunner.invoke(RandomizedRunner.java:1737) at com.carrotsearch.randomizedtesting.RandomizedRunner$8.evaluate(RandomizedRunner.java:934) at com.carrotsearch.randomizedtesting.RandomizedRunner$9.evaluate(RandomizedRunner.java:970) at com.carrotsearch.randomizedtesting.RandomizedRunner$10.evaluate(RandomizedRunner.java:984) at 
org.apache.solr.BaseDistributedSearchTestCase$ShardsRepeatRule$ShardsFixedStatement.callStatement(BaseDistributedSearchTestCase.java:993) at org.apache.solr.BaseDistributedSearchTestCase$ShardsRepeatRule$ShardsStatement.evaluate(BaseDistributedSearchTestCase.java:968) at com.carrotsearch.randomizedtesting.rules.SystemPropertiesRestoreRule$1.evaluate(SystemPropertiesRestoreRule.java:57) at org.apache.lucene.util.TestRuleSetupTeardownChained$1.evaluate(TestRuleSetupTeardownChained.java:49) at org.apache.lucene.util.AbstractBeforeAfterRule$1.evaluate(AbstractBeforeAfterRule.java:45) at org.apache.lucene.util.TestRuleThreadAndTestName$1.evaluate(TestRuleThreadAndTestName.java:48) at org.apache.lucene.util.TestRuleIgnoreAfterMaxFailures$1.evaluate(TestRuleIgnoreAfterMaxFailures.java:64) at org.apache.lucene.util.TestRuleMarkFailure$1.evaluate(TestRuleMarkFailure.java:47) at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36) at com.carrotsearch.randomizedtesting.ThreadLeakControl$StatementRunner.run(ThreadLeakControl.java:368) at com.carrotsearch.randomizedtesting.ThreadLeakControl.forkTimeoutingTask(ThreadLeakControl.java:817) at com.carrotsearch.randomizedtesting.ThreadLeakControl$3.evaluate(ThreadLeakControl.java:468) at com.carrotsearch.randomizedtesting.RandomizedRunner.runSingleTest(RandomizedRunner.java:943) at com.carrotsearch.randomizedtesting.RandomizedRunner$5.evaluate(RandomizedRunner.java:829) at com.carrotsearch.randomizedtesting.RandomizedRunner$6.evaluate(RandomizedRunner.java:879) at com.carrotsearch.randomizedtesting.RandomizedRunner$7.evaluate(RandomizedRunner.java:890) at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36) at com.carrotsearch.randomizedtesting.rules.SystemPropertiesRestoreRule$1.evaluate(SystemPropertiesRestoreRule.java:57) at org.apache.lucene.util.AbstractBeforeAfterRule$1.evaluate(AbstractBeforeAfterRule.java:45) at 
com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36) at org.apache.lucene.util.TestRuleStoreClassName$1.evaluate(TestRuleStoreClassName.java:41) at com.carrotsearch.randomizedtesting.rules.NoShadowingOrOverridesOnMethodsRule$1.evaluate(NoShadowingOrOverridesOnMethodsRule.java:40) at com.carrotsearch.randomizedtesting.rules.NoShadowingOrOverridesOnMethodsRule$1.evaluate(NoShadowingOrOverridesOnMethodsRule.java:40) at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36) at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36) at
***UNCHECKED*** Re: [JENKINS-EA] Lucene-Solr-7.x-Linux (64bit/jdk-10-ea+41) - Build # 1250 - Still Failing!
there it is with latest JVM, again from analysis/icu. I think we should report a bug. On Fri, Jan 26, 2018 at 10:30 AM, Policeman Jenkins Serverwrote: > Build: https://jenkins.thetaphi.de/job/Lucene-Solr-7.x-Linux/1250/ > Java: 64bit/jdk-10-ea+41 -XX:-UseCompressedOops -XX:+UseG1GC > > All tests passed > > Build Log: > [...truncated 3327 lines...] >[junit4] JVM J1: stdout was not empty, see: > /home/jenkins/workspace/Lucene-Solr-7.x-Linux/lucene/build/analysis/icu/test/temp/junit4-J1-20180126_153005_25116767526503423538987.sysout >[junit4] >>> JVM J1 emitted unexpected output (verbatim) >[junit4] Default case invoked for: >[junit4]opcode = 0, "Node" >[junit4] # >[junit4] # A fatal error has been detected by the Java Runtime Environment: >[junit4] # >[junit4] # SIGSEGV (0xb) at pc=0x7f988181ea35, pid=15712, tid=15792 >[junit4] # >[junit4] # JRE version: OpenJDK Runtime Environment (10.0+41) (build > 10-ea+41) >[junit4] # Java VM: OpenJDK 64-Bit Server VM (10-ea+41, mixed mode, > tiered, g1 gc, linux-amd64) >[junit4] # Problematic frame: >[junit4] # V [libjvm.so+0xb31a35] Node::add_req(Node*)+0xb5 >[junit4] # >[junit4] # No core dump will be written. Core dumps have been disabled. To > enable core dumping, try "ulimit -c unlimited" before starting Java again >[junit4] # >[junit4] # An error report file with more information is saved as: >[junit4] # > /home/jenkins/workspace/Lucene-Solr-7.x-Linux/lucene/build/analysis/icu/test/J1/hs_err_pid15712.log >[junit4] # >[junit4] # Compiler replay data is saved as: >[junit4] # > /home/jenkins/workspace/Lucene-Solr-7.x-Linux/lucene/build/analysis/icu/test/J1/replay_pid15712.log >[junit4] # >[junit4] # If you would like to submit a bug report, please visit: >[junit4] # http://bugreport.java.com/bugreport/crash.jsp >[junit4] # >[junit4] <<< JVM J1: EOF > > [...truncated 3 lines...] 
>[junit4] ERROR: JVM J1 ended with an exception, command line: > /home/jenkins/tools/java/64bit/jdk-10-ea+41/bin/java -XX:-UseCompressedOops > -XX:+UseG1GC -XX:+HeapDumpOnOutOfMemoryError > -XX:HeapDumpPath=/home/jenkins/workspace/Lucene-Solr-7.x-Linux/heapdumps -ea > -esa --illegal-access=deny -Dtests.prefix=tests -Dtests.seed=F19E4EFC03FEFEC8 > -Xmx512M -Dtests.iters= -Dtests.verbose=false -Dtests.infostream=false > -Dtests.codec=random -Dtests.postingsformat=random > -Dtests.docvaluesformat=random -Dtests.locale=random -Dtests.timezone=random > -Dtests.directory=random -Dtests.linedocsfile=europarl.lines.txt.gz > -Dtests.luceneMatchVersion=7.3.0 -Dtests.cleanthreads=perMethod > -Djava.util.logging.config.file=/home/jenkins/workspace/Lucene-Solr-7.x-Linux/lucene/tools/junit4/logging.properties > -Dtests.nightly=false -Dtests.weekly=false -Dtests.monster=false > -Dtests.slow=true -Dtests.asserts=true -Dtests.multiplier=3 -DtempDir=./temp > -Djava.io.tmpdir=./temp > -Djunit4.tempDir=/home/jenkins/workspace/Lucene-Solr-7.x-Linux/lucene/build/analysis/icu/test/temp > -Dcommon.dir=/home/jenkins/workspace/Lucene-Solr-7.x-Linux/lucene > -Dclover.db.dir=/home/jenkins/workspace/Lucene-Solr-7.x-Linux/lucene/build/clover/db > > -Djava.security.policy=/home/jenkins/workspace/Lucene-Solr-7.x-Linux/lucene/tools/junit4/tests.policy > -Dtests.LUCENE_VERSION=7.3.0 -Djetty.testMode=1 -Djetty.insecurerandom=1 > -Dsolr.directoryFactory=org.apache.solr.core.MockDirectoryFactory > -Djava.awt.headless=true -Djdk.map.althashing.threshold=0 > -Dtests.src.home=/home/jenkins/workspace/Lucene-Solr-7.x-Linux > -Djava.security.egd=file:/dev/./urandom > -Djunit4.childvm.cwd=/home/jenkins/workspace/Lucene-Solr-7.x-Linux/lucene/build/analysis/icu/test/J1 > -Djunit4.childvm.id=1 -Djunit4.childvm.count=3 -Dfile.encoding=UTF-8 > -Djava.security.manager=org.apache.lucene.util.TestSecurityManager > -Dtests.filterstacks=true -Dtests.leaveTemporary=false -classpath >
[JENKINS-EA] Lucene-Solr-7.x-Linux (64bit/jdk-10-ea+41) - Build # 1250 - Still Failing!
Build: https://jenkins.thetaphi.de/job/Lucene-Solr-7.x-Linux/1250/ Java: 64bit/jdk-10-ea+41 -XX:-UseCompressedOops -XX:+UseG1GC All tests passed Build Log: [...truncated 3327 lines...] [junit4] JVM J1: stdout was not empty, see: /home/jenkins/workspace/Lucene-Solr-7.x-Linux/lucene/build/analysis/icu/test/temp/junit4-J1-20180126_153005_25116767526503423538987.sysout [junit4] >>> JVM J1 emitted unexpected output (verbatim) [junit4] Default case invoked for: [junit4]opcode = 0, "Node" [junit4] # [junit4] # A fatal error has been detected by the Java Runtime Environment: [junit4] # [junit4] # SIGSEGV (0xb) at pc=0x7f988181ea35, pid=15712, tid=15792 [junit4] # [junit4] # JRE version: OpenJDK Runtime Environment (10.0+41) (build 10-ea+41) [junit4] # Java VM: OpenJDK 64-Bit Server VM (10-ea+41, mixed mode, tiered, g1 gc, linux-amd64) [junit4] # Problematic frame: [junit4] # V [libjvm.so+0xb31a35] Node::add_req(Node*)+0xb5 [junit4] # [junit4] # No core dump will be written. Core dumps have been disabled. To enable core dumping, try "ulimit -c unlimited" before starting Java again [junit4] # [junit4] # An error report file with more information is saved as: [junit4] # /home/jenkins/workspace/Lucene-Solr-7.x-Linux/lucene/build/analysis/icu/test/J1/hs_err_pid15712.log [junit4] # [junit4] # Compiler replay data is saved as: [junit4] # /home/jenkins/workspace/Lucene-Solr-7.x-Linux/lucene/build/analysis/icu/test/J1/replay_pid15712.log [junit4] # [junit4] # If you would like to submit a bug report, please visit: [junit4] # http://bugreport.java.com/bugreport/crash.jsp [junit4] # [junit4] <<< JVM J1: EOF [...truncated 3 lines...] 
[junit4] ERROR: JVM J1 ended with an exception, command line: /home/jenkins/tools/java/64bit/jdk-10-ea+41/bin/java -XX:-UseCompressedOops -XX:+UseG1GC -XX:+HeapDumpOnOutOfMemoryError -XX:HeapDumpPath=/home/jenkins/workspace/Lucene-Solr-7.x-Linux/heapdumps -ea -esa --illegal-access=deny -Dtests.prefix=tests -Dtests.seed=F19E4EFC03FEFEC8 -Xmx512M -Dtests.iters= -Dtests.verbose=false -Dtests.infostream=false -Dtests.codec=random -Dtests.postingsformat=random -Dtests.docvaluesformat=random -Dtests.locale=random -Dtests.timezone=random -Dtests.directory=random -Dtests.linedocsfile=europarl.lines.txt.gz -Dtests.luceneMatchVersion=7.3.0 -Dtests.cleanthreads=perMethod -Djava.util.logging.config.file=/home/jenkins/workspace/Lucene-Solr-7.x-Linux/lucene/tools/junit4/logging.properties -Dtests.nightly=false -Dtests.weekly=false -Dtests.monster=false -Dtests.slow=true -Dtests.asserts=true -Dtests.multiplier=3 -DtempDir=./temp -Djava.io.tmpdir=./temp -Djunit4.tempDir=/home/jenkins/workspace/Lucene-Solr-7.x-Linux/lucene/build/analysis/icu/test/temp -Dcommon.dir=/home/jenkins/workspace/Lucene-Solr-7.x-Linux/lucene -Dclover.db.dir=/home/jenkins/workspace/Lucene-Solr-7.x-Linux/lucene/build/clover/db -Djava.security.policy=/home/jenkins/workspace/Lucene-Solr-7.x-Linux/lucene/tools/junit4/tests.policy -Dtests.LUCENE_VERSION=7.3.0 -Djetty.testMode=1 -Djetty.insecurerandom=1 -Dsolr.directoryFactory=org.apache.solr.core.MockDirectoryFactory -Djava.awt.headless=true -Djdk.map.althashing.threshold=0 -Dtests.src.home=/home/jenkins/workspace/Lucene-Solr-7.x-Linux -Djava.security.egd=file:/dev/./urandom -Djunit4.childvm.cwd=/home/jenkins/workspace/Lucene-Solr-7.x-Linux/lucene/build/analysis/icu/test/J1 -Djunit4.childvm.id=1 -Djunit4.childvm.count=3 -Dfile.encoding=UTF-8 -Djava.security.manager=org.apache.lucene.util.TestSecurityManager -Dtests.filterstacks=true -Dtests.leaveTemporary=false -classpath
[jira] [Updated] (SOLR-11904) IndexFetcher Http client requests are unauthenticated
[ https://issues.apache.org/jira/browse/SOLR-11904?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel ] Kyriacos Christoudias updated SOLR-11904: - Description: Whenever the IndexFetcher class is called for recovery or replication the HTTP requests are unauthenticated resulting in 401 errors: {code:java} 2018-01-25 13:16:22.538 WARN (indexFetcher-25-thread-1) [c:myCollection s:shard1 r:core_node3 x:myCollection_shard1_replica_t1] o.a.s.h.IndexFetcher Master at: http://server1:8983/solr/myCollection_shard1_replica_t4/ is not available. Index fetch failed by exception: org.apache.solr.client.solrj.impl.HttpSolrClient$RemoteSolrException: Error from server at http://server1:8983/solr/myCollection_shard1_replica_t4: Expected mime type application/octet-stream but got text/html. Error 401 Unauthorized request, Response code: 401 HTTP ERROR 401 Problem accessing /solr/myCollection_shard1_replica_t4/replication. Reason: Unauthorized request, Response code: 401 {code} I checked the source code and before creating a new HTTP client in IndexFetcher it tries to get http auth user/password from initArgs but those properties are always null, event if I set them in the replication handler (under the slave tag). {code:java} http://remote_host:port/solr/core_name/replication username password {code} I downloaded the solr source code and entered the username and password hardcoded, compiled the jar and everything was working fine. Before this I used the logger to check the httpBasicAuthUser/httpBasicAuthPassword and whenever IndexFetcher was called for recovery or replication these fields were null. 
was: Whenever the IndexFetcher class is called for recovery or replication the HTTP requests are unauthenticated resulting in 401 errors: {code:java} 2018-01-25 13:16:22.538 WARN (indexFetcher-25-thread-1) [c:myCollection s:shard1 r:core_node3 x:myCollection_shard1_replica_t1] o.a.s.h.IndexFetcher Master at: http://server1:8983/solr/myCollection_shard1_replica_t4/ is not available. Index fetch failed by exception: org.apache.solr.client.solrj.impl.HttpSolrClient$RemoteSolrException: Error from server at http://server1:8983/solr/myCollection_shard1_replica_t4: Expected mime type application/octet-stream but got text/html. Error 401 Unauthorized request, Response code: 401 HTTP ERROR 401 Problem accessing /solr/myCollection_shard1_replica_t4/replication. Reason: Unauthorized request, Response code: 401 {code} I checked the source code and before creating a new HTTP client in IndexFetcher it tries to get http auth user/password from initArgs but those properties are always null, event if I set them in the replication handler (under the slave tag). {code:java} http://remote_host:port/solr/core_name/replication username password {code} I downloaded the solr source code and entered the username and password hardcoded, compiled the jar and everything was working fine. Before this I used the logger to check the httpBasicAuthUser/httpBasicAuthPassword and whenever IndexFetcher was called for recovery or replication these fields were null. > IndexFetcher Http client requests are unauthenticated > - > > Key: SOLR-11904 > URL: https://issues.apache.org/jira/browse/SOLR-11904 > Project: Solr > Issue Type: Bug > Security Level: Public(Default Security Level. Issues are Public) > Components: Authentication, replication (java), SolrCloud >Affects Versions: 7.2 > Environment: Three servers on solrcloud. One collection with 2 shards > and 3 tlog replicas on each shard. 
>Reporter: Kyriacos Christoudias >Priority: Major > > Whenever the IndexFetcher class is called for recovery or replication the > HTTP requests are unauthenticated resulting in 401 errors: > > {code:java} > 2018-01-25 13:16:22.538 WARN (indexFetcher-25-thread-1) [c:myCollection > s:shard1 r:core_node3 x:myCollection_shard1_replica_t1] o.a.s.h.IndexFetcher > Master at: http://server1:8983/solr/myCollection_shard1_replica_t4/ is not > available. Index fetch failed by exception: > org.apache.solr.client.solrj.impl.HttpSolrClient$RemoteSolrException: Error > from server at http://server1:8983/solr/myCollection_shard1_replica_t4: > Expected mime type application/octet-stream but got text/html. > > > Error 401 Unauthorized request, Response code: 401 > > HTTP ERROR 401 > Problem accessing /solr/myCollection_shard1_replica_t4/replication. Reason: > Unauthorized request, Response code: 401 > > > {code} > > I checked the source code and before creating a new HTTP client in > IndexFetcher it tries to get http auth user/password from initArgs but those > properties are always null, event if I set them in the replication handler > (under
[jira] [Updated] (SOLR-11904) IndexFetcher Http client requests are unauthenticated
[ https://issues.apache.org/jira/browse/SOLR-11904?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel ] Kyriacos Christoudias updated SOLR-11904: - Description: Whenever the IndexFetcher class is called for recovery or replication the HTTP requests are unauthenticated resulting in 401 errors: {code:java} 2018-01-25 13:16:22.538 WARN (indexFetcher-25-thread-1) [c:myCollection s:shard1 r:core_node3 x:myCollection_shard1_replica_t1] o.a.s.h.IndexFetcher Master at: http://server1:8983/solr/myCollection_shard1_replica_t4/ is not available. Index fetch failed by exception: org.apache.solr.client.solrj.impl.HttpSolrClient$RemoteSolrException: Error from server at http://server1:8983/solr/myCollection_shard1_replica_t4: Expected mime type application/octet-stream but got text/html. Error 401 Unauthorized request, Response code: 401 HTTP ERROR 401 Problem accessing /solr/myCollection_shard1_replica_t4/replication. Reason: Unauthorized request, Response code: 401 {code} I checked the source code and before creating a new HTTP client in IndexFetcher it tries to get http auth user/password from initArgs but those properties are always null, event if I set them in the replication handler (under the slave tag). {code:java} http://remote_host:port/solr/core_name/replication username password {code} I downloaded the solr source code and entered the username and password hardcoded, compiled the jar and everything was working fine. Before this I used the logger to check the httpBasicAuthUser/httpBasicAuthPassword and whenever IndexFetcher was called for recovery or replication these fields were null. 
was: Whenever the IndexFetcher class is called for recovery or replication the HTTP requests are unauthenticated resulting in 401 errors: {code:java} 2018-01-25 13:16:22.538 WARN (indexFetcher-25-thread-1) [c:myCollection s:shard1 r:core_node3 x:myCollection_shard1_replica_t1] o.a.s.h.IndexFetcher Master at: http://server1:8983/solr/myCollection_shard1_replica_t4/ is not available. Index fetch failed by exception: org.apache.solr.client.solrj.impl.HttpSolrClient$RemoteSolrException: Error from server at http://server1:8983/solr/myCollection_shard1_replica_t4: Expected mime type application/octet-stream but got text/html. Error 401 Unauthorized request, Response code: 401 HTTP ERROR 401 Problem accessing /solr/myCollection_shard1_replica_t4/replication. Reason: Unauthorized request, Response code: 401 {code} I checked the source code and before creating a new HTTP client in IndexFetcher it tries to get http auth user/password from initArgs but those properties are always null, event if I set them in the replication handler (under the slave tag). {color:#008000}{color} {color:#008000} {color}http://remote_host:port/solr/core_name/replication{color:#008000}{color} {color}username{color:#008000}{color} {color:#008000} {color}password{color:#008000}{color} {{{color:#008000}{color}}} I downloaded the solr source code and entered the username and password hardcoded, compiled the jar and everything was working fine. Before this I used the logger to check the httpBasicAuthUser/httpBasicAuthPassword and whenever IndexFetcher was called for recovery or replication these fields were null. > IndexFetcher Http client requests are unauthenticated > - > > Key: SOLR-11904 > URL: https://issues.apache.org/jira/browse/SOLR-11904 > Project: Solr > Issue Type: Bug > Security Level: Public(Default Security Level. Issues are Public) > Components: Authentication, replication (java), SolrCloud >Affects Versions: 7.2 > Environment: Three servers on solrcloud. 
One collection with 2 shards > and 3 tlog replicas on each shard. >Reporter: Kyriacos Christoudias >Priority: Major > > > > Whenever the IndexFetcher class is called for recovery or replication the > HTTP requests are unauthenticated resulting in 401 errors: > > > {code:java} > 2018-01-25 13:16:22.538 WARN (indexFetcher-25-thread-1) [c:myCollection > s:shard1 r:core_node3 x:myCollection_shard1_replica_t1] o.a.s.h.IndexFetcher > Master at: http://server1:8983/solr/myCollection_shard1_replica_t4/ is not > available. Index fetch failed by exception: > org.apache.solr.client.solrj.impl.HttpSolrClient$RemoteSolrException: Error > from server at http://server1:8983/solr/myCollection_shard1_replica_t4: > Expected mime type application/octet-stream but got text/html. > > > Error
[jira] [Updated] (SOLR-11904) IndexFetcher Http client requests are unauthenticated
[ https://issues.apache.org/jira/browse/SOLR-11904?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel ] Kyriacos Christoudias updated SOLR-11904: - Description: Whenever the IndexFetcher class is called for recovery or replication the HTTP requests are unauthenticated resulting in 401 errors: {code:java} 2018-01-25 13:16:22.538 WARN (indexFetcher-25-thread-1) [c:myCollection s:shard1 r:core_node3 x:myCollection_shard1_replica_t1] o.a.s.h.IndexFetcher Master at: http://server1:8983/solr/myCollection_shard1_replica_t4/ is not available. Index fetch failed by exception: org.apache.solr.client.solrj.impl.HttpSolrClient$RemoteSolrException: Error from server at http://server1:8983/solr/myCollection_shard1_replica_t4: Expected mime type application/octet-stream but got text/html. Error 401 Unauthorized request, Response code: 401 HTTP ERROR 401 Problem accessing /solr/myCollection_shard1_replica_t4/replication. Reason: Unauthorized request, Response code: 401 {code} I checked the source code and before creating a new HTTP client in IndexFetcher it tries to get http auth user/password from initArgs but those properties are always null, event if I set them in the replication handler (under the slave tag). {color:#008000}{color} {color:#008000} {color}http://remote_host:port/solr/core_name/replication{color:#008000}{color} {color}username{color:#008000}{color} {color:#008000} {color}password{color:#008000}{color} {{{color:#008000}{color}}} I downloaded the solr source code and entered the username and password hardcoded, compiled the jar and everything was working fine. Before this I used the logger to check the httpBasicAuthUser/httpBasicAuthPassword and whenever IndexFetcher was called for recovery or replication these fields were null. was: Whenever the IndexFetcher class is called for recovery or replication the HTTP requests are unauthenticated resulting in 401 errors. 
I checked the source code and before creating a new HTTP client in IndexFetcher it tries to get http auth user/password from initArgs but those properties are always null, event if I set them in the replication handler (under the slave tag). {color:#008000} {color} {color:#008000} {color}http://remote_host:port/solr/core_name/replication{color:#008000}{color} {color}username{color:#008000}{color} {color:#008000} {color}password{color:#008000}{color} {{{color:#008000}{color}}} I downloaded the solr source code and entered the username and password hardcoded, compiled the jar and everything was working fine. Before this I used the logger to check the httpBasicAuthUser/httpBasicAuthPassword and whenever IndexFetcher was called for recovery or replication these fields were null. > IndexFetcher Http client requests are unauthenticated > - > > Key: SOLR-11904 > URL: https://issues.apache.org/jira/browse/SOLR-11904 > Project: Solr > Issue Type: Bug > Security Level: Public(Default Security Level. Issues are Public) > Components: Authentication, replication (java), SolrCloud >Affects Versions: 7.2 > Environment: Three servers on solrcloud. One collection with 2 shards > and 3 tlog replicas on each shard. >Reporter: Kyriacos Christoudias >Priority: Major > > > > Whenever the IndexFetcher class is called for recovery or replication the > HTTP requests are unauthenticated resulting in 401 errors: > > > {code:java} > 2018-01-25 13:16:22.538 WARN (indexFetcher-25-thread-1) [c:myCollection > s:shard1 r:core_node3 x:myCollection_shard1_replica_t1] o.a.s.h.IndexFetcher > Master at: http://server1:8983/solr/myCollection_shard1_replica_t4/ is not > available. Index fetch failed by exception: > org.apache.solr.client.solrj.impl.HttpSolrClient$RemoteSolrException: Error > from server at http://server1:8983/solr/myCollection_shard1_replica_t4: > Expected mime type application/octet-stream but got text/html. 
> > > Error 401 Unauthorized request, Response code: 401 > > HTTP ERROR 401 > Problem accessing /solr/myCollection_shard1_replica_t4/replication. Reason: > Unauthorized request, Response code: 401 > > > {code} > > > I checked the source code and before creating a new HTTP client in >
***UNCHECKED*** [JENKINS] Lucene-Solr-master-Linux (64bit/jdk1.8.0_144) - Build # 21350 - Still Unstable!
Build: https://jenkins.thetaphi.de/job/Lucene-Solr-master-Linux/21350/ Java: 64bit/jdk1.8.0_144 -XX:-UseCompressedOops -XX:+UseG1GC 1 tests failed. FAILED: org.apache.solr.cloud.autoscaling.TriggerIntegrationTest.testMetricTrigger Error Message: Stack Trace: java.lang.AssertionError at __randomizedtesting.SeedInfo.seed([1C951204C779B7D:BBC566AF139F4D32]:0) at org.junit.Assert.fail(Assert.java:92) at org.junit.Assert.assertTrue(Assert.java:43) at org.junit.Assert.assertNull(Assert.java:551) at org.junit.Assert.assertNull(Assert.java:562) at org.apache.solr.cloud.autoscaling.TriggerIntegrationTest.testMetricTrigger(TriggerIntegrationTest.java:1575) at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62) at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.lang.reflect.Method.invoke(Method.java:498) at com.carrotsearch.randomizedtesting.RandomizedRunner.invoke(RandomizedRunner.java:1737) at com.carrotsearch.randomizedtesting.RandomizedRunner$8.evaluate(RandomizedRunner.java:934) at com.carrotsearch.randomizedtesting.RandomizedRunner$9.evaluate(RandomizedRunner.java:970) at com.carrotsearch.randomizedtesting.RandomizedRunner$10.evaluate(RandomizedRunner.java:984) at com.carrotsearch.randomizedtesting.rules.SystemPropertiesRestoreRule$1.evaluate(SystemPropertiesRestoreRule.java:57) at org.apache.lucene.util.TestRuleSetupTeardownChained$1.evaluate(TestRuleSetupTeardownChained.java:49) at org.apache.lucene.util.AbstractBeforeAfterRule$1.evaluate(AbstractBeforeAfterRule.java:45) at org.apache.lucene.util.TestRuleThreadAndTestName$1.evaluate(TestRuleThreadAndTestName.java:48) at org.apache.lucene.util.TestRuleIgnoreAfterMaxFailures$1.evaluate(TestRuleIgnoreAfterMaxFailures.java:64) at org.apache.lucene.util.TestRuleMarkFailure$1.evaluate(TestRuleMarkFailure.java:47) at 
com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36) at com.carrotsearch.randomizedtesting.ThreadLeakControl$StatementRunner.run(ThreadLeakControl.java:368) at com.carrotsearch.randomizedtesting.ThreadLeakControl.forkTimeoutingTask(ThreadLeakControl.java:817) at com.carrotsearch.randomizedtesting.ThreadLeakControl$3.evaluate(ThreadLeakControl.java:468) at com.carrotsearch.randomizedtesting.RandomizedRunner.runSingleTest(RandomizedRunner.java:943) at com.carrotsearch.randomizedtesting.RandomizedRunner$5.evaluate(RandomizedRunner.java:829) at com.carrotsearch.randomizedtesting.RandomizedRunner$6.evaluate(RandomizedRunner.java:879) at com.carrotsearch.randomizedtesting.RandomizedRunner$7.evaluate(RandomizedRunner.java:890) at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36) at com.carrotsearch.randomizedtesting.rules.SystemPropertiesRestoreRule$1.evaluate(SystemPropertiesRestoreRule.java:57) at org.apache.lucene.util.AbstractBeforeAfterRule$1.evaluate(AbstractBeforeAfterRule.java:45) at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36) at org.apache.lucene.util.TestRuleStoreClassName$1.evaluate(TestRuleStoreClassName.java:41) at com.carrotsearch.randomizedtesting.rules.NoShadowingOrOverridesOnMethodsRule$1.evaluate(NoShadowingOrOverridesOnMethodsRule.java:40) at com.carrotsearch.randomizedtesting.rules.NoShadowingOrOverridesOnMethodsRule$1.evaluate(NoShadowingOrOverridesOnMethodsRule.java:40) at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36) at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36) at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36) at org.apache.lucene.util.TestRuleAssertionsRequired$1.evaluate(TestRuleAssertionsRequired.java:53) at 
org.apache.lucene.util.TestRuleMarkFailure$1.evaluate(TestRuleMarkFailure.java:47) at org.apache.lucene.util.TestRuleIgnoreAfterMaxFailures$1.evaluate(TestRuleIgnoreAfterMaxFailures.java:64) at org.apache.lucene.util.TestRuleIgnoreTestSuites$1.evaluate(TestRuleIgnoreTestSuites.java:54) at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36) at com.carrotsearch.randomizedtesting.ThreadLeakControl$StatementRunner.run(ThreadLeakControl.java:368) at java.lang.Thread.run(Thread.java:748) Build Log: [...truncated 13620 lines...] [junit4] Suite: org.apache.solr.cloud.autoscaling.TriggerIntegrationTest [junit4] 2> Creating dataDir:
[JENKINS] Lucene-Solr-Tests-7.x - Build # 334 - Still Failing
Build: https://builds.apache.org/job/Lucene-Solr-Tests-7.x/334/ 11 tests failed. FAILED: org.apache.solr.cloud.api.collections.CollectionsAPIDistributedZkTest.testCollectionsAPI Error Message: We have shards using the same indexDir. E.g. shards [core.test.shard2, core.test.shard1, core..system] all use indexDir (closed) Stack Trace: java.lang.AssertionError: We have shards using the same indexDir. E.g. shards [core.test.shard2, core.test.shard1, core..system] all use indexDir (closed) at __randomizedtesting.SeedInfo.seed([A95B73A61123EBCC:E12E07121710C459]:0) at org.junit.Assert.fail(Assert.java:93) at org.apache.solr.cloud.api.collections.CollectionsAPIDistributedZkTest.checkNoTwoShardsUseTheSameIndexDir(CollectionsAPIDistributedZkTest.java:604) at org.apache.solr.cloud.api.collections.CollectionsAPIDistributedZkTest.testCollectionsAPI(CollectionsAPIDistributedZkTest.java:479) at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62) at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.lang.reflect.Method.invoke(Method.java:498) at com.carrotsearch.randomizedtesting.RandomizedRunner.invoke(RandomizedRunner.java:1737) at com.carrotsearch.randomizedtesting.RandomizedRunner$8.evaluate(RandomizedRunner.java:934) at com.carrotsearch.randomizedtesting.RandomizedRunner$9.evaluate(RandomizedRunner.java:970) at com.carrotsearch.randomizedtesting.RandomizedRunner$10.evaluate(RandomizedRunner.java:984) at com.carrotsearch.randomizedtesting.rules.SystemPropertiesRestoreRule$1.evaluate(SystemPropertiesRestoreRule.java:57) at org.apache.lucene.util.TestRuleSetupTeardownChained$1.evaluate(TestRuleSetupTeardownChained.java:49) at org.apache.lucene.util.AbstractBeforeAfterRule$1.evaluate(AbstractBeforeAfterRule.java:45) at org.apache.lucene.util.TestRuleThreadAndTestName$1.evaluate(TestRuleThreadAndTestName.java:48) at 
org.apache.lucene.util.TestRuleIgnoreAfterMaxFailures$1.evaluate(TestRuleIgnoreAfterMaxFailures.java:64) at org.apache.lucene.util.TestRuleMarkFailure$1.evaluate(TestRuleMarkFailure.java:47) at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36) at com.carrotsearch.randomizedtesting.ThreadLeakControl$StatementRunner.run(ThreadLeakControl.java:368) at com.carrotsearch.randomizedtesting.ThreadLeakControl.forkTimeoutingTask(ThreadLeakControl.java:817) at com.carrotsearch.randomizedtesting.ThreadLeakControl$3.evaluate(ThreadLeakControl.java:468) at com.carrotsearch.randomizedtesting.RandomizedRunner.runSingleTest(RandomizedRunner.java:943) at com.carrotsearch.randomizedtesting.RandomizedRunner$5.evaluate(RandomizedRunner.java:829) at com.carrotsearch.randomizedtesting.RandomizedRunner$6.evaluate(RandomizedRunner.java:879) at com.carrotsearch.randomizedtesting.RandomizedRunner$7.evaluate(RandomizedRunner.java:890) at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36) at com.carrotsearch.randomizedtesting.rules.SystemPropertiesRestoreRule$1.evaluate(SystemPropertiesRestoreRule.java:57) at org.apache.lucene.util.AbstractBeforeAfterRule$1.evaluate(AbstractBeforeAfterRule.java:45) at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36) at org.apache.lucene.util.TestRuleStoreClassName$1.evaluate(TestRuleStoreClassName.java:41) at com.carrotsearch.randomizedtesting.rules.NoShadowingOrOverridesOnMethodsRule$1.evaluate(NoShadowingOrOverridesOnMethodsRule.java:40) at com.carrotsearch.randomizedtesting.rules.NoShadowingOrOverridesOnMethodsRule$1.evaluate(NoShadowingOrOverridesOnMethodsRule.java:40) at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36) at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36) at 
com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36) at org.apache.lucene.util.TestRuleAssertionsRequired$1.evaluate(TestRuleAssertionsRequired.java:53) at org.apache.lucene.util.TestRuleMarkFailure$1.evaluate(TestRuleMarkFailure.java:47) at org.apache.lucene.util.TestRuleIgnoreAfterMaxFailures$1.evaluate(TestRuleIgnoreAfterMaxFailures.java:64) at org.apache.lucene.util.TestRuleIgnoreTestSuites$1.evaluate(TestRuleIgnoreTestSuites.java:54) at com.carrotsearch.randomizedtesting.rules.StatementAdapter.evaluate(StatementAdapter.java:36) at com.carrotsearch.randomizedtesting.ThreadLeakControl$StatementRunner.run(ThreadLeakControl.java:368) at