sigram commented on code in PR #3739:
URL: https://github.com/apache/solr/pull/3739#discussion_r2413461098
##########
solr/benchmark/src/java/org/apache/solr/bench/search/ExitableDirectoryReaderSearch.java:
##########
@@ -0,0 +1,180 @@
+package org.apache.solr.bench.search;
+
+import static org.apache.solr.bench.generators.SourceDSL.integers;
+import static org.apache.solr.bench.generators.SourceDSL.strings;
+
+import java.util.Map;
+import java.util.Set;
+import java.util.concurrent.TimeUnit;
+import org.apache.solr.bench.Docs;
+import org.apache.solr.bench.MiniClusterState;
+import org.apache.solr.client.solrj.request.CollectionAdminRequest;
+import org.apache.solr.client.solrj.request.QueryRequest;
+import org.apache.solr.client.solrj.response.QueryResponse;
+import org.apache.solr.common.SolrInputDocument;
+import org.apache.solr.common.params.ModifiableSolrParams;
+import org.apache.solr.search.CallerSpecificQueryLimit;
+import org.apache.solr.search.SolrIndexSearcher;
+import org.apache.solr.util.TestInjection;
+import org.openjdk.jmh.annotations.Benchmark;
+import org.openjdk.jmh.annotations.BenchmarkMode;
+import org.openjdk.jmh.annotations.Fork;
+import org.openjdk.jmh.annotations.Level;
+import org.openjdk.jmh.annotations.Measurement;
+import org.openjdk.jmh.annotations.Mode;
+import org.openjdk.jmh.annotations.OutputTimeUnit;
+import org.openjdk.jmh.annotations.Param;
+import org.openjdk.jmh.annotations.Scope;
+import org.openjdk.jmh.annotations.Setup;
+import org.openjdk.jmh.annotations.State;
+import org.openjdk.jmh.annotations.TearDown;
+import org.openjdk.jmh.annotations.Threads;
+import org.openjdk.jmh.annotations.Warmup;
+import org.openjdk.jmh.infra.Blackhole;
+
+@Fork(value = 1)
+@BenchmarkMode(Mode.AverageTime)
+@Warmup(time = 20, iterations = 2)
+@OutputTimeUnit(TimeUnit.MILLISECONDS)
+@Measurement(time = 30, iterations = 4)
+@Threads(value = 1)
+public class ExitableDirectoryReaderSearch {
+
+ static final String COLLECTION = "c1";
+
+ @State(Scope.Benchmark)
+ public static class BenchState {
+
+ Docs queryFields;
+
+ int NUM_DOCS = 500_000;
+ int WORDS = NUM_DOCS / 100;
+
+ @Setup(Level.Trial)
+ public void setupTrial(MiniClusterState.MiniClusterBenchState miniClusterState)
+     throws Exception {
+ miniClusterState.setUseHttp1(true);
+ System.setProperty("documentCache.enabled", "false");
+ System.setProperty("queryResultCache.enabled", "false");
+ System.setProperty("filterCache.enabled", "false");
+ System.setProperty("miniClusterBaseDir", "build/work/mini-cluster");
+ // create a lot of small segments
+ System.setProperty("segmentsPerTier", "200");
Review Comment:
The working hypothesis is that the impact of ExitableDirectoryReader (EDR) should be
more visible with multiple segments, because EDR constructs separate sub-readers and
enums for each segment, so the overhead should be multiplied. Whether that's actually
the case, we can now test and verify :)
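
To make the hypothesis concrete, here's a rough sketch (not part of this PR) of the
per-segment wrapping EDR does at the Lucene level. It assumes Lucene 9+, where
ExitableDirectoryReader.wrap(DirectoryReader, QueryTimeout) takes a QueryTimeout with a
single shouldExit() method; the class and variable names (EdrWrapSketch, deadlineNanos)
are illustrative only.

import java.io.IOException;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.ExitableDirectoryReader;
import org.apache.lucene.index.QueryTimeout;

public class EdrWrapSketch {

  // ExitableDirectoryReader.wrap re-wraps every leaf (segment) reader; each wrapped
  // segment consults the QueryTimeout while iterating its terms/postings, so the
  // more small segments the index has, the more wrappers and checks are in play.
  static DirectoryReader wrapWithDeadline(DirectoryReader reader, long deadlineNanos)
      throws IOException {
    QueryTimeout timeout = () -> System.nanoTime() > deadlineNanos;
    return ExitableDirectoryReader.wrap(reader, timeout);
  }

  // With segmentsPerTier=200 the benchmark index should keep many small segments,
  // which is exactly the index shape where any such overhead should be most visible.
  static int segmentCount(DirectoryReader reader) {
    return reader.leaves().size();
  }
}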