This is an automated email from the ASF dual-hosted git repository.

mdrob pushed a commit to branch main
in repository https://gitbox.apache.org/repos/asf/solr.git
The following commit(s) were added to refs/heads/main by this push:
     new f68bfad  SOLR-15555 Improved caching on FilterQuery (#230)
f68bfad is described below

commit f68bfad4c45df8bdb5b3eb15a65097f388c8f5cc
Author: Mike Drob <md...@apache.org>
AuthorDate: Mon Sep 13 19:56:31 2021 -0500

    SOLR-15555 Improved caching on FilterQuery (#230)

    Create a new cache mode for CaffeineCache where we can optionally use an
    async cache instead of the synchronous implementation. This is useful for
    cases (esp FilterQuery) where many identical requests come in near the
    same time and they would otherwise race to fill the same cache slot.

    CaffeineCache computeIfAbsent now accepts an IOFunction instead of the
    non-throwing java.util.Function interface. This required an update to
    Caffeine 2.9, which updates putIfAbsent with an optimistic get.

    Also incidentally fixes a rare bug where cache ramBytesUsed would be
    incorrectly reported under heavy cache contention/eviction loads.
---
 solr/CHANGES.txt                                   |   4 +
 .../java/org/apache/solr/query/SolrRangeQuery.java |  10 +-
 .../transform/ChildDocTransformerFactory.java      |   7 +-
 .../solr/schema/RptWithGeometrySpatialField.java   |   8 +-
 .../java/org/apache/solr/search/CaffeineCache.java | 160 ++++++++++++++++-----
 .../src/java/org/apache/solr/search/JoinQuery.java |  17 +--
 .../src/java/org/apache/solr/search/SolrCache.java |   7 +-
 .../apache/solr/search/SolrDocumentFetcher.java    |  14 +-
 .../org/apache/solr/search/SolrIndexSearcher.java  |  78 +++++++---
 .../apache/solr/search/facet/UnInvertedField.java  |  77 +---------
 .../solr/search/join/BlockJoinParentQParser.java   |   7 +-
 .../apache/solr/search/join/HashRangeQuery.java    |  18 +--
 .../java/org/apache/solr/util/TestInjection.java   |  22 +++
 .../solr/collection1/conf/solrconfig.xml           |   3 +-
 .../solr/request/TestUnInvertedFieldException.java |  11 +-
 .../org/apache/solr/search/TestRangeQuery.java     |  80 +++++++++--
 .../org/apache/solr/search/TestSolrCachePerf.java  |  19 ++-
 .../apache/solr/search/TestSolrQueryParser.java    |  24 ++--
 .../org/apache/solr/search/join/BJQParserTest.java |   5 +-
 solr/licenses/caffeine-2.8.4.jar.sha1              |   1 -
 solr/licenses/caffeine-2.9.2.jar.sha1              |   1 +
 solr/solr-ref-guide/src/caches-warming.adoc        |  14 ++
 .../src/java/org/apache/solr/SolrTestCaseJ4.java   |   3 +-
 versions.lock                                      |   2 +-
 versions.props                                     |   2 +-
 25 files changed, 363 insertions(+), 231 deletions(-)

diff --git a/solr/CHANGES.txt b/solr/CHANGES.txt
index b3aae42..410041b 100644
--- a/solr/CHANGES.txt
+++ b/solr/CHANGES.txt
@@ -143,6 +143,10 @@ when told to. The admin UI now tells it to. (Nazerke Seidan, David Smiley)
   This was already working for XML & "javabin"/SolrJ. Previously, omitting the ID
   would be confused for a partial/atomic update. (David Smiley)
 
+* SOLR-15555: Identical filter queries will no longer race to record a filterCache entry. Allows for async cache computation instead of
+  all queries contending for the same set of locks on the cache. Fixed a bug with ramBytes usage reporting on filterCache.
+ Upgraded Caffeine Cache to 2.9.2 (Mike Drob) + Other Changes ---------------------- * SOLR-15603: Add an option to activate Gradle build cache, build task cleanups (Alexis Tual, Dawid Weiss) diff --git a/solr/core/src/java/org/apache/solr/query/SolrRangeQuery.java b/solr/core/src/java/org/apache/solr/query/SolrRangeQuery.java index 3c6859c..c400d66 100644 --- a/solr/core/src/java/org/apache/solr/query/SolrRangeQuery.java +++ b/solr/core/src/java/org/apache/solr/query/SolrRangeQuery.java @@ -58,6 +58,7 @@ import org.apache.solr.search.DocSetProducer; import org.apache.solr.search.DocSetUtil; import org.apache.solr.search.ExtendedQueryBase; import org.apache.solr.search.SolrIndexSearcher; +import org.apache.solr.util.TestInjection; /** @lucene.experimental */ public final class SolrRangeQuery extends ExtendedQueryBase implements DocSetProducer { @@ -167,6 +168,7 @@ public final class SolrRangeQuery extends ExtendedQueryBase implements DocSetPro } private DocSet createDocSet(SolrIndexSearcher searcher, long cost) throws IOException { + assert TestInjection.injectDocSetDelay(); int maxDoc = searcher.maxDoc(); BitDocSet liveDocs = searcher.getLiveDocSet(); FixedBitSet liveBits = liveDocs.size() == maxDoc ? null : liveDocs.getBits(); @@ -408,20 +410,21 @@ public final class SolrRangeQuery extends ExtendedQueryBase implements DocSetPro // first time, check our filter cache boolean doCheck = !checkedFilterCache && context.ord == 0; checkedFilterCache = true; - SolrIndexSearcher solrSearcher = null; + final SolrIndexSearcher solrSearcher; if (doCheck && searcher instanceof SolrIndexSearcher) { - solrSearcher = (SolrIndexSearcher)searcher; + solrSearcher = (SolrIndexSearcher) searcher; if (solrSearcher.getFilterCache() == null) { doCheck = false; } else { - solrSearcher = (SolrIndexSearcher)searcher; answer = solrSearcher.getFilterCache().get(SolrRangeQuery.this); } } else { doCheck = false; + solrSearcher = null; } if (answer != null) { + // I'm not sure this ever happens return segStates[context.ord] = new SegState(new SegmentDocIdSet(answer, context)); } @@ -452,6 +455,7 @@ public final class SolrRangeQuery extends ExtendedQueryBase implements DocSetPro if (doCheck) { answer = createDocSet(solrSearcher, count); + // This can be a naked put because the cache usually gets checked in SolrIndexSearcher solrSearcher.getFilterCache().put(SolrRangeQuery.this, answer); return segStates[context.ord] = new SegState(new SegmentDocIdSet(answer, context)); } diff --git a/solr/core/src/java/org/apache/solr/response/transform/ChildDocTransformerFactory.java b/solr/core/src/java/org/apache/solr/response/transform/ChildDocTransformerFactory.java index 7983fd3..95902e2 100644 --- a/solr/core/src/java/org/apache/solr/response/transform/ChildDocTransformerFactory.java +++ b/solr/core/src/java/org/apache/solr/response/transform/ChildDocTransformerFactory.java @@ -17,6 +17,7 @@ package org.apache.solr.response.transform; import java.io.IOException; +import java.io.UncheckedIOException; import org.apache.lucene.search.BooleanClause; import org.apache.lucene.search.BooleanQuery; @@ -179,7 +180,11 @@ public class ChildDocTransformerFactory extends TransformerFactory { SolrCache<Query, BitSetProducer> parentCache = request.getSearcher().getCache(CACHE_NAME); // lazily retrieve from solr cache if (parentCache != null) { - return parentCache.computeIfAbsent(query, QueryBitSetProducer::new); + try { + return parentCache.computeIfAbsent(query, QueryBitSetProducer::new); + } catch (IOException e) { + throw new 
UncheckedIOException(e); // Shouldn't happen because QBSP doesn't throw + } } else { return new QueryBitSetProducer(query); } diff --git a/solr/core/src/java/org/apache/solr/schema/RptWithGeometrySpatialField.java b/solr/core/src/java/org/apache/solr/schema/RptWithGeometrySpatialField.java index 5ad0b5d..e32d6d9 100644 --- a/solr/core/src/java/org/apache/solr/schema/RptWithGeometrySpatialField.java +++ b/solr/core/src/java/org/apache/solr/schema/RptWithGeometrySpatialField.java @@ -163,13 +163,7 @@ public class RptWithGeometrySpatialField extends AbstractSpatialFieldType<Compos throw new IllegalStateException("Leaf " + readerContext.reader() + " is not suited for caching"); } PerSegCacheKey key = new PerSegCacheKey(cacheHelper.getKey(), docId); - Shape shape = cache.computeIfAbsent(key, k -> { - try { - return targetFuncValues.value(); - } catch (IOException e) { - return null; - } - }); + Shape shape = cache.computeIfAbsent(key, k -> targetFuncValues.value()); if (shape != null) { //optimize shape on a cache hit if possible. This must be thread-safe and it is. if (shape instanceof JtsGeometry) { diff --git a/solr/core/src/java/org/apache/solr/search/CaffeineCache.java b/solr/core/src/java/org/apache/solr/search/CaffeineCache.java index f12a3a0..cce7c21 100644 --- a/solr/core/src/java/org/apache/solr/search/CaffeineCache.java +++ b/solr/core/src/java/org/apache/solr/search/CaffeineCache.java @@ -17,6 +17,7 @@ package org.apache.solr.search; import java.io.IOException; +import java.io.UncheckedIOException; import java.lang.invoke.MethodHandles; import java.time.Duration; import java.util.Collections; @@ -25,14 +26,16 @@ import java.util.Map; import java.util.Map.Entry; import java.util.Optional; import java.util.Set; +import java.util.concurrent.CompletableFuture; +import java.util.concurrent.CompletionException; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.Executor; import java.util.concurrent.ExecutorService; import java.util.concurrent.ForkJoinPool; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.LongAdder; -import java.util.function.Function; +import com.github.benmanes.caffeine.cache.AsyncCache; import com.github.benmanes.caffeine.cache.RemovalCause; import com.github.benmanes.caffeine.cache.RemovalListener; import org.apache.lucene.util.Accountable; @@ -40,6 +43,7 @@ import org.apache.lucene.util.RamUsageEstimator; import org.apache.solr.common.SolrException; import org.apache.solr.metrics.MetricsMap; import org.apache.solr.metrics.SolrMetricsContext; +import org.apache.solr.util.IOFunction; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -72,20 +76,28 @@ public class CaffeineCache<K, V> extends SolrCacheBase implements SolrCache<K, V + RamUsageEstimator.shallowSizeOfInstance(CacheStats.class) + 2 * RamUsageEstimator.shallowSizeOfInstance(LongAdder.class); + private static final long RAM_BYTES_PER_FUTURE = RamUsageEstimator.shallowSizeOfInstance(CompletableFuture.class); + private Executor executor; private CacheStats priorStats; + private long priorHits; private long priorInserts; + private long priorLookups; private String description = "Caffeine Cache"; + private LongAdder hits; private LongAdder inserts; + private LongAdder lookups; private Cache<K,V> cache; + private AsyncCache<K, V> asyncCache; private long warmupTime; private int maxSize; private long maxRamBytes; private int initialSize; private int maxIdleTimeSec; private boolean cleanupThread; + private boolean async; private Set<String> metricNames = 
ConcurrentHashMap.newKeySet(); private MetricsMap cacheMap; @@ -103,7 +115,7 @@ public class CaffeineCache<K, V> extends SolrCacheBase implements SolrCache<K, V super.init(args, regenerator); String str = args.get(SIZE_PARAM); maxSize = (str == null) ? 1024 : Integer.parseInt(str); - str = args.get("initialSize"); + str = args.get(INITIAL_SIZE_PARAM); initialSize = Math.min((str == null) ? 1024 : Integer.parseInt(str), maxSize); str = args.get(MAX_IDLE_TIME_PARAM); if (str == null) { @@ -114,9 +126,12 @@ public class CaffeineCache<K, V> extends SolrCacheBase implements SolrCache<K, V str = args.get(MAX_RAM_MB_PARAM); int maxRamMB = str == null ? -1 : Double.valueOf(str).intValue(); maxRamBytes = maxRamMB < 0 ? Long.MAX_VALUE : maxRamMB * 1024L * 1024L; - str = args.get(CLEANUP_THREAD_PARAM); - cleanupThread = str != null && Boolean.parseBoolean(str); - if (cleanupThread) { + cleanupThread = Boolean.parseBoolean(args.get(CLEANUP_THREAD_PARAM)); + async = Boolean.parseBoolean(args.get(ASYNC_PARAM)); + if (async) { + // We record futures in the map to decrease bucket-lock contention, but need computation handled in same thread + executor = Runnable::run; + } else if (cleanupThread) { executor = ForkJoinPool.commonPool(); } else { executor = Runnable::run; @@ -125,7 +140,9 @@ public class CaffeineCache<K, V> extends SolrCacheBase implements SolrCache<K, V description = generateDescription(maxSize, initialSize); cache = buildCache(null); + hits = new LongAdder(); inserts = new LongAdder(); + lookups = new LongAdder(); initialRamBytes = RamUsageEstimator.shallowSizeOfInstance(cache.getClass()) + @@ -150,7 +167,13 @@ public class CaffeineCache<K, V> extends SolrCacheBase implements SolrCache<K, V } else { builder.maximumSize(maxSize); } - Cache<K, V> newCache = builder.build(); + Cache<K, V> newCache; + if (async) { + asyncCache = builder.buildAsync(); + newCache = asyncCache.synchronous(); + } else { + newCache = builder.build(); + } if (prev != null) { newCache.putAll(prev.asMap()); } @@ -176,33 +199,94 @@ public class CaffeineCache<K, V> extends SolrCacheBase implements SolrCache<K, V return cache.getIfPresent(key); } - @Override - public V computeIfAbsent(K key, Function<? super K, ? extends V> mappingFunction) { - return cache.get(key, k -> { - inserts.increment(); - V value = mappingFunction.apply(k); - if (value == null) { - return null; + private V computeAsync(K key, IOFunction<? super K, ? extends V> mappingFunction) throws IOException { + CompletableFuture<V> future = new CompletableFuture<>(); + CompletableFuture<V> result = asyncCache.asMap().putIfAbsent(key, future); + lookups.increment(); + if (result != null) { + try { + // Another thread is already working on this computation, wait for them to finish + V value = result.join(); + hits.increment(); + return value; + } catch (CompletionException e) { + Throwable cause = e.getCause(); + if (cause instanceof IOException) { + // Computation had an IOException, likely index problems, so fail this result too + throw (IOException) cause; + } + if (cause instanceof CancellableCollector.QueryCancelledException) { + // The reserved slot that we were waiting for got cancelled, so we will compute directly + // If we go back to waiting for a new cache result then that can lead to thread starvation + // Should we record a cache miss here? 
+ return mappingFunction.apply(key); + } + throw e; } - ramBytes.add(RamUsageEstimator.sizeOfObject(key, RamUsageEstimator.QUERY_DEFAULT_RAM_BYTES_USED) + - RamUsageEstimator.sizeOfObject(value, RamUsageEstimator.QUERY_DEFAULT_RAM_BYTES_USED)); - ramBytes.add(RamUsageEstimator.LINKED_HASHTABLE_RAM_BYTES_PER_ENTRY); + } + try { + // We reserved the slot, so we do the work + V value = mappingFunction.apply(key); + future.complete(value); // This will update the weight and expiration + recordRamBytes(key, null, value); + inserts.increment(); return value; - }); + } catch (RuntimeException | IOException e) { + // TimeExceeded exception is runtime and will bubble up from here + future.completeExceptionally(e); // This will remove the future from the cache + throw e; + } + } + + @Override + public V computeIfAbsent(K key, IOFunction<? super K, ? extends V> mappingFunction) throws IOException { + if (async) { + return computeAsync(key, mappingFunction); + } + + try { + return cache.get(key, k -> { + V value; + try { + value = mappingFunction.apply(k); + } catch (IOException e) { + throw new UncheckedIOException(e); + } + if (value == null) { + return null; + } + recordRamBytes(key, null, value); + inserts.increment(); + return value; + }); + } catch (UncheckedIOException e) { + throw e.getCause(); + } } @Override public V put(K key, V val) { inserts.increment(); V old = cache.asMap().put(key, val); + recordRamBytes(key, old, val); + return old; + } + + /** + * Update the estimate of used memory + * @param key the cache key + * @param oldValue the old cached value to decrement estimate (can be null) + * @param newValue the new cached value to increment estimate + */ + private void recordRamBytes(K key, V oldValue, V newValue) { ramBytes.add(RamUsageEstimator.sizeOfObject(key, RamUsageEstimator.QUERY_DEFAULT_RAM_BYTES_USED) + - RamUsageEstimator.sizeOfObject(val, RamUsageEstimator.QUERY_DEFAULT_RAM_BYTES_USED)); - if (old != null) { - ramBytes.add(- RamUsageEstimator.sizeOfObject(old, RamUsageEstimator.QUERY_DEFAULT_RAM_BYTES_USED)); - } else { + RamUsageEstimator.sizeOfObject(newValue, RamUsageEstimator.QUERY_DEFAULT_RAM_BYTES_USED)); + if (oldValue == null) { ramBytes.add(RamUsageEstimator.LINKED_HASHTABLE_RAM_BYTES_PER_ENTRY); + if (async) ramBytes.add(RAM_BYTES_PER_FUTURE); + } else { + ramBytes.add(- RamUsageEstimator.sizeOfObject(oldValue, RamUsageEstimator.QUERY_DEFAULT_RAM_BYTES_USED)); } - return old; } @Override @@ -300,9 +384,8 @@ public class CaffeineCache<K, V> extends SolrCacheBase implements SolrCache<K, V // warm entries if (isAutowarmingOn()) { - Eviction<K, V> policy = other.cache.policy().eviction().get(); int size = autowarm.getWarmCount(other.cache.asMap().size()); - hottest = policy.hottest(size); + hottest = other.cache.policy().eviction().map(p -> p.hottest(size)).orElse(Collections.emptyMap()); } for (Entry<K, V> entry : hottest.entrySet()) { @@ -318,15 +401,20 @@ public class CaffeineCache<K, V> extends SolrCacheBase implements SolrCache<K, V } } + hits.reset(); inserts.reset(); - priorStats = other.cache.stats().plus(other.priorStats); + lookups.reset(); + CacheStats oldStats = other.cache.stats(); + priorStats = oldStats.plus(other.priorStats); + priorHits = oldStats.hitCount() + other.hits.sum() + other.priorHits; priorInserts = other.inserts.sum() + other.priorInserts; + priorLookups = oldStats.requestCount() + other.lookups.sum() + other.priorLookups; warmupTime = TimeUnit.MILLISECONDS.convert(System.nanoTime() - warmingStartTime, TimeUnit.NANOSECONDS); } /** Returns 
the description of this cache. */ private String generateDescription(int limit, int initialSize) { - return String.format(Locale.ROOT, "TinyLfu Cache(maxSize=%d, initialSize=%d%s)", + return String.format(Locale.ROOT, "Caffeine Cache(maxSize=%d, initialSize=%d%s)", limit, initialSize, isAutowarmingOn() ? (", " + getAutowarmDescription()) : ""); } @@ -364,11 +452,13 @@ public class CaffeineCache<K, V> extends SolrCacheBase implements SolrCache<K, V cacheMap = new MetricsMap(map -> { if (cache != null) { CacheStats stats = cache.stats(); + long hitCount = stats.hitCount() + hits.sum(); long insertCount = inserts.sum(); + long lookupCount = stats.requestCount() + lookups.sum(); - map.put(LOOKUPS_PARAM, stats.requestCount()); - map.put(HITS_PARAM, stats.hitCount()); - map.put(HIT_RATIO_PARAM, stats.hitRate()); + map.put(LOOKUPS_PARAM, lookupCount); + map.put(HITS_PARAM, hitCount); + map.put(HIT_RATIO_PARAM, hitRate(hitCount, lookupCount)); map.put(INSERTS_PARAM, insertCount); map.put(EVICTIONS_PARAM, stats.evictionCount()); map.put(SIZE_PARAM, cache.asMap().size()); @@ -377,13 +467,19 @@ public class CaffeineCache<K, V> extends SolrCacheBase implements SolrCache<K, V map.put(MAX_RAM_MB_PARAM, getMaxRamMB()); CacheStats cumulativeStats = priorStats.plus(stats); - map.put("cumulative_lookups", cumulativeStats.requestCount()); - map.put("cumulative_hits", cumulativeStats.hitCount()); - map.put("cumulative_hitratio", cumulativeStats.hitRate()); + long cumLookups = priorLookups + lookupCount; + long cumHits = priorHits + hitCount; + map.put("cumulative_lookups", cumLookups); + map.put("cumulative_hits", cumHits); + map.put("cumulative_hitratio", hitRate(cumHits, cumLookups)); map.put("cumulative_inserts", priorInserts + insertCount); map.put("cumulative_evictions", cumulativeStats.evictionCount()); } }); solrMetricsContext.gauge(cacheMap, true, scope, getCategory().toString()); } + + private static double hitRate(long hitCount, long lookupCount) { + return lookupCount == 0 ? 
1.0 : (double) hitCount / lookupCount; + } } diff --git a/solr/core/src/java/org/apache/solr/search/JoinQuery.java b/solr/core/src/java/org/apache/solr/search/JoinQuery.java index e179aeb..55b72ca 100644 --- a/solr/core/src/java/org/apache/solr/search/JoinQuery.java +++ b/solr/core/src/java/org/apache/solr/search/JoinQuery.java @@ -17,7 +17,6 @@ package org.apache.solr.search; -import java.io.Closeable; import java.io.IOException; import java.util.ArrayList; import java.util.Arrays; @@ -140,21 +139,9 @@ class JoinQuery extends Query { if (fromRef != null) { final RefCounted<SolrIndexSearcher> ref = fromRef; - info.addCloseHook(new Closeable() { - @Override - public void close() { - ref.decref(); - } - }); + info.addCloseHook(ref::decref); } - - info.addCloseHook(new Closeable() { - @Override - public void close() { - fromCore.close(); - } - }); - + info.addCloseHook(fromCore); } this.toSearcher = searcher; } diff --git a/solr/core/src/java/org/apache/solr/search/SolrCache.java b/solr/core/src/java/org/apache/solr/search/SolrCache.java index 459a927..999559e 100644 --- a/solr/core/src/java/org/apache/solr/search/SolrCache.java +++ b/solr/core/src/java/org/apache/solr/search/SolrCache.java @@ -17,10 +17,10 @@ package org.apache.solr.search; import org.apache.solr.core.SolrInfoBean; +import org.apache.solr.util.IOFunction; import java.io.IOException; import java.util.Map; -import java.util.function.Function; /** @@ -41,6 +41,7 @@ public interface SolrCache<K,V> extends SolrInfoBean { String INITIAL_SIZE_PARAM = "initialSize"; String CLEANUP_THREAD_PARAM = "cleanupThread"; String SHOW_ITEMS_PARAM = "showItems"; + String ASYNC_PARAM = "async"; /** * The initialization routine. Instance specific arguments are passed in @@ -105,8 +106,10 @@ public interface SolrCache<K,V> extends SolrInfoBean { * must NOT attempt to modify any mappings in the cache. * @return existing or newly computed value, null if there was no existing value and * it was not possible to compute a new value (in which case the new mapping won't be created). + * @throws IOException if and only if mappingFunction threw an IOException. + * A cache mapping will not be created in this case */ - public V computeIfAbsent(K key, Function<? super K, ? extends V> mappingFunction); + V computeIfAbsent(K key, IOFunction<? super K, ? extends V> mappingFunction) throws IOException; /** :TODO: copy from Map */ public void clear(); diff --git a/solr/core/src/java/org/apache/solr/search/SolrDocumentFetcher.java b/solr/core/src/java/org/apache/solr/search/SolrDocumentFetcher.java index 11b26cc..23f042e 100644 --- a/solr/core/src/java/org/apache/solr/search/SolrDocumentFetcher.java +++ b/solr/core/src/java/org/apache/solr/search/SolrDocumentFetcher.java @@ -31,7 +31,6 @@ import java.util.LinkedList; import java.util.List; import java.util.Objects; import java.util.Set; -import java.util.concurrent.atomic.AtomicReference; import java.util.function.Predicate; import java.util.function.Supplier; @@ -221,18 +220,7 @@ public class SolrDocumentFetcher { Document d; if (documentCache != null) { final Set<String> getFields = enableLazyFieldLoading ? 
fields : null; - AtomicReference<IOException> exceptionRef = new AtomicReference<>(); - d = documentCache.computeIfAbsent(i, docId -> { - try { - return docNC(docId, getFields); - } catch (IOException e) { - exceptionRef.set(e); - return null; - } - }); - if (exceptionRef.get() != null) { - throw exceptionRef.get(); - } + d = documentCache.computeIfAbsent(i, docId -> docNC(docId, getFields)); if (d == null) { // failed to retrieve due to an earlier exception, try again? return docNC(i, fields); diff --git a/solr/core/src/java/org/apache/solr/search/SolrIndexSearcher.java b/solr/core/src/java/org/apache/solr/search/SolrIndexSearcher.java index 641d71c..2e6e336 100644 --- a/solr/core/src/java/org/apache/solr/search/SolrIndexSearcher.java +++ b/solr/core/src/java/org/apache/solr/search/SolrIndexSearcher.java @@ -33,6 +33,7 @@ import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicLong; import java.util.concurrent.atomic.AtomicReference; + import com.codahale.metrics.Gauge; import com.google.common.collect.Iterables; import org.apache.lucene.document.Document; @@ -81,6 +82,7 @@ import org.apache.solr.search.stats.StatsSource; import org.apache.solr.uninverting.UninvertingReader; import org.apache.solr.update.IndexFingerprint; import org.apache.solr.update.SolrIndexConfig; +import org.apache.solr.util.IOFunction; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -804,15 +806,52 @@ public class SolrIndexSearcher extends IndexSearcher implements Closeable, SolrI } // only handle positive (non negative) queries - DocSet getPositiveDocSet(Query q) throws IOException { - DocSet answer; - if (filterCache != null) { - answer = filterCache.get(q); - if (answer != null) return answer; + DocSet getPositiveDocSet(Query query) throws IOException { + // TODO duplicated code with getDocSet? + boolean doCache = filterCache != null; + if (query instanceof ExtendedQuery) { + if (!((ExtendedQuery) query).getCache()) { + doCache = false; + } + if (query instanceof WrappedQuery) { + query = ((WrappedQuery) query).getWrappedQuery(); + } } - answer = getDocSetNC(q, null); - if (filterCache != null) filterCache.put(q, answer); - return answer; + + if (doCache) { + return getAndCacheDocSet(query); + } + + return getDocSetNC(query, null); + } + + /** + * Attempt to read the query from the filter cache, if not will compute the result and insert back into the cache + * <p>Callers must ensure that: + * <ul><li>The query is unwrapped + * <li>The query has caching enabled + * <li>The filter cache exists + * @param query the query to compute. + * @return the DocSet answer + */ + private DocSet getAndCacheDocSet(Query query) throws IOException { + assert !(query instanceof WrappedQuery) : "should have unwrapped"; + assert filterCache != null : "must check for caching before calling this method"; + + if (SolrQueryTimeoutImpl.getInstance().isTimeoutEnabled()) { + // If there is a possibility of timeout for this query, then don't reserve a computation slot. Further, we can't + // naively wait for an in progress computation to finish, because if we time out before it does then we won't + // even have partial results to provide. We could possibly wait for the query to finish in parallel with our own + // results and if they complete first use that instead, but we'll leave that to implement later. 
+ DocSet answer = filterCache.get(query); + if (answer != null) { + return answer; + } + answer = getDocSetNC(query, null); + filterCache.put(query, answer); + return answer; + } + return filterCache.computeIfAbsent(query, q -> getDocSetNC(q, null)); } private static Query matchAllDocsQuery = new MatchAllDocsQuery(); @@ -1054,14 +1093,17 @@ public class SolrIndexSearcher extends IndexSearcher implements Closeable, SolrI public DocSet getDocSet(DocsEnumState deState) throws IOException { int largestPossible = deState.termsEnum.docFreq(); boolean useCache = filterCache != null && largestPossible >= deState.minSetSizeCached; - TermQuery key = null; if (useCache) { - key = new TermQuery(new Term(deState.fieldName, deState.termsEnum.term())); - DocSet result = filterCache.get(key); - if (result != null) return result; + TermQuery key = new TermQuery(new Term(deState.fieldName, deState.termsEnum.term())); + return filterCache.computeIfAbsent(key, + (IOFunction<? super Query, ? extends DocSet>) k -> getResult(deState, largestPossible)); } + return getResult(deState, largestPossible); + } + + private DocSet getResult(DocsEnumState deState, int largestPossible) throws IOException { int smallSetSize = DocSetUtil.smallSetSize(maxDoc()); int scratchSize = Math.min(smallSetSize, largestPossible); if (deState.scratch == null || deState.scratch.length < scratchSize) deState.scratch = new int[scratchSize]; @@ -1123,11 +1165,6 @@ public class SolrIndexSearcher extends IndexSearcher implements Closeable, SolrI } else { result = upto == 0 ? DocSet.empty() : new SortedIntDocSet(Arrays.copyOf(docs, upto)); } - - if (useCache) { - filterCache.put(key, result); - } - return result; } @@ -1177,12 +1214,7 @@ public class SolrIndexSearcher extends IndexSearcher implements Closeable, SolrI Query absQ = QueryUtils.getAbs(query); boolean positive = absQ == query; - // note: can't use computeIfAbsent because can be recursive - DocSet absAnswer = filterCache.get(absQ); - if (absAnswer == null) { - absAnswer = getDocSetNC(absQ, null); - filterCache.put(absQ, absAnswer); - } + DocSet absAnswer = getAndCacheDocSet(absQ); if (filter == null) { return positive ? 
absAnswer : getLiveDocSet().andNot(absAnswer); diff --git a/solr/core/src/java/org/apache/solr/search/facet/UnInvertedField.java b/solr/core/src/java/org/apache/solr/search/facet/UnInvertedField.java index a044c8f..633ec39 100644 --- a/solr/core/src/java/org/apache/solr/search/facet/UnInvertedField.java +++ b/solr/core/src/java/org/apache/solr/search/facet/UnInvertedField.java @@ -24,7 +24,6 @@ import java.util.LinkedHashMap; import java.util.List; import java.util.Map; import java.util.concurrent.atomic.AtomicLong; -import java.util.concurrent.atomic.AtomicReference; import org.apache.lucene.index.LeafReader; import org.apache.lucene.index.LeafReaderContext; @@ -36,7 +35,6 @@ import org.apache.lucene.util.BytesRef; import org.apache.lucene.util.CharsRefBuilder; import org.apache.lucene.util.FixedBitSet; import org.apache.solr.common.SolrException; -import org.apache.solr.common.SolrException.ErrorCode; import org.apache.solr.index.SlowCompositeReaderWrapper; import org.apache.solr.schema.FieldType; import org.apache.solr.schema.TrieField; @@ -605,80 +603,7 @@ public class UnInvertedField extends DocTermOrds { if (cache == null) { return new UnInvertedField(field, searcher); } - AtomicReference<Throwable> throwableRef = new AtomicReference<>(); - UnInvertedField uif = cache.computeIfAbsent(field, f -> { - UnInvertedField newUif; - try { - newUif = new UnInvertedField(field, searcher); - } catch (Throwable t) { - throwableRef.set(t); - newUif = null; - } - return newUif; - }); - if (throwableRef.get() != null) { - rethrowAsSolrException(field, throwableRef.get()); - } - return uif; - - // (ab) if my understanding is correct this whole block tried to mimic the - // semantics of computeIfAbsent - -// Boolean doWait = false; -// synchronized (cache) { -// final Object val = cache.get(field); -// if (val == null || (val instanceof Throwable)) { -// /** -// * We use this place holder object to pull the UninvertedField construction out of the sync -// * so that if many fields are accessed in a short time, the UninvertedField can be -// * built for these fields in parallel rather than sequentially. -// */ -// cache.put(field, uifPlaceholder); -// } else { -// if (val != uifPlaceholder) { -// return (UnInvertedField) val; -// } -// doWait = true; // Someone else has put the place holder in, wait for that to complete. -// } -// } -// while (doWait) { -// try { -// synchronized (cache) { -// final Object val = cache.get(field); -// if (val != uifPlaceholder) { // OK, another thread put this in the cache we should be good. -// if (val instanceof Throwable) { -// rethrowAsSolrException(field, (Throwable) val); -// } else { -// return (UnInvertedField) val; -// } -// } -// cache.wait(); -// } -// } catch (InterruptedException e) { -// rethrowAsSolrException(field, e); -// } -// } -// -// UnInvertedField uif = null; -// try { -// uif = new UnInvertedField(field, searcher); -// }catch(Throwable e) { -// synchronized (cache) { -// cache.put(field, e); // signaling the failure -// cache.notifyAll(); -// } -// rethrowAsSolrException(field, e); -// } -// synchronized (cache) { -// cache.put(field, uif); // Note, this cleverly replaces the placeholder. 
-// cache.notifyAll(); -// } -// return uif; - } - - protected static void rethrowAsSolrException(String field, Throwable e) { - throw new SolrException(ErrorCode.SERVER_ERROR, - "Exception occured during uninverting " + field, e); + return cache.computeIfAbsent(field, f -> new UnInvertedField(f, searcher)); } // Returns null if not already populated diff --git a/solr/core/src/java/org/apache/solr/search/join/BlockJoinParentQParser.java b/solr/core/src/java/org/apache/solr/search/join/BlockJoinParentQParser.java index 151062f..c1557d2 100644 --- a/solr/core/src/java/org/apache/solr/search/join/BlockJoinParentQParser.java +++ b/solr/core/src/java/org/apache/solr/search/join/BlockJoinParentQParser.java @@ -17,6 +17,7 @@ package org.apache.solr.search.join; import java.io.IOException; +import java.io.UncheckedIOException; import java.util.Objects; import org.apache.lucene.index.LeafReaderContext; @@ -90,7 +91,11 @@ public class BlockJoinParentQParser extends FiltersQParser { SolrCache<Query, BitSetProducer> parentCache = request.getSearcher().getCache(CACHE_NAME); // lazily retrieve from solr cache if (parentCache != null) { - return parentCache.computeIfAbsent(query, QueryBitSetProducer::new); + try { + return parentCache.computeIfAbsent(query, QueryBitSetProducer::new); + } catch (IOException e) { + throw new UncheckedIOException(e); // Shouldn't happen because QBSP doesn't throw + } } else { return new QueryBitSetProducer(query); } diff --git a/solr/core/src/java/org/apache/solr/search/join/HashRangeQuery.java b/solr/core/src/java/org/apache/solr/search/join/HashRangeQuery.java index 35267a0..f773086 100644 --- a/solr/core/src/java/org/apache/solr/search/join/HashRangeQuery.java +++ b/solr/core/src/java/org/apache/solr/search/join/HashRangeQuery.java @@ -88,19 +88,15 @@ public class HashRangeQuery extends Query { } IndexReader.CacheKey cacheKey = cacheHelper.getKey(); - synchronized (cacheKey) { - int[] hashes = cache.get(cacheKey); - if (hashes == null) { - hashes = new int[context.reader().maxDoc()]; - SortedDocValues docValues = context.reader().getSortedDocValues(field); - int doc; - while ((doc = docValues.nextDoc()) != DocIdSetIterator.NO_MORE_DOCS) { - hashes[doc] = hash(docValues); - } - cache.put(cacheKey, hashes); + return cache.computeIfAbsent(cacheKey, ck -> { + int[] hashes = new int[context.reader().maxDoc()]; + SortedDocValues docValues = context.reader().getSortedDocValues(field); + int doc; + while ((doc = docValues.nextDoc()) != DocIdSetIterator.NO_MORE_DOCS) { + hashes[doc] = hash(docValues); } return hashes; - } + }); } private int hash(SortedDocValues docValues) throws IOException { diff --git a/solr/core/src/java/org/apache/solr/util/TestInjection.java b/solr/core/src/java/org/apache/solr/util/TestInjection.java index eee966d..0e62c99 100644 --- a/solr/core/src/java/org/apache/solr/util/TestInjection.java +++ b/solr/core/src/java/org/apache/solr/util/TestInjection.java @@ -149,6 +149,10 @@ public class TestInjection { public volatile static Integer delayInExecutePlanAction=null; + public volatile static Integer delayBeforeCreatingNewDocSet = null; + + public volatile static AtomicInteger countDocSetDelays = new AtomicInteger(0); + public volatile static boolean failInExecutePlanAction = false; /** @@ -190,6 +194,8 @@ public class TestInjection { wrongIndexFingerprint = null; delayBeforeFollowerCommitRefresh = null; delayInExecutePlanAction = null; + delayBeforeCreatingNewDocSet = null; + countDocSetDelays.set(0); failInExecutePlanAction = false; 
skipIndexWriterCommitOnClose = false; uifOutOfMemoryError = false; @@ -576,6 +582,22 @@ public class TestInjection { return true; } + public static boolean injectDocSetDelay() { + if (delayBeforeCreatingNewDocSet != null) { + countDocSetDelays.incrementAndGet(); + try { + log.info("Pausing DocSet for {}ms", delayBeforeCreatingNewDocSet); + if (log.isDebugEnabled()) { + log.debug("", new Exception("Stack Trace")); + } + Thread.sleep(delayBeforeCreatingNewDocSet); + } catch (InterruptedException e) { + Thread.currentThread().interrupt(); + } + } + return true; + } + static Set<Hook> newSearcherHooks = ConcurrentHashMap.newKeySet(); public interface Hook { diff --git a/solr/core/src/test-files/solr/collection1/conf/solrconfig.xml b/solr/core/src/test-files/solr/collection1/conf/solrconfig.xml index c6b80c7..d46a8b7 100644 --- a/solr/core/src/test-files/solr/collection1/conf/solrconfig.xml +++ b/solr/core/src/test-files/solr/collection1/conf/solrconfig.xml @@ -97,7 +97,8 @@ <filterCache size="512" initialSize="512" - autowarmCount="2"/> + autowarmCount="2" + async="${solr.filterCache.async:false}"/> <queryResultCache size="512" diff --git a/solr/core/src/test/org/apache/solr/request/TestUnInvertedFieldException.java b/solr/core/src/test/org/apache/solr/request/TestUnInvertedFieldException.java index f927baf..b035826 100644 --- a/solr/core/src/test/org/apache/solr/request/TestUnInvertedFieldException.java +++ b/solr/core/src/test/org/apache/solr/request/TestUnInvertedFieldException.java @@ -32,7 +32,6 @@ import org.apache.lucene.util.NamedThreadFactory; import org.apache.lucene.util.TestUtil; import org.apache.solr.SolrTestCaseJ4; import org.apache.solr.common.SolrException; -import org.apache.solr.common.SolrException.ErrorCode; import org.apache.solr.common.util.ExecutorUtil.MDCAwareThreadPoolExecutor; import org.apache.solr.search.SolrIndexSearcher; import org.apache.solr.search.facet.UnInvertedField; @@ -96,13 +95,9 @@ public class TestUnInvertedFieldException extends SolrTestCaseJ4 { List<Future<UnInvertedField>> futures = initCallables.stream().map((c) -> pool.submit(c)) .collect(Collectors.toList()); for (Future<UnInvertedField> uifuture : futures) { - try { - final UnInvertedField uif = uifuture.get(); - } catch (ExecutionException injection) { - SolrException solrException = (SolrException) injection.getCause(); - assertEquals(ErrorCode.SERVER_ERROR.code, solrException.code()); - assertSame(solrException.getCause().getClass(), OutOfMemoryError.class); - } + ExecutionException injection = assertThrows(ExecutionException.class, uifuture::get); + Throwable root = SolrException.getRootCause(injection); + assertSame(OutOfMemoryError.class, root.getClass()); assertNull(UnInvertedField.checkUnInvertedField(proto.field(), searcher)); } TestInjection.uifOutOfMemoryError = false; diff --git a/solr/core/src/test/org/apache/solr/search/TestRangeQuery.java b/solr/core/src/test/org/apache/solr/search/TestRangeQuery.java index 5eb1828..fc2689f 100644 --- a/solr/core/src/test/org/apache/solr/search/TestRangeQuery.java +++ b/solr/core/src/test/org/apache/solr/search/TestRangeQuery.java @@ -25,18 +25,27 @@ import java.util.HashMap; import java.util.List; import java.util.Locale; import java.util.Map; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.TimeUnit; import com.carrotsearch.hppc.IntHashSet; import org.apache.lucene.util.TestUtil; import org.apache.solr.SolrTestCaseJ4; import org.apache.solr.common.SolrInputDocument; import 
org.apache.solr.common.params.ModifiableSolrParams; +import org.apache.solr.common.util.ExecutorUtil; +import org.apache.solr.common.util.SolrNamedThreadFactory; +import org.apache.solr.core.SolrCore; import org.apache.solr.request.SolrQueryRequest; +import org.apache.solr.request.SolrRequestHandler; import org.apache.solr.response.ResultContext; import org.apache.solr.response.SolrQueryResponse; import org.apache.solr.schema.FieldType; import org.apache.solr.schema.NumberType; import org.apache.solr.schema.StrField; +import org.apache.solr.util.TestInjection; +import org.junit.After; import org.junit.Before; import org.junit.BeforeClass; import org.junit.Test; @@ -61,6 +70,13 @@ public class TestRangeQuery extends SolrTestCaseJ4 { assertU(commit()); } + @After + @Override + public void tearDown() throws Exception { + TestInjection.reset(); + super.tearDown(); + } + void addInt(SolrInputDocument doc, int l, int u, String... fields) { int v=0; if (0==l && l==u) { @@ -81,9 +97,12 @@ public class TestRangeQuery extends SolrTestCaseJ4 { for (int i=0; i<nDocs; i++) { SolrInputDocument doc = new SolrInputDocument(); doc.addField("id", ""+i); - proc.process(doc); + if (proc != null) { + proc.process(doc); + } assertU(adoc(doc)); } + assertU(commit()); } @Test @@ -260,14 +279,10 @@ public class TestRangeQuery extends SolrTestCaseJ4 { // sometimes a very small index, sometimes a very large index final int numDocs = random().nextBoolean() ? random().nextInt(50) : atLeast(1000); - createIndex(numDocs, new DocProcessor() { - @Override - public void process(SolrInputDocument doc) { - // 10% of the docs have missing values - if (random().nextInt(10)!=0) addInt(doc, l,u, fields); - } + createIndex(numDocs, doc -> { + // 10% of the docs have missing values + if (random().nextInt(10)!=0) addInt(doc, l,u, fields); }); - assertU(commit()); final int numIters = atLeast(1000); for (int i=0; i < numIters; i++) { @@ -357,7 +372,46 @@ public class TestRangeQuery extends SolrTestCaseJ4 { } } } - + + @Test + public void testRangeQueryWithFilterCache() throws Exception { + TestInjection.delayBeforeCreatingNewDocSet = 500; + + // sometimes a very small index, sometimes a very large index + // final int numDocs = random().nextBoolean() ? 
random().nextInt(50) : atLeast(1000); + final int numDocs = 99; + createIndex(numDocs, doc -> { + addInt(doc, 0, 0, "foo_i"); + }); + + ExecutorService queryService = ExecutorUtil.newMDCAwareFixedThreadPool(4, new SolrNamedThreadFactory("TestRangeQuery")); + try (SolrCore core = h.getCoreInc()) { + SolrRequestHandler defaultHandler = core.getRequestHandler(""); + + ModifiableSolrParams params = new ModifiableSolrParams(); + params.set("q", "*:*"); + params.add("fq", "id:[0 TO 222]"); // These should all come from FilterCache + + // 10 threads with 4 executors would be enough for 3 waves, or approximately 1500ms of delay + CountDownLatch atLeastOnceCompleted = new CountDownLatch(1); + for (int i = 0; i < 10; i++) { + queryService.submit(() -> { + try (SolrQueryRequest req = req(params)) { + core.execute(defaultHandler, req, new SolrQueryResponse()); + } + atLeastOnceCompleted.countDown(); + }); + } + + queryService.shutdown(); // No more requests will be queued up + atLeastOnceCompleted.await(); // Wait for the first query to complete + assertTrue(queryService.awaitTermination(1, TimeUnit.SECONDS)); // All queries after should be very fast + + assertEquals("Create only one DocSet outside of cache", 1, TestInjection.countDocSetDelays.get()); + } + } + + @Test public void testRangeQueryEndpointTO() throws Exception { assertEquals("[to TO to]", QParser.getParser("[to TO to]", req("df", "text")).getQuery().toString("text")); assertEquals("[to TO to]", QParser.getParser("[to TO TO]", req("df", "text")).getQuery().toString("text")); @@ -377,6 +431,7 @@ public class TestRangeQuery extends SolrTestCaseJ4 { assertEquals("[xx TO to]", QParser.getParser("[xx TO TO]", req("df", "text")).getQuery().toString("text")); } + @Test public void testRangeQueryRequiresTO() throws Exception { assertEquals("{a TO b}", QParser.getParser("{A TO B}", req("df", "text")).getQuery().toString("text")); assertEquals("[a TO b}", QParser.getParser("[A TO B}", req("df", "text")).getQuery().toString("text")); @@ -400,6 +455,7 @@ public class TestRangeQuery extends SolrTestCaseJ4 { expectThrows(SyntaxError.class, () -> QParser.getParser("[A TO]", req("df", "text")).getQuery()); } + @Test public void testCompareTypesRandomRangeQueries() throws Exception { int cardinality = 10000; Map<NumberType,String[]> types = new HashMap<>(); //single and multivalued field types @@ -503,13 +559,9 @@ public class TestRangeQuery extends SolrTestCaseJ4 { private long doRangeQuery(boolean mv, String start, String end, String field, String[] qRange) throws Exception { ModifiableSolrParams params = new ModifiableSolrParams(); params.set("q", "field_" + (mv?"mv_":"sv_") + field + ":" + start + qRange[0] + " TO " + qRange[1] + end); - SolrQueryRequest req = req(params); - try { + try (SolrQueryRequest req = req(params)) { return (long) h.queryAndResponse("", req).getToLog().get("hits"); - } finally { - req.close(); } - } private String[] getRandomRange(int max, String fieldName) { diff --git a/solr/core/src/test/org/apache/solr/search/TestSolrCachePerf.java b/solr/core/src/test/org/apache/solr/search/TestSolrCachePerf.java index f46ba60..63111d1 100644 --- a/solr/core/src/test/org/apache/solr/search/TestSolrCachePerf.java +++ b/solr/core/src/test/org/apache/solr/search/TestSolrCachePerf.java @@ -16,10 +16,13 @@ */ package org.apache.solr.search; +import java.io.IOException; import java.util.ArrayList; import java.util.HashMap; import java.util.List; import java.util.Map; +import java.util.Set; +import java.util.concurrent.ConcurrentHashMap; import 
java.util.concurrent.CountDownLatch; import java.util.concurrent.atomic.AtomicBoolean; @@ -30,6 +33,7 @@ import org.apache.solr.metrics.SolrMetricManager; import org.apache.solr.metrics.SolrMetricsContext; import org.junit.Before; import org.junit.Test; +import org.junit.runners.model.MultipleFailureException; /** * @@ -61,8 +65,8 @@ public class TestSolrCachePerf extends SolrTestCaseJ4 { // warm-up int threads = 10; for (int i = 0; i < 10; i++) { - doTestGetPutCompute(new HashMap<String, SummaryStatistics>(), new HashMap<String, SummaryStatistics>(), threads, false); - doTestGetPutCompute(new HashMap<String, SummaryStatistics>(), new HashMap<String, SummaryStatistics>(), threads, true); + doTestGetPutCompute(new HashMap<>(), new HashMap<>(), threads, false); + doTestGetPutCompute(new HashMap<>(), new HashMap<>(), threads, true); } for (int i = 0; i < 100; i++) { doTestGetPutCompute(getPutRatio, getPutTime, threads, false); @@ -104,6 +108,7 @@ public class TestSolrCachePerf extends SolrTestCaseJ4 { CountDownLatch startLatch = new CountDownLatch(1); CountDownLatch stopLatch = new CountDownLatch(numThreads * NUM_KEYS); List<Thread> runners = new ArrayList<>(); + Set<Exception> exceptions = ConcurrentHashMap.newKeySet(); for (int i = 0; i < numThreads; i++) { Thread t = new Thread(() -> { try { @@ -124,11 +129,10 @@ public class TestSolrCachePerf extends SolrTestCaseJ4 { } } Thread.yield(); - stopLatch.countDown(); + stopLatch.countDown(); // Does this need to be in a finally block? } - } catch (InterruptedException e) { - fail(e.toString()); - return; + } catch (InterruptedException | IOException e) { + exceptions.add(e); } }); t.start(); @@ -142,6 +146,9 @@ public class TestSolrCachePerf extends SolrTestCaseJ4 { for (Thread t : runners) { t.join(); } + if (! 
exceptions.isEmpty()) { + throw new MultipleFailureException(new ArrayList<>(exceptions)); + } long stopTime = System.nanoTime(); Map<String, Object> metrics = cache.getSolrMetricsContext().getMetricsSnapshot(); perImplRatio.addValue( diff --git a/solr/core/src/test/org/apache/solr/search/TestSolrQueryParser.java b/solr/core/src/test/org/apache/solr/search/TestSolrQueryParser.java index 2a90327..39bf5d8 100644 --- a/solr/core/src/test/org/apache/solr/search/TestSolrQueryParser.java +++ b/solr/core/src/test/org/apache/solr/search/TestSolrQueryParser.java @@ -572,16 +572,16 @@ public class TestSolrQueryParser extends SolrTestCaseJ4 { ); inserts += 3; - assertEquals(inserts, ((Long) filterCacheStats.getValue().get("inserts")).longValue()); - assertEquals(hits, ((Long) filterCacheStats.getValue().get("hits")).longValue()); + assertEquals("wrong number of inserts", inserts, ((Long) filterCacheStats.getValue().get("inserts")).longValue()); + assertEquals("wrong number of hits", hits, ((Long) filterCacheStats.getValue().get("hits")).longValue()); assertJQ(req("q", "doesnotexist2 filter(id:1) filter(qqq_s:X) filter(abcdefg)") , "/response/numFound==2" ); hits += 3; - assertEquals(inserts, ((Long) filterCacheStats.getValue().get("inserts")).longValue()); - assertEquals(hits, ((Long) filterCacheStats.getValue().get("hits")).longValue()); + assertEquals("wrong number of inserts", inserts, ((Long) filterCacheStats.getValue().get("inserts")).longValue()); + assertEquals("wrong number of hits", hits, ((Long) filterCacheStats.getValue().get("hits")).longValue()); // make sure normal "fq" parameters also hit the cache the same way assertJQ(req("q", "doesnotexist3", "fq", "id:1", "fq", "qqq_s:X", "fq", "abcdefg") @@ -589,8 +589,8 @@ public class TestSolrQueryParser extends SolrTestCaseJ4 { ); hits += 3; - assertEquals(inserts, ((Long) filterCacheStats.getValue().get("inserts")).longValue()); - assertEquals(hits, ((Long) filterCacheStats.getValue().get("hits")).longValue()); + assertEquals("wrong number of inserts", inserts, ((Long) filterCacheStats.getValue().get("inserts")).longValue()); + assertEquals("wrong number of hits", hits, ((Long) filterCacheStats.getValue().get("hits")).longValue()); // try a query deeply nested in a FQ assertJQ(req("q", "*:* doesnotexist4", "fq", "(id:* +(filter(id:1) filter(qqq_s:X) filter(abcdefg)) )") @@ -599,8 +599,8 @@ public class TestSolrQueryParser extends SolrTestCaseJ4 { inserts += 1; // +1 for top level fq hits += 3; - assertEquals(inserts, ((Long) filterCacheStats.getValue().get("inserts")).longValue()); - assertEquals(hits, ((Long) filterCacheStats.getValue().get("hits")).longValue()); + assertEquals("wrong number of inserts", inserts, ((Long) filterCacheStats.getValue().get("inserts")).longValue()); + assertEquals("wrong number of hits", hits, ((Long) filterCacheStats.getValue().get("hits")).longValue()); // retry the complex FQ and make sure hashCode/equals works as expected w/ filter queries assertJQ(req("q", "*:* doesnotexist5", "fq", "(id:* +(filter(id:1) filter(qqq_s:X) filter(abcdefg)) )") @@ -608,8 +608,8 @@ public class TestSolrQueryParser extends SolrTestCaseJ4 { ); hits += 1; // top-level fq should have been found. 
- assertEquals(inserts, ((Long) filterCacheStats.getValue().get("inserts")).longValue()); - assertEquals(hits, ((Long) filterCacheStats.getValue().get("hits")).longValue()); + assertEquals("wrong number of inserts", inserts, ((Long) filterCacheStats.getValue().get("inserts")).longValue()); + assertEquals("wrong number of hits", hits, ((Long) filterCacheStats.getValue().get("hits")).longValue()); // try nested filter with multiple top-level args (i.e. a boolean query) @@ -619,8 +619,8 @@ hits += 1; // the inner filter inserts += 1; // the outer filter - assertEquals(inserts, ((Long) filterCacheStats.getValue().get("inserts")).longValue()); - assertEquals(hits, ((Long) filterCacheStats.getValue().get("hits")).longValue()); + assertEquals("wrong number of inserts", inserts, ((Long) filterCacheStats.getValue().get("inserts")).longValue()); + assertEquals("wrong number of hits", hits, ((Long) filterCacheStats.getValue().get("hits")).longValue()); // test the score for a filter, and that default score is 0 assertJQ(req("q", "+filter(*:*) +filter(id:1)", "fl", "id,score", "sort", "id asc") diff --git a/solr/core/src/test/org/apache/solr/search/join/BJQParserTest.java b/solr/core/src/test/org/apache/solr/search/join/BJQParserTest.java index c563563..6e9f35d 100644 --- a/solr/core/src/test/org/apache/solr/search/join/BJQParserTest.java +++ b/solr/core/src/test/org/apache/solr/search/join/BJQParserTest.java @@ -325,9 +325,10 @@ public class BJQParserTest extends SolrTestCaseJ4 { assertEquals("the first lookup gets insert", 1, delta("inserts", parentFilterCache.getValue(), parentsBefore)); - + assertEquals("true join query was not in fqCache", 0L, + delta("hits", filterCache.getValue(), filtersBefore)); assertEquals("true join query is cached in fqCache", 1L, - delta("lookups", filterCache.getValue(), filtersBefore)); + delta("inserts", filterCache.getValue(), filtersBefore)); } private long delta(String key, Map<String,Object> a, Map<String,Object> b) { diff --git a/solr/licenses/caffeine-2.8.4.jar.sha1 b/solr/licenses/caffeine-2.8.4.jar.sha1 deleted file mode 100644 index 813e00d..0000000 --- a/solr/licenses/caffeine-2.8.4.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -e5730b11981406faa28e0912405a0ce7c2d0f377 diff --git a/solr/licenses/caffeine-2.9.2.jar.sha1 b/solr/licenses/caffeine-2.9.2.jar.sha1 new file mode 100644 index 0000000..032dd10 --- /dev/null @@ -0,0 +1 @@ +0a17ed335e0ce2d337750772c0709b79af35a842 diff --git a/solr/solr-ref-guide/src/caches-warming.adoc b/solr/solr-ref-guide/src/caches-warming.adoc index 52678d3..9ba30ce 100644 --- a/solr/solr-ref-guide/src/caches-warming.adoc +++ b/solr/solr-ref-guide/src/caches-warming.adoc @@ -79,6 +79,9 @@ Reasonable values, depending on the query volume and patterns, may lie somewhere The `maxRamMB` attribute limits the maximum amount of memory a cache may consume. When both `size` and `maxRamMB` limits are specified the `maxRamMB` limit will take precedence and the `size` limit will be ignored. +The `async` attribute determines whether the cache stores direct results (`async=false`, the default) or whether it will store indirect references to the computation (`async=true`). Enabling `async` will use slightly more memory per cache entry, but may result in reduced CPU cycles generating results or doing garbage collection. 
The most noticeable improvements will be seen when there are many queries racing to compute the same results before they are inserted into the cache. In some [...] +This value is most effective on a filter cache, and likely less relevant on the other cache usages. + All caches can be disabled using the parameter `enabled` with a value of `false`. Caches can also be disabled on a query-by-query basis with the `cache` parameter, as described in the section <<common-query-parameters.adoc#cache-local-parameter,cache Local Parameter>>. @@ -118,6 +121,17 @@ Therefore, the `size` parameter is ignored if `maxRamMB` is specified. autowarmCount="128"/> ---- +The filter cache is a good candidate for enabling `async` computation. + [source,xml] ---- +<filterCache class="solr.CaffeineCache" + size="1024" + autowarmCount="0" + async="true"/> + ---- + + === Query Result Cache The `queryResultCache` holds the results of previous searches: ordered lists of document IDs (DocList) based on a query, a sort, and the range of documents requested. diff --git a/solr/test-framework/src/java/org/apache/solr/SolrTestCaseJ4.java b/solr/test-framework/src/java/org/apache/solr/SolrTestCaseJ4.java index 1a786fb..f438f35 100644 --- a/solr/test-framework/src/java/org/apache/solr/SolrTestCaseJ4.java +++ b/solr/test-framework/src/java/org/apache/solr/SolrTestCaseJ4.java @@ -300,10 +300,11 @@ public abstract class SolrTestCaseJ4 extends SolrTestCase { System.setProperty("solr.v2RealPath", "true"); System.setProperty("zookeeper.forceSync", "no"); System.setProperty("jetty.testMode", "true"); - System.setProperty("enable.update.log", usually() ? "true" : "false"); + System.setProperty("enable.update.log", Boolean.toString(usually())); System.setProperty("tests.shardhandler.randomSeed", Long.toString(random().nextLong())); System.setProperty("solr.clustering.enabled", "false"); System.setProperty("solr.cloud.wait-for-updates-with-stale-state-pause", "500"); + System.setProperty("solr.filterCache.async", Boolean.toString(usually())); System.setProperty("pkiHandlerPrivateKeyPath", SolrTestCaseJ4.class.getClassLoader().getResource("cryptokeys/priv_key512_pkcs8.pem").toExternalForm()); System.setProperty("pkiHandlerPublicKeyPath", SolrTestCaseJ4.class.getClassLoader().getResource("cryptokeys/pub_key512.der").toExternalForm()); diff --git a/versions.lock b/versions.lock index 2aa1e6f..6960cb0 100644 --- a/versions.lock +++ b/versions.lock @@ -12,7 +12,7 @@ com.fasterxml.jackson.core:jackson-core:2.12.3 (11 constraints: 61e504fa) com.fasterxml.jackson.core:jackson-databind:2.12.3 (19 constraints: f3595e4c) com.fasterxml.jackson.dataformat:jackson-dataformat-smile:2.12.3 (2 constraints: ed13c981) com.fasterxml.woodstox:woodstox-core:6.2.4 (2 constraints: 4d1cac9f) -com.github.ben-manes.caffeine:caffeine:2.8.4 (1 constraints: 10051136) +com.github.ben-manes.caffeine:caffeine:2.9.2 (1 constraints: 0f051236) com.github.virtuald:curvesapi:1.06 (1 constraints: db04f530) com.github.zafarkhaja:java-semver:0.9.0 (1 constraints: 0b050636) com.google.api:api-common:1.10.1 (4 constraints: 822c34f6) diff --git a/versions.props b/versions.props index 39f7ee8..2c71a28 100644 --- a/versions.props +++ b/versions.props @@ -8,7 +8,7 @@ com.epam:parso=2.0.11 com.esri.geometry:esri-geometry-api=2.2.0 com.fasterxml.jackson*:*=2.10.1 com.fasterxml.woodstox:woodstox-core=6.2.4 -com.github.ben-manes.caffeine:caffeine=2.8.4 +com.github.ben-manes.caffeine:caffeine=2.9.2 com.github.virtuald:curvesapi=1.06 com.github.zafarkhaja:java-semver=0.9.0 
com.google.api-client:google-api-client=1.29.2
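
The heart of the change, per the commit message above, is reserving a cache slot with a CompletableFuture so that only one thread computes a given key while identical requests wait on the same future. Below is a minimal, dependency-free sketch of that reservation pattern; the class name `FutureReservingCache` and the plain `ConcurrentHashMap` standing in for Caffeine's `AsyncCache` are illustrative assumptions, not the committed implementation, which additionally handles IOException propagation, query cancellation, and RAM accounting as the CaffeineCache diff shows.

[source,java]
----
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.function.Function;

// Sketch only: a plain ConcurrentHashMap stands in for Caffeine's AsyncCache view.
public class FutureReservingCache<K, V> {
  private final ConcurrentMap<K, CompletableFuture<V>> map = new ConcurrentHashMap<>();

  public V computeIfAbsent(K key, Function<? super K, ? extends V> loader) {
    CompletableFuture<V> ours = new CompletableFuture<>();
    CompletableFuture<V> theirs = map.putIfAbsent(key, ours);
    if (theirs != null) {
      // Another thread reserved this key first; wait for its result instead of recomputing.
      return theirs.join();
    }
    try {
      V value = loader.apply(key); // only the reserving thread pays for the computation
      ours.complete(value);
      return value;
    } catch (RuntimeException e) {
      ours.completeExceptionally(e);
      map.remove(key, ours); // don't leave a poisoned entry behind
      throw e;
    }
  }

  public static void main(String[] args) throws InterruptedException {
    FutureReservingCache<String, String> cache = new FutureReservingCache<>();
    Runnable query = () -> cache.computeIfAbsent("fq", k -> {
      System.out.println("computed once, by " + Thread.currentThread().getName());
      return "docset";
    });
    Thread t1 = new Thread(query);
    Thread t2 = new Thread(query); // would have raced under the old synchronous path
    t1.start();
    t2.start();
    t1.join();
    t2.join();
  }
}
----

One design note grounded in the diff: when the committed code completes the future exceptionally, it relies on Caffeine to drop the entry ("This will remove the future from the cache"); the standalone sketch has to remove the poisoned future by hand.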
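The Caffeine wiring itself is small. Here is a hedged sketch of how the new async branch of buildCache fits together against Caffeine 2.9.2; the maximumSize value and the String key/value types are arbitrary examples, while the direct executor mirrors the committed comment about keeping the computation on the calling thread.

[source,java]
----
import java.util.concurrent.CompletableFuture;

import com.github.benmanes.caffeine.cache.AsyncCache;
import com.github.benmanes.caffeine.cache.Cache;
import com.github.benmanes.caffeine.cache.Caffeine;

public class BuildAsyncExample {
  public static void main(String[] args) {
    // async=true: build an AsyncCache and keep a synchronous view around for the
    // existing get/put/clear code paths, as the new buildCache branch does.
    AsyncCache<String, String> asyncCache = Caffeine.newBuilder()
        .maximumSize(1024)
        .executor(Runnable::run) // computation stays on the caller's thread
        .buildAsync();
    Cache<String, String> cache = asyncCache.synchronous();

    // Reservations go through the future-valued map view.
    CompletableFuture<String> future = new CompletableFuture<>();
    if (asyncCache.asMap().putIfAbsent("key", future) == null) {
      future.complete("value"); // completing the future populates the entry
    }
    System.out.println(cache.getIfPresent("key")); // prints: value
  }
}
----

Note that with `executor(Runnable::run)` nothing actually runs on a background thread; the async machinery is used purely for its per-key future slots, which is what reduces bucket-lock contention for identical in-flight queries.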