This is an automated email from the ASF dual-hosted git repository.

ab pushed a commit to branch jira/solr-15210
in repository https://gitbox.apache.org/repos/asf/solr.git

commit 0725c0951142501002ff298405db0ac43c382d5f
Author: Andrzej Bialecki <[email protected]>
AuthorDate: Mon Mar 22 13:21:13 2021 +0100

    SOLR-15210: Add exportCache to the _default configSet. Improve metrics.
---
 .../apache/solr/handler/export/ExportWriter.java   | 18 +++++++++-
 .../solr/configsets/_default/conf/solrconfig.xml   |  7 ++++
 .../solrj/io/stream/StreamDecoratorTest.java       | 38 ++++++++++------------
 3 files changed, 41 insertions(+), 22 deletions(-)

diff --git a/solr/core/src/java/org/apache/solr/handler/export/ExportWriter.java b/solr/core/src/java/org/apache/solr/handler/export/ExportWriter.java
index a40c2f3..56b64ba 100644
--- a/solr/core/src/java/org/apache/solr/handler/export/ExportWriter.java
+++ b/solr/core/src/java/org/apache/solr/handler/export/ExportWriter.java
@@ -81,6 +81,7 @@ import org.apache.solr.schema.SchemaField;
 import org.apache.solr.schema.SortableTextField;
 import org.apache.solr.schema.StrField;
 import org.apache.solr.search.*;
+import org.apache.solr.util.RefCounted;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -201,9 +202,24 @@ public class ExportWriter implements SolrCore.RawWriter, Closeable {
     if (useHashQuery) {
       useHashQueryCounter.inc();
     }
+    final SolrCore core = req.getCore();
     MetricsMap cacheMetrics = new MetricsMap(map -> {
       // don't use per-instance var here - always check this from the current searcher!
-      SolrCache<IndexReader.CacheKey, SolrCache<String, FixedBitSet>> caches = req.getSearcher().getCache(SOLR_CACHE_KEY);
+      // 'req' may no longer be valid when this metric is checked, and referencing
+      // it here could lead to memory leaks
+      SolrCache<IndexReader.CacheKey, SolrCache<String, FixedBitSet>> caches;
+      // always get the latest searcher
+      RefCounted<SolrIndexSearcher> searcherRef = core.getSearcher();
+      SolrIndexSearcher searcher = searcherRef.get();
+      try {
+        if (searcher != null) {
+          caches = searcher.getCache(SOLR_CACHE_KEY);
+        } else {
+          caches = null;
+        }
+      } finally {
+        searcherRef.decref();
+      }
       map.put("configured", caches != null);
       if (caches != null) {
         map.put("numSegments", caches.size());
diff --git a/solr/server/solr/configsets/_default/conf/solrconfig.xml b/solr/server/solr/configsets/_default/conf/solrconfig.xml
index 1243508..6092956 100644
--- a/solr/server/solr/configsets/_default/conf/solrconfig.xml
+++ b/solr/server/solr/configsets/_default/conf/solrconfig.xml
@@ -438,6 +438,13 @@
            autowarmCount="10"
            regenerator="solr.NoOpRegenerator" />
 
+    <!-- custom cache used by the /export handler -->
+    <cache name="exportCache"
+           class="solr.CaffeineCache"
+           size="512"
+           initialSize="0"
+           regenerator="solr.NoOpRegenerator" />
+
     <!-- Field Value Cache
 
          Cache used to hold field values that are quickly accessible
diff --git a/solr/solrj/src/test/org/apache/solr/client/solrj/io/stream/StreamDecoratorTest.java b/solr/solrj/src/test/org/apache/solr/client/solrj/io/stream/StreamDecoratorTest.java
index 3b8d3c0..7a1cc89 100644
--- a/solr/solrj/src/test/org/apache/solr/client/solrj/io/stream/StreamDecoratorTest.java
+++ b/solr/solrj/src/test/org/apache/solr/client/solrj/io/stream/StreamDecoratorTest.java
@@ -18,12 +18,7 @@ package org.apache.solr.client.solrj.io.stream;
 
 import java.io.IOException;
 import java.lang.invoke.MethodHandles;
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Locale;
-import java.util.Map;
+import java.util.*;
 
 import org.apache.lucene.util.LuceneTestCase;
 import org.apache.lucene.util.LuceneTestCase.Slow;
@@ -1417,22 +1412,23 @@ public class StreamDecoratorTest extends SolrCloudTestCase {
     SolrClientCache solrClientCache = new SolrClientCache();
     streamContext.setSolrClientCache(solrClientCache);
 
-
-
     try {
-
-      for (int i = 0; i < 5; i++) {
-        log.info("======== RUN {} ============", i);
-        ParallelStream pstream = (ParallelStream) streamFactory.constructStream("parallel(" + COLLECTIONORALIAS + ", unique(search(collection1, q=*:*, fl=\"id,a_s,a_i,a_f\", sort=\"a_f asc, a_i asc\", partitionKeys=\"a_f\", qt=\"/export\"), over=\"a_f\"), workers=\"2\", zkHost=\"" + zkHost + "\", sort=\"a_f asc\")");
-        pstream.setStreamContext(streamContext);
-        List<Tuple> tuples = getTuples(pstream);
-        assertEquals(5, tuples.size());
-        assertOrder(tuples, 0, 1, 3, 4, 6);
-
-        //Test the eofTuples
-
-        Map<String, Tuple> eofTuples = pstream.getEofTuples();
-        assert (eofTuples.size() == 2); //There should be an EOF tuple for each worker.
+      for (String noCache : Set.of("true", "false")) {
+        for (String useHashQuery : Set.of("false", "true")) {
+          for (int i = 0; i < 2; i++) {
+            log.info("======== RUN {}: noCache={}, useHashQuery={} ============", i, noCache, useHashQuery);
+            ParallelStream pstream = (ParallelStream) streamFactory.constructStream("parallel(" + COLLECTIONORALIAS + ", unique(search(collection1, q=*:*, noCache=" + noCache + ",useHashQuery=" + useHashQuery + ", fl=\"id,a_s,a_i,a_f\", sort=\"a_f asc, a_i asc\", partitionKeys=\"a_f\", qt=\"/export\"), over=\"a_f\"), workers=\"2\", zkHost=\"" + zkHost + "\", sort=\"a_f asc\")");
+            pstream.setStreamContext(streamContext);
+            List<Tuple> tuples = getTuples(pstream);
+            assertEquals(5, tuples.size());
+            assertOrder(tuples, 0, 1, 3, 4, 6);
+
+            //Test the eofTuples
+
+            Map<String, Tuple> eofTuples = pstream.getEofTuples();
+            assert (eofTuples.size() == 2); //There should be an EOF tuple for each worker.
+          }
+        }
       }
     } finally {
       solrClientCache.close();

Reply via email to