dsmiley commented on code in PR #3418:
URL: https://github.com/apache/solr/pull/3418#discussion_r2263682221


##########
solr/core/src/java/org/apache/solr/handler/component/CombinedQueryComponent.java:
##########
@@ -0,0 +1,587 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.solr.handler.component;
+
+import java.io.IOException;
+import java.io.PrintWriter;
+import java.io.StringWriter;
+import java.lang.invoke.MethodHandles;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Objects;
+import java.util.Set;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.stream.Collectors;
+import org.apache.lucene.search.Explanation;
+import org.apache.lucene.search.Query;
+import org.apache.lucene.search.Sort;
+import org.apache.lucene.search.SortField;
+import org.apache.solr.client.solrj.SolrServerException;
+import org.apache.solr.common.SolrDocument;
+import org.apache.solr.common.SolrDocumentList;
+import org.apache.solr.common.SolrException;
+import org.apache.solr.common.params.CombinerParams;
+import org.apache.solr.common.params.CursorMarkParams;
+import org.apache.solr.common.params.ShardParams;
+import org.apache.solr.common.params.SolrParams;
+import org.apache.solr.common.util.NamedList;
+import org.apache.solr.common.util.SimpleOrderedMap;
+import org.apache.solr.common.util.StrUtils;
+import org.apache.solr.core.SolrCore;
+import org.apache.solr.response.BasicResultContext;
+import org.apache.solr.response.ResultContext;
+import org.apache.solr.response.SolrQueryResponse;
+import org.apache.solr.schema.IndexSchema;
+import org.apache.solr.schema.SchemaField;
+import org.apache.solr.search.DocListAndSet;
+import org.apache.solr.search.QueryResult;
+import org.apache.solr.search.SolrReturnFields;
+import org.apache.solr.search.SortSpec;
+import org.apache.solr.search.combine.QueryAndResponseCombiner;
+import org.apache.solr.search.combine.ReciprocalRankFusion;
+import org.apache.solr.util.SolrResponseUtil;
+import org.apache.solr.util.plugin.SolrCoreAware;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * The CombinedQueryComponent class extends QueryComponent and provides 
support for executing
+ * multiple queries and combining their results.
+ */
+public class CombinedQueryComponent extends QueryComponent implements 
SolrCoreAware {
+
+  public static final String COMPONENT_NAME = "combined_query";
+  protected NamedList<?> initParams;
+  private Map<String, QueryAndResponseCombiner> combiners = new 
ConcurrentHashMap<>();
+  private int maxCombinerQueries;
+  private static final Logger log = 
LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
+
+  @Override
+  public void init(NamedList<?> args) {
+    super.init(args);
+    this.initParams = args;
+    this.maxCombinerQueries = CombinerParams.DEFAULT_MAX_COMBINER_QUERIES;
+  }
+
+  @Override
+  public void inform(SolrCore core) {
+    if (initParams != null && initParams.size() > 0) {
+      log.info("Initializing CombinedQueryComponent");
+      NamedList<?> all = (NamedList<?>) initParams.get("combiners");
+      for (int i = 0; i < all.size(); i++) {
+        String name = all.getName(i);
+        NamedList<?> combinerConfig = (NamedList<?>) all.getVal(i);
+        String className = (String) combinerConfig.get("class");
+        QueryAndResponseCombiner combiner =
+            core.getResourceLoader().newInstance(className, 
QueryAndResponseCombiner.class);
+        combiner.init(combinerConfig);
+        combiners.compute(
+            name,
+            (k, existingCombiner) -> {
+              if (existingCombiner == null) {
+                return combiner;
+              }
+              throw new SolrException(
+                  SolrException.ErrorCode.BAD_REQUEST,
+                  "Found more than one combiner with same name");
+            });
+      }
+      Object maxQueries = initParams.get("maxCombinerQueries");
+      if (maxQueries != null) {
+        this.maxCombinerQueries = Integer.parseInt(maxQueries.toString());
+      }
+    }
+    combiners.computeIfAbsent(
+        CombinerParams.RECIPROCAL_RANK_FUSION,
+        key -> {
+          ReciprocalRankFusion reciprocalRankFusion = new 
ReciprocalRankFusion();
+          reciprocalRankFusion.init(initParams);
+          return reciprocalRankFusion;
+        });
+  }
+
+  /**
+   * Overrides the prepare method to handle combined queries.
+   *
+   * @param rb the ResponseBuilder to prepare
+   * @throws IOException if an I/O error occurs during preparation
+   */
+  @Override
+  public void prepare(ResponseBuilder rb) throws IOException {
+    if (rb instanceof CombinedQueryResponseBuilder crb) {
+      SolrParams params = crb.req.getParams();
+      String[] queriesToCombineKeys = 
params.getParams(CombinerParams.COMBINER_QUERY);
+      if (queriesToCombineKeys.length > maxCombinerQueries) {
+        throw new SolrException(
+            SolrException.ErrorCode.BAD_REQUEST,
+            "Too many queries to combine: limit is " + maxCombinerQueries);
+      }
+      for (String queryKey : queriesToCombineKeys) {
+        final var unparsedQuery = params.get(queryKey);
+        ResponseBuilder rbNew = new ResponseBuilder(rb.req, new 
SolrQueryResponse(), rb.components);
+        rbNew.setQueryString(unparsedQuery);
+        super.prepare(rbNew);
+        crb.responseBuilders.add(rbNew);
+      }
+    }
+    super.prepare(rb);
+  }
+
  /**
   * Overrides the process method to handle CombinedQueryResponseBuilder instances. Each child
   * ResponseBuilder prepared earlier is executed via {@code super.process}, the per-query results
   * are combined using the QueryAndResponseCombiner strategy selected by request params, and the
   * merged result (plus optional debug explanations, cursor mark, and sort/prefetch handling) is
   * set on the CombinedQueryResponseBuilder.
   *
   * @param rb the ResponseBuilder object to process
   * @throws IOException if an I/O error occurs during processing
   */
  @Override
  public void process(ResponseBuilder rb) throws IOException {
    if (rb instanceof CombinedQueryResponseBuilder crb) {
      // Aggregate early-termination/partial flags across all sub-queries.
      boolean partialResults = false;
      boolean segmentTerminatedEarly = false;
      boolean setMaxHitsTerminatedEarly = false;
      List<QueryResult> queryResults = new ArrayList<>();
      for (ResponseBuilder rbNow : crb.responseBuilders) {
        // Propagate the cursor from the global ResponseBuilder so that, when a cursor is used to
        // retrieve the next batch, each sub-query resumes from the same mark (results may overlap
        // with the previous page since multiple queries are involved).
        rbNow.setCursorMark(crb.getCursorMark());
        super.process(rbNow);
        DocListAndSet docListAndSet = rbNow.getResults();
        QueryResult queryResult = new QueryResult();
        queryResult.setDocListAndSet(docListAndSet);
        queryResults.add(queryResult);
        // NOTE(review): these flags are read from the freshly constructed QueryResult, which only
        // had its DocListAndSet assigned — confirm they actually reflect the executed search
        // rather than default values.
        partialResults |= queryResult.isPartialResults();
        if (queryResult.getSegmentTerminatedEarly() != null) {
          segmentTerminatedEarly |= queryResult.getSegmentTerminatedEarly();
        }
        if (queryResult.getMaxHitsTerminatedEarly() != null) {
          setMaxHitsTerminatedEarly |= queryResult.getMaxHitsTerminatedEarly();
        }
      }
      // Pick the combiner by request param, falling back to the default algorithm.
      String algorithm =
          rb.req
              .getParams()
              .get(CombinerParams.COMBINER_ALGORITHM, CombinerParams.DEFAULT_COMBINER);
      QueryAndResponseCombiner combinerStrategy =
          QueryAndResponseCombiner.getImplementation(algorithm, combiners);
      QueryResult combinedQueryResult = combinerStrategy.combine(queryResults, rb.req.getParams());
      // A flag set by any sub-query is reported on the combined result.
      combinedQueryResult.setPartialResults(partialResults);
      combinedQueryResult.setSegmentTerminatedEarly(segmentTerminatedEarly);
      combinedQueryResult.setMaxHitsTerminatedEarly(setMaxHitsTerminatedEarly);
      crb.setResult(combinedQueryResult);
      if (rb.isDebug()) {
        String[] queryKeys = rb.req.getParams().getParams(CombinerParams.COMBINER_QUERY);
        List<Query> queries =
            crb.responseBuilders.stream().map(ResponseBuilder::getQuery).toList();
        NamedList<Explanation> explanations =
            combinerStrategy.getExplanations(
                queryKeys,
                queries,
                queryResults,
                rb.req.getSearcher(),
                rb.req.getSchema(),
                rb.req.getParams());
        rb.addDebugInfo("combinerExplanations", explanations);
      }
      ResultContext ctx = new BasicResultContext(crb);
      crb.rsp.addResponse(ctx);
      // Log the combined hit count; 0 when there is no result/docList yet.
      crb.rsp
          .getToLog()
          .add(
              "hits",
              crb.getResults() == null || crb.getResults().docList == null
                  ? 0
                  : crb.getResults().docList.matches());
      if (!crb.req.getParams().getBool(ShardParams.IS_SHARD, false)) {
        // for non-distributed request
        if (null != crb.getNextCursorMark()) {
          // NOTE(review): the null check is on crb's next cursor mark, but the serialized totem is
          // taken from the FIRST child builder — confirm this asymmetry is intentional.
          crb.rsp.add(
              CursorMarkParams.CURSOR_MARK_NEXT,
              crb.responseBuilders.getFirst().getNextCursorMark().getSerializedTotem());
        }
      }

      // Delegate field merging when a handler is installed; otherwise fall back to the
      // standard sort-value handling, then prefetch documents for the response.
      if (crb.mergeFieldHandler != null) {
        crb.mergeFieldHandler.handleMergeFields(crb, crb.req.getSearcher());
      } else {
        doFieldSortValues(rb, crb.req.getSearcher());
      }
      doPrefetch(crb);
    } else {
      // Not a combined request: plain QueryComponent behavior.
      super.process(rb);
    }
  }
+
+  @Override
+  protected void mergeIds(ResponseBuilder rb, ShardRequest sreq) {
+    List<MergeStrategy> mergeStrategies = rb.getMergeStrategies();
+    if (mergeStrategies != null) {
+      mergeStrategies.sort(MergeStrategy.MERGE_COMP);
+      boolean idsMerged = false;
+      for (MergeStrategy mergeStrategy : mergeStrategies) {
+        mergeStrategy.merge(rb, sreq);
+        if (mergeStrategy.mergesIds()) {
+          idsMerged = true;
+        }
+      }
+
+      if (idsMerged) {
+        return; // ids were merged above so return.
+      }
+    }
+
+    SortSpec ss = rb.getSortSpec();
+
+    // If the shard request was also used to get fields (along with the 
scores), there is no reason
+    // to copy over the score dependent fields, since those will already exist 
in the document with
+    // the return fields
+    Set<String> scoreDependentFields;
+    if ((sreq.purpose & ShardRequest.PURPOSE_GET_FIELDS) == 0) {
+      scoreDependentFields =
+          
rb.rsp.getReturnFields().getScoreDependentReturnFields().keySet().stream()
+              .filter(field -> !field.equals(SolrReturnFields.SCORE))
+              .collect(Collectors.toSet());
+    } else {
+      scoreDependentFields = Collections.emptySet();
+    }
+
+    IndexSchema schema = rb.req.getSchema();
+    SchemaField uniqueKeyField = schema.getUniqueKeyField();
+
+    // id to shard mapping, to eliminate any accidental dups
+    HashMap<Object, String> uniqueDoc = new HashMap<>();
+
+    NamedList<Object> shardInfo = null;
+    if (rb.req.getParams().getBool(ShardParams.SHARDS_INFO, false)) {
+      shardInfo = new SimpleOrderedMap<>();
+      rb.rsp.getValues().add(ShardParams.SHARDS_INFO, shardInfo);
+    }
+
+    long numFound = 0;
+    boolean hitCountIsExact = true;
+    Float maxScore = null;
+    boolean thereArePartialResults = false;
+    Boolean segmentTerminatedEarly = null;
+    boolean maxHitsTerminatedEarly = false;
+    long approximateTotalHits = 0;
+    int failedShardCount = 0;
+    Map<String, List<ShardDoc>> shardDocMap = new HashMap<>();
+    for (ShardResponse srsp : sreq.responses) {

Review Comment:
   It's *very* common for `srsp` to be named this.  I think consistency with 
common names throughout the codebase is good, even if it isn't the name we 
might have chosen on a green-field project.



-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: [email protected]

For queries about this service, please contact Infrastructure at:
[email protected]


---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]

Reply via email to