gerlowskija commented on code in PR #3053:
URL: https://github.com/apache/solr/pull/3053#discussion_r2050966911


##########
solr/core/src/java/org/apache/solr/search/grouping/distributed/responseprocessor/TopGroupsShardResponseProcessor.java:
##########
@@ -233,7 +233,7 @@ static void fillResultIds(ResponseBuilder rb) {
     int i = 0;
     for (TopGroups<BytesRef> topGroups : rb.mergedTopGroups.values()) {
       for (GroupDocs<BytesRef> group : topGroups.groups) {
-        for (ScoreDoc scoreDoc : group.scoreDocs) {
+        for (ScoreDoc scoreDoc : group.scoreDocs()) {

Review Comment:
   ditto, re: "Lucene movement towards Java 'records'"
   
   See [comment here](https://github.com/apache/solr/pull/3053#discussion_r2050683426)



##########
solr/core/src/java/org/apache/solr/search/grouping/distributed/command/TopGroupsFieldCommand.java:
##########
@@ -201,7 +201,7 @@ public void postCollect(IndexSearcher searcher) throws IOException {
     }
     if (needScores) {
       for (GroupDocs<?> group : topGroups.groups) {
-        TopFieldCollector.populateScores(group.scoreDocs, searcher, query);
+        TopFieldCollector.populateScores(group.scoreDocs(), searcher, query);

Review Comment:
   ditto, re: "Lucene movement towards Java 'records'"
   
   See [comment here](https://github.com/apache/solr/pull/3053#discussion_r2050683426)



##########
solr/core/src/java/org/apache/solr/search/grouping/distributed/shardresultserializer/TopGroupsResultTransformer.java:
##########
@@ -222,24 +222,24 @@ protected NamedList<Object> serializeTopGroups(TopGroups<BytesRef> data, SchemaF
     SchemaField uniqueField = schema.getUniqueKeyField();
     for (GroupDocs<BytesRef> searchGroup : data.groups) {
       NamedList<Object> groupResult = new NamedList<>();
-      assert searchGroup.totalHits.relation == TotalHits.Relation.EQUAL_TO;
-      groupResult.add("totalHits", searchGroup.totalHits.value);
-      if (!Float.isNaN(searchGroup.maxScore)) {
-        groupResult.add("maxScore", searchGroup.maxScore);
+      assert searchGroup.totalHits().relation() == TotalHits.Relation.EQUAL_TO;

Review Comment:
   ditto, re: "Lucene movement towards Java 'records'"
   
   See [comment here](https://github.com/apache/solr/pull/3053#discussion_r2050683426)



##########
solr/core/src/java/org/apache/solr/search/join/CrossCollectionJoinQuery.java:
##########
@@ -323,7 +316,7 @@ private DocSet getDocSet() throws IOException {
     }
 
     @Override
-    public Scorer scorer(LeafReaderContext context) throws IOException {
+    public ScorerSupplier scorerSupplier(LeafReaderContext context) throws IOException {

Review Comment:
   ditto, re: "Scorer vs. ScorerSupplier"
   
   See [comment here](https://github.com/apache/solr/pull/3053#discussion_r2050863372)
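
   For anyone following along, a minimal sketch of the Weight#scorer -> Weight#scorerSupplier shape (a generic adapter, not the Solr code itself; a real implementation can also defer building the Scorer until get() is called):

```java
import org.apache.lucene.search.Scorer;
import org.apache.lucene.search.ScorerSupplier;

final class ScorerSupplierAdapter {
  private ScorerSupplierAdapter() {}

  // Wraps an already-built Scorer in the ScorerSupplier shape that
  // Weight#scorerSupplier is expected to return.
  static ScorerSupplier wrap(Scorer scorer) {
    if (scorer == null) {
      return null; // convention: no matches in this segment
    }
    return new ScorerSupplier() {
      @Override
      public Scorer get(long leadCost) {
        return scorer;
      }

      @Override
      public long cost() {
        return scorer.iterator().cost();
      }
    };
  }
}
```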



##########
solr/core/src/java/org/apache/solr/search/mlt/SimpleMLTQParser.java:
##########
@@ -47,7 +47,7 @@ public Query parse() {
 
     try {
       TopDocs td = searcher.search(docIdQuery, 2);
-      if (td.totalHits.value != 1)
+      if (td.totalHits.value() != 1)

Review Comment:
   ditto, re: "Lucene movement towards Java 'records'"
   
   See [comment here](https://github.com/apache/solr/pull/3053#discussion_r2050683426)
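
   For reference, a small illustrative snippet (not the Solr code itself) of what the record migration looks like from the caller's side, using TotalHits as in this hunk:

```java
import java.io.IOException;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.TopDocs;
import org.apache.lucene.search.TotalHits;

class TotalHitsRecordExample {
  // Illustrative only: shows the public-field -> record-accessor change.
  static boolean matchesExactlyOne(IndexSearcher searcher, Query docIdQuery) throws IOException {
    TopDocs td = searcher.search(docIdQuery, 2);
    // Previously: td.totalHits.value and td.totalHits.relation (public fields).
    // Now that TotalHits is a record, callers go through the generated accessors:
    return td.totalHits.relation() == TotalHits.Relation.EQUAL_TO && td.totalHits.value() == 1;
  }
}
```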



##########
solr/core/src/java/org/apache/solr/search/join/MultiValueTermOrdinalCollector.java:
##########
@@ -55,8 +55,9 @@ public void collect(int doc) throws IOException {
     final int globalDoc = docBase + doc;
 
     if (topLevelDocValues.advanceExact(globalDoc)) {
-      long ord = SortedSetDocValues.NO_MORE_ORDS;
-      while ((ord = topLevelDocValues.nextOrd()) != SortedSetDocValues.NO_MORE_ORDS) {
+      long ord;

Review Comment:
   ditto, re: "docValue iteration"
   
   See [comment here](https://github.com/apache/solr/pull/3053#discussion_r2050762921)
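
   For reference, a minimal sketch (not the Solr collector itself) of the newer per-document ordinal iteration this hunk moves to:

```java
import java.io.IOException;
import org.apache.lucene.index.SortedSetDocValues;

class OrdinalIterationExample {
  // Illustrative only: NO_MORE_ORDS-style iteration vs. the docValueCount() loop.
  static void collectOrds(SortedSetDocValues dv, int doc) throws IOException {
    if (dv.advanceExact(doc)) {
      // Old pattern: while ((ord = dv.nextOrd()) != SortedSetDocValues.NO_MORE_ORDS) { ... }
      // New pattern: call nextOrd() exactly docValueCount() times for the current doc.
      for (int i = 0; i < dv.docValueCount(); i++) {
        long ord = dv.nextOrd();
        // consume ord here, e.g. resolve it with dv.lookupOrd(ord)
      }
    }
  }
}
```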



##########
solr/core/src/test/org/apache/solr/handler/TestReplicationHandlerBackup.java:
##########
@@ -150,7 +150,7 @@ private void verify(Path backup, int nDocs) throws IOException {
         IndexReader reader = DirectoryReader.open(dir)) {
       IndexSearcher searcher = new IndexSearcher(reader);
       TopDocs hits = searcher.search(new MatchAllDocsQuery(), 1);
-      assertEquals(nDocs, hits.totalHits.value);
+      assertEquals(nDocs, hits.totalHits.value());

Review Comment:
   ditto, re: "Lucene movement towards Java 'records'"
   
   See [comment here](https://github.com/apache/solr/pull/3053#discussion_r2050683426)



##########
solr/core/src/java/org/apache/solr/util/SolrPluginUtils.java:
##########
@@ -587,9 +587,9 @@ public static void setMinShouldMatch(BooleanQuery.Builder q, String spec, boolea
     int maxDisjunctsSize = 0;
     int optionalDismaxClauses = 0;
     for (BooleanClause c : q.build().clauses()) {
-      if (c.getOccur() == Occur.SHOULD) {
-        if (mmAutoRelax && c.getQuery() instanceof DisjunctionMaxQuery) {
-          int numDisjuncts = ((DisjunctionMaxQuery) c.getQuery()).getDisjuncts().size();
+      if (c.occur() == Occur.SHOULD) {

Review Comment:
   ditto, re: "Lucene movement towards Java 'records'"
   
   See [comment here](https://github.com/apache/solr/pull/3053#discussion_r2050683426)



##########
solr/core/src/test/org/apache/solr/handler/TestStressThreadBackup.java:
##########
@@ -331,7 +331,8 @@ private void validateBackup(final File backup) throws IOException {
     final int numRealDocsExpected = Integer.parseInt(m.group());
 
     try (Directory dir = FSDirectory.open(backup.toPath())) {
-      TestUtil.checkIndex(dir, true, true, true, null);
+      //TBD

Review Comment:
   [-1] The "level" parameter for TestUtil.checkIndex [is expected to be 1, 2, or 3](https://github.com/apache/lucene/blob/main/lucene/core/src/java/org/apache/lucene/index/CheckIndex.java#L4499-L4503), so I'd expect this to throw an exception and fail the test.
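
   Assuming the intent is still to run CheckIndex here, the fix presumably looks something like the fragment below (argument shape copied from the other checkIndex call in this PR, with level 1 as the lightest valid level; not independently verified against the new TestUtil signature):

```java
try (Directory dir = FSDirectory.open(backup.toPath())) {
  // level must be 1, 2, or 3 per CheckIndex; 1 is the lightest check
  TestUtil.checkIndex(dir, 1, true, true, null);
}
```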



##########
solr/core/src/test/org/apache/solr/BasicFunctionalityTest.java:
##########
@@ -763,7 +763,7 @@ public void testNotLazyField() throws IOException {
     core.execute(core.getRequestHandler(req.getParams().get(CommonParams.QT)), req, rsp);
 
     DocList dl = ((ResultContext) rsp.getResponse()).getDocList();
-    Document d = req.getSearcher().doc(dl.iterator().nextDoc());
+    Document d = req.getSearcher().storedFields().document(dl.iterator().nextDoc());

Review Comment:
   [INFO] Document field values are now accessed through the `storedFields()` method in order to facilitate some Lucene API and tech-debt cleanup, see https://github.com/apache/lucene/pull/11998
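
   For reference, a small illustrative snippet (not the Solr test itself) of the caller-side change:

```java
import java.io.IOException;
import org.apache.lucene.document.Document;
import org.apache.lucene.index.StoredFields;
import org.apache.lucene.search.IndexSearcher;

class StoredFieldsAccessExample {
  // Illustrative only: IndexSearcher#doc(int) -> IndexSearcher#storedFields().document(int).
  static Document load(IndexSearcher searcher, int docId) throws IOException {
    // The StoredFields instance can be reused for many document() calls on the same searcher.
    StoredFields storedFields = searcher.storedFields();
    return storedFields.document(docId);
  }
}
```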



##########
solr/core/src/java/org/apache/solr/search/grouping/endresulttransformer/GroupedEndResultTransformer.java:
##########
@@ -73,30 +73,30 @@ public void transform(
         FieldType groupFieldType = groupField.getType();
         for (GroupDocs<BytesRef> group : topGroups.groups) {
           SimpleOrderedMap<Object> groupResult = new SimpleOrderedMap<>();
-          if (group.groupValue != null) {
+          if (group.groupValue() != null) {

Review Comment:
   ditto, re: "Lucene movement towards Java 'records'"
   
   See [comment here](https://github.com/apache/solr/pull/3053#discussion_r2050683426)



##########
solr/core/src/java/org/apache/solr/search/join/GraphQuery.java:
##########
@@ -272,19 +261,19 @@ private Automaton buildAutomaton(BytesRefHash termBytesHash) {
         termBytesHash.get(i, ref);
         terms.add(ref);
       }
-      final Automaton a = DaciukMihovAutomatonBuilder.build(terms);
+      final Automaton a = Automata.makeStringUnion(terms);

Review Comment:
   [INFO] Older class hidden to shrink Lucene API surface in https://github.com/apache/lucene/issues/12321
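
   For reference, a tiny illustrative snippet of the replacement entry point (not the Solr code itself); as with the old builder, the terms are assumed to be in sorted BytesRef order:

```java
import java.util.List;
import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.automaton.Automata;
import org.apache.lucene.util.automaton.Automaton;

class StringUnionExample {
  // Illustrative only: builds a union automaton from a sorted list of terms.
  static Automaton union() {
    List<BytesRef> sortedTerms = List.of(new BytesRef("bar"), new BytesRef("baz"), new BytesRef("foo"));
    return Automata.makeStringUnion(sortedTerms);
  }
}
```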



##########
solr/core/src/java/org/apache/solr/search/grouping/endresulttransformer/MainEndResultTransformer.java:
##########
@@ -58,7 +58,7 @@ public void transform(
 
       float maxScore = Float.NEGATIVE_INFINITY;
       for (GroupDocs<BytesRef> group : topGroups.groups) {
-        for (ScoreDoc scoreDoc : group.scoreDocs) {
+        for (ScoreDoc scoreDoc : group.scoreDocs()) {

Review Comment:
   ditto, re: "Lucene movement towards Java 'records'"
   
   See [comment here](https://github.com/apache/solr/pull/3053#discussion_r2050683426)



##########
solr/core/src/java/org/apache/solr/search/grouping/distributed/requestfactory/StoredFieldsShardRequestFactory.java:
##########
@@ -46,7 +46,7 @@ public ShardRequest[] constructRequest(ResponseBuilder rb) {
     HashMap<String, Set<ShardDoc>> shardMap = new HashMap<>();
     for (TopGroups<BytesRef> topGroups : rb.mergedTopGroups.values()) {
       for (GroupDocs<BytesRef> group : topGroups.groups) {
-        mapShardToDocs(shardMap, group.scoreDocs);
+        mapShardToDocs(shardMap, group.scoreDocs());

Review Comment:
   ditto, re: "Lucene movement towards Java 'records'"
   
   See [comment here](https://github.com/apache/solr/pull/3053#discussion_r2050683426)



##########
solr/core/src/java/org/apache/solr/search/join/BlockJoinParentQParser.java:
##########
@@ -155,13 +149,13 @@ public Weight createWeight(
         throws IOException {
       return new ConstantScoreWeight(BitSetProducerQuery.this, boost) {
         @Override
-        public Scorer scorer(LeafReaderContext context) throws IOException {
+        public ScorerSupplier scorerSupplier(LeafReaderContext context) throws IOException {

Review Comment:
   ditto, re: "Scorer vs. ScorerSupplier"
   
   See [comment here](https://github.com/apache/solr/pull/3053#discussion_r2050863372)



##########
solr/core/src/java/org/apache/solr/search/join/GraphEdgeCollector.java:
##########
@@ -147,7 +147,8 @@ void addEdgeIdsToResult(int doc) throws IOException {
       if (doc == docTermOrds.docID()) {
         BytesRef edgeValue = new BytesRef();
         long ord;
-        while ((ord = docTermOrds.nextOrd()) != SortedSetDocValues.NO_MORE_ORDS) {
+        for (int o=0; o<docTermOrds.docValueCount(); o++) {

Review Comment:
   ditto, re: "docValue iteration"
   
   See [comment here](https://github.com/apache/solr/pull/3053#discussion_r2050762921)



##########
solr/core/src/java/org/apache/solr/search/grouping/endresulttransformer/SimpleEndResultTransformer.java:
##########
@@ -55,7 +55,7 @@ public void transform(
 
         float maxScore = Float.NEGATIVE_INFINITY;
         for (GroupDocs<BytesRef> group : topGroups.groups) {
-          for (ScoreDoc scoreDoc : group.scoreDocs) {
+          for (ScoreDoc scoreDoc : group.scoreDocs()) {

Review Comment:
   ditto, re: "Lucene movement towards Java 'records'"
   
   See [comment here](https://github.com/apache/solr/pull/3053#discussion_r2050683426)



##########
solr/core/src/java/org/apache/solr/search/join/GraphEdgeCollector.java:
##########
@@ -196,7 +197,7 @@ private Automaton buildAutomaton(BytesRefHash termBytesHash) {
         termBytesHash.get(i, ref);
         terms.add(ref);
       }
-      final Automaton a = DaciukMihovAutomatonBuilder.build(terms);
+      final Automaton a = Automata.makeStringUnion(terms);

Review Comment:
   [INFO] Older class hidden to shrink Lucene API surface in https://github.com/apache/lucene/issues/12321



##########
solr/core/src/java/org/apache/solr/servlet/SolrRequestParsers.java:
##########
@@ -763,7 +763,7 @@ public SolrParams parseParamsAndFillStreams(
         // Protect container owned streams from being closed by us, see SOLR-8933
         in =
             FastInputStream.wrap(
-                in == null ? new CloseShieldInputStream(req.getInputStream()) : in);
+                in == null ? new CloseShieldInputStream(req.getInputStream()) : new CloseShieldInputStream(in));

Review Comment:
   [Q] This doesn't look like a bad change necessarily, but I'm a little leery of bundling in things unrelated to the Lucene version bump.  (Assuming I'm right and this is unrelated?)



##########
solr/core/src/java/org/apache/solr/search/join/GraphQuery.java:
##########
@@ -272,19 +261,19 @@ private Automaton buildAutomaton(BytesRefHash termBytesHash) {
         termBytesHash.get(i, ref);
         terms.add(ref);
       }
-      final Automaton a = DaciukMihovAutomatonBuilder.build(terms);
+      final Automaton a = Automata.makeStringUnion(terms);
       return a;
     }
 
     @Override
-    public Scorer scorer(LeafReaderContext context) throws IOException {
+    public ScorerSupplier scorerSupplier(LeafReaderContext context) throws IOException {

Review Comment:
   ditto, re: "Scorer vs. ScorerSupplier"
   
   See [comment here](https://github.com/apache/solr/pull/3053#discussion_r2050863372)



##########
solr/core/src/java/org/apache/solr/search/join/GraphQuery.java:
##########
@@ -256,7 +245,7 @@ private DocSet resolveLeafNodes() throws IOException {
       BooleanQuery.Builder leafNodeQuery = new BooleanQuery.Builder();
       Query edgeQuery =
           collectSchemaField.hasDocValues()
-              ? new DocValuesFieldExistsQuery(field)
+              ? new FieldExistsQuery(field)

Review Comment:
   ditto, re: "FieldExistsQuery consolidation"
   
   See [comment here](https://github.com/apache/solr/pull/3053#discussion_r2050770517)



##########
solr/core/src/java/org/apache/solr/search/mlt/AbstractMLTQParser.java:
##########
@@ -136,7 +136,7 @@ protected BooleanQuery parseMLTQuery(Supplier<String[]> fieldsFallback, MLTInvok
       newQ.setMinimumNumberShouldMatch(rawMLTQuery.getMinimumNumberShouldMatch());
 
       for (BooleanClause clause : rawMLTQuery) {
-        Query q = clause.getQuery();
+        Query q = clause.query();

Review Comment:
   ditto, re: "Lucene movement towards Java 'records'"
   
   See [comment here](https://github.com/apache/solr/pull/3053#discussion_r2050683426)



##########
solr/core/src/java/org/apache/solr/search/join/ScoreJoinQParserPlugin.java:
##########
@@ -138,7 +138,7 @@ public Weight createWeight(
         fromCore.close();
         fromHolder.decref();
       }
-      return joinQuery.rewrite(searcher.getIndexReader()).createWeight(searcher, scoreMode, boost);
+      return joinQuery.rewrite(searcher).createWeight(searcher, scoreMode, boost);

Review Comment:
   ditto, re: "rewrite API change"
   
   See [comment here](https://github.com/apache/solr/pull/3053#discussion_r2050789458)



##########
solr/core/src/java/org/apache/solr/update/DeleteByQueryWrapper.java:
##########
@@ -54,12 +49,12 @@ LeafReader wrap(LeafReader reader) {
   // we try to be well-behaved, but we are not (and IW's applyQueryDeletes isn't much better...)
 
   @Override
-  public Query rewrite(IndexReader reader) throws IOException {
-    Query rewritten = in.rewrite(reader);
+  public Query rewrite(IndexSearcher searcher) throws IOException {

Review Comment:
   ditto, re: "rewrite API change"
   
   See [comment here](https://github.com/apache/solr/pull/3053#discussion_r2050789458)
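
   For reference, a hedged sketch of the general override-signature change for a delegating query (the shape of the API move, not the exact DeleteByQueryWrapper code; rewrap() is a hypothetical hook):

```java
import java.io.IOException;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query;

abstract class DelegatingQuery extends Query {
  protected final Query in;

  DelegatingQuery(Query in) {
    this.in = in;
  }

  // Old signature: public Query rewrite(IndexReader reader) throws IOException
  @Override
  public Query rewrite(IndexSearcher searcher) throws IOException {
    Query rewritten = in.rewrite(searcher);
    // Wrappers typically re-wrap a rewritten delegate and otherwise return themselves.
    return rewritten == in ? this : rewrap(rewritten);
  }

  // Hypothetical hook: how to re-wrap is specific to each wrapper.
  protected abstract Query rewrap(Query rewritten);
}
```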



##########
solr/core/src/java/org/apache/solr/search/join/HashRangeQuery.java:
##########
@@ -64,7 +56,7 @@ public boolean isCacheable(LeafReaderContext context) {
       }
 
       @Override
-      public Scorer scorer(LeafReaderContext context) throws IOException {
+      public ScorerSupplier scorerSupplier(LeafReaderContext context) throws IOException {

Review Comment:
   ditto, re: "Scorer vs. ScorerSupplier"
   
   See [comment here](https://github.com/apache/solr/pull/3053#discussion_r2050863372)



##########
solr/core/src/java/org/apache/solr/spelling/WordBreakSolrSpellChecker.java:
##########
@@ -226,8 +226,8 @@ public SpellingResult getSuggestions(SpellingOptions options) throws IOException
     if (combineWords) {
       combineSuggestionList = new ArrayList<>(combineSuggestions.length);
       for (CombineSuggestion cs : combineSuggestions) {
-        int firstTermIndex = cs.originalTermIndexes[0];
-        int lastTermIndex = cs.originalTermIndexes[cs.originalTermIndexes.length - 1];
+        int firstTermIndex = cs.originalTermIndexes()[0];

Review Comment:
   ditto, re: "Lucene movement towards Java 'records'"
   
   See [comment here](https://github.com/apache/solr/pull/3053#discussion_r2050683426)



##########
solr/core/src/java/org/apache/solr/uninverting/UninvertingReader.java:
##########
@@ -285,11 +274,12 @@ public static LeafReader wrap(LeafReader in, Function<String, Type> mapping) {
             new FieldInfo(
                 fi.name,
                 fi.number,
-                fi.hasVectors(),
+                fi.hasTermVectors(),
                 fi.omitsNorms(),
                 fi.hasPayloads(),
                 fi.getIndexOptions(),
                 type,
+                DocValuesSkipIndexType.NONE,

Review Comment:
   ditto, re: "sparse index skip list capability"
   
   See [comment here](https://github.com/apache/solr/pull/3053#discussion_r2050785064) that expands on this a bit more



##########
solr/core/src/java/org/apache/solr/update/SolrIndexSplitter.java:
##########
@@ -551,7 +544,7 @@ public Weight createWeight(IndexSearcher searcher, ScoreMode scoreMode, float bo
       return new ConstantScoreWeight(this, boost) {
 
         @Override
-        public Scorer scorer(LeafReaderContext context) throws IOException {
+        public ScorerSupplier scorerSupplier(LeafReaderContext context) throws IOException {

Review Comment:
   ditto, re: "Scorer vs. ScorerSupplier"
   
   See [comment here](https://github.com/apache/solr/pull/3053#discussion_r2050863372)



##########
solr/core/src/java/org/apache/solr/update/processor/ClassificationUpdateProcessor.java:
##########
@@ -130,7 +130,7 @@ public void processAdd(AddUpdateCommand cmd) throws IOException {
           classifier.getClasses(luceneDocument, maxOutputClasses);
       if (assignedClassifications != null) {
         for (ClassificationResult<BytesRef> singleClassification : assignedClassifications) {
-          String assignedClass = singleClassification.getAssignedClass().utf8ToString();
+          String assignedClass = singleClassification.assignedClass().utf8ToString();

Review Comment:
   ditto, re: "Lucene movement towards Java 'records'"
   
   See [comment here](https://github.com/apache/solr/pull/3053#discussion_r2050683426)



##########
solr/core/src/test-files/solr/collection1/conf/schema_codec.xml:
##########
@@ -20,7 +20,7 @@
        as a way to vet that the configuration actually matters.
   -->
   <fieldType name="string_direct" class="solr.StrField" postingsFormat="Direct" docValuesFormat="Asserting" />
-  <fieldType name="string_standard" class="solr.StrField" postingsFormat="Lucene912"/>
+  <fieldType name="string_standard" class="solr.StrField" postingsFormat="Lucene101"/>

Review Comment:
   [Q] Isn't this the default?  Why specify it here?



##########
solr/core/src/test/org/apache/solr/handler/component/QueryElevationComponentTest.java:
##########
@@ -573,7 +573,7 @@ public void testFqWithCacheAndCostLocalParams() throws Exception {
           assertEquals(2, booleanQuery.clauses().size());
           // the first clause is the original query, which is a WrappedQuery because the cache=false
           // local param was provided; the WrappedQuery doesn't cache; it contains a TermQuery
-          Query firstClause = booleanQuery.clauses().get(0).getQuery();
+          Query firstClause = booleanQuery.clauses().get(0).query();

Review Comment:
   ditto, re: "Lucene movement towards Java 'records'"
   
   See [comment here](https://github.com/apache/solr/pull/3053#discussion_r2050683426)



##########
solr/core/src/test/org/apache/solr/handler/TestSnapshotCoreBackup.java:
##########
@@ -454,7 +454,7 @@ private static void simpleBackupCheck(
           new File(backup, expectedSegmentsFileName).exists());
     }
     try (Directory dir = FSDirectory.open(backup.toPath())) {
-      TestUtil.checkIndex(dir, true, true, true, null);
+      TestUtil.checkIndex(dir, 0, true, true, null);

Review Comment:
   [-1] The "level" parameter for TestUtil.checkIndex [is expected to be 1, 2, or 3](https://github.com/apache/lucene/blob/main/lucene/core/src/java/org/apache/lucene/index/CheckIndex.java#L4499-L4503), so I'd expect this to throw an exception and fail the test.



##########
solr/core/src/java/org/apache/solr/update/DeleteByQueryWrapper.java:
##########
@@ -77,8 +72,8 @@ public Explanation explain(LeafReaderContext context, int doc) throws IOExceptio
       }
 
       @Override
-      public Scorer scorer(LeafReaderContext context) throws IOException {
-        return inner.scorer(privateContext.getIndexReader().leaves().get(0));
+      public ScorerSupplier scorerSupplier(LeafReaderContext context) throws IOException {

Review Comment:
   ditto, re: "Scorer vs. ScorerSupplier"
   
   See [comment here](https://github.com/apache/solr/pull/3053#discussion_r2050863372)



-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.


