stefanvodita commented on code in PR #14439:
URL: https://github.com/apache/lucene/pull/14439#discussion_r2054858596


##########
lucene/benchmark-jmh/src/java/org/apache/lucene/benchmark/jmh/HistogramCollectorBenchmark.java:
##########
@@ -0,0 +1,130 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.lucene.benchmark.jmh;
+
+import java.io.IOException;
+import java.nio.file.Files;
+import java.nio.file.Path;
+import java.util.Comparator;
+import java.util.Random;
+import java.util.concurrent.TimeUnit;
+import java.util.stream.Stream;
+import org.apache.lucene.document.Document;
+import org.apache.lucene.document.LongPoint;
+import org.apache.lucene.document.NumericDocValuesField;
+import org.apache.lucene.index.DirectoryReader;
+import org.apache.lucene.index.IndexReader;
+import org.apache.lucene.index.IndexWriter;
+import org.apache.lucene.index.IndexWriterConfig;
+import org.apache.lucene.sandbox.facet.plain.histograms.HistogramCollectorManager;
+import org.apache.lucene.search.IndexSearcher;
+import org.apache.lucene.search.MatchAllDocsQuery;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.store.MMapDirectory;
+import org.openjdk.jmh.annotations.Benchmark;
+import org.openjdk.jmh.annotations.BenchmarkMode;
+import org.openjdk.jmh.annotations.Fork;
+import org.openjdk.jmh.annotations.Level;
+import org.openjdk.jmh.annotations.Measurement;
+import org.openjdk.jmh.annotations.Mode;
+import org.openjdk.jmh.annotations.OutputTimeUnit;
+import org.openjdk.jmh.annotations.Param;
+import org.openjdk.jmh.annotations.Scope;
+import org.openjdk.jmh.annotations.Setup;
+import org.openjdk.jmh.annotations.State;
+import org.openjdk.jmh.annotations.TearDown;
+import org.openjdk.jmh.annotations.Warmup;
+
+@State(Scope.Thread)
+@BenchmarkMode(Mode.Throughput)
+@OutputTimeUnit(TimeUnit.SECONDS)
+@Fork(value = 1, warmups = 1)
+@Warmup(iterations = 1, time = 1)
+@Measurement(iterations = 3, time = 3)
+public class HistogramCollectorBenchmark {

Review Comment:
   I like this benchmark!
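
   For context, here is roughly how I picture the complete setup, as a minimal sketch
that reuses the imports quoted above. The class name, field name, document count, and
bucket width are all made up, and I'm assuming `HistogramCollectorManager` has a
`(field, bucketWidth)` constructor; the PR's actual benchmark may well differ.

```java
@State(Scope.Thread)
public class HistogramBenchSketch {
  Directory dir;
  IndexReader reader;
  IndexSearcher searcher;

  @Setup(Level.Trial)
  public void setup() throws IOException {
    dir = new MMapDirectory(Files.createTempDirectory("histogram-bench"));
    Random random = new Random(42);
    try (IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig())) {
      for (int i = 0; i < 100_000; i++) {
        Document doc = new Document();
        long value = random.nextInt(1_000_000);
        // Index both points and doc values so either collection path can be exercised.
        doc.add(new LongPoint("f", value));
        doc.add(new NumericDocValuesField("f", value));
        writer.addDocument(doc);
      }
    }
    reader = DirectoryReader.open(dir);
    searcher = new IndexSearcher(reader);
  }

  @TearDown(Level.Trial)
  public void tearDown() throws IOException {
    reader.close();
    dir.close();
  }

  @Benchmark
  public Object histogram() throws IOException {
    // MatchAllDocsQuery keeps the measurement focused on collection cost; returning
    // the result prevents dead-code elimination.
    return searcher.search(new MatchAllDocsQuery(), new HistogramCollectorManager("f", 1000));
  }
}
```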



##########
lucene/sandbox/src/java/org/apache/lucene/sandbox/facet/plain/histograms/PointTreeBulkCollector.java:
##########
@@ -0,0 +1,219 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.lucene.sandbox.facet.plain.histograms;
+
+import static org.apache.lucene.search.DocIdSetIterator.NO_MORE_DOCS;
+
+import java.io.IOException;
+import java.util.function.Function;
+import org.apache.lucene.index.PointValues;
+import org.apache.lucene.internal.hppc.LongIntHashMap;
+import org.apache.lucene.search.CollectionTerminatedException;
+import org.apache.lucene.search.DocIdSetIterator;
+import org.apache.lucene.util.ArrayUtil;
+
+class PointTreeBulkCollector {
+  static boolean collect(
+      final PointValues pointValues,
+      final long bucketWidth,
+      final LongIntHashMap collectorCounts,
+      final int maxBuckets)
+      throws IOException {
+    // TODO: Do we really need pointValues.getDocCount() == pointValues.size()
+    if (pointValues == null
+        || pointValues.getNumDimensions() != 1
+        || pointValues.getDocCount() != pointValues.size()
+        || ArrayUtil.getValue(pointValues.getBytesPerDimension()) == null) {
+      return false;
+    }
+
+    final Function<byte[], Long> byteToLong =
+        ArrayUtil.getValue(pointValues.getBytesPerDimension());
+    final long minValue = getLongFromByte(byteToLong, pointValues.getMinPackedValue());
+    final long maxValue = getLongFromByte(byteToLong, pointValues.getMaxPackedValue());
+    long leafMinBucket = Math.floorDiv(minValue, bucketWidth);
+    long leafMaxBucket = Math.floorDiv(maxValue, bucketWidth);
+
+    // We want the number of leaf nodes to be larger than the number of buckets so that
+    // we can completely skip over some of the leaf nodes. The higher this ratio, the
+    // more efficient this is compared to the naive approach!
+    if ((pointValues.size() / 512) < (leafMaxBucket - leafMinBucket)) {
+      return false;

Review Comment:
   I could be misunderstanding, so please bear with me. Let's say a user has only
`PointValues` indexed and no `DocValues`, and they try to use the histogram. Would
they still be able to do that even if their data doesn't satisfy the efficiency
condition above?
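
   To make my question concrete, this is the flow I have in mind, as a sketch with
hypothetical class and method names (not the PR's actual wiring):

```java
// Hypothetical placement in the same package, so the package-private collect(...) is visible.
package org.apache.lucene.sandbox.facet.plain.histograms;

import java.io.IOException;
import org.apache.lucene.index.DocValues;
import org.apache.lucene.index.LeafReader;
import org.apache.lucene.index.NumericDocValues;
import org.apache.lucene.index.PointValues;
import org.apache.lucene.internal.hppc.LongIntHashMap;
import org.apache.lucene.search.DocIdSetIterator;

class LeafHistogramSketch {
  static void collectLeaf(
      LeafReader reader, String field, long bucketWidth, int maxBuckets, LongIntHashMap counts)
      throws IOException {
    PointValues points = reader.getPointValues(field);
    // Fast path: fold whole point-tree leaf nodes into buckets when the field qualifies.
    if (PointTreeBulkCollector.collect(points, bucketWidth, counts, maxBuckets)) {
      return;
    }
    // Fallback: per-document walk over numeric doc values. With a points-only index
    // (the scenario in my question) this lookup is where things would fail or come up empty.
    NumericDocValues values = DocValues.getNumeric(reader, field);
    for (int doc = values.nextDoc();
        doc != DocIdSetIterator.NO_MORE_DOCS;
        doc = values.nextDoc()) {
      counts.addTo(Math.floorDiv(values.longValue(), bucketWidth), 1);
    }
  }
}
```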



##########
lucene/core/src/java/org/apache/lucene/util/ArrayUtil.java:
##########
@@ -801,4 +802,16 @@ public static int compareUnsigned4(byte[] a, int aOffset, byte[] b, int bOffset)
     return Integer.compareUnsigned(
         (int) BitUtil.VH_BE_INT.get(a, aOffset), (int) BitUtil.VH_BE_INT.get(b, bOffset));
   }
+
+  public static Function<byte[], Long> getValue(int numBytes) {

Review Comment:
   I'm happy with how it is now. Thank you!
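
   For anyone else following the thread, the shape I have in mind for this helper is
roughly the sketch below: a mapping from bytes-per-dimension to a sortable-bytes
decoder that returns null for unsupported widths. The actual body isn't shown in this
hunk, so treat the details as illustrative.

```java
import java.util.function.Function;
import org.apache.lucene.util.NumericUtils;

final class DecoderSketch {
  // Sketch of a numBytes -> decoder mapping; not necessarily the PR's exact body.
  static Function<byte[], Long> getValue(int numBytes) {
    if (numBytes == Integer.BYTES) {
      return packed -> (long) NumericUtils.sortableBytesToInt(packed, 0);
    } else if (numBytes == Long.BYTES) {
      return packed -> NumericUtils.sortableBytesToLong(packed, 0);
    }
    // Other widths (e.g. BigInteger-encoded points) stay unsupported; the caller's
    // null check then falls back to the non-bulk collection path.
    return null;
  }
}
```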



##########
lucene/sandbox/src/java/org/apache/lucene/sandbox/facet/plain/histograms/PointTreeBulkCollector.java:
##########
@@ -0,0 +1,219 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.lucene.sandbox.facet.plain.histograms;
+
+import static org.apache.lucene.search.DocIdSetIterator.NO_MORE_DOCS;
+
+import java.io.IOException;
+import java.util.function.Function;
+import org.apache.lucene.index.PointValues;
+import org.apache.lucene.internal.hppc.LongIntHashMap;
+import org.apache.lucene.search.CollectionTerminatedException;
+import org.apache.lucene.search.DocIdSetIterator;
+import org.apache.lucene.util.ArrayUtil;
+
+class PointTreeBulkCollector {
+  static boolean collect(
+      final PointValues pointValues,
+      final long bucketWidth,
+      final LongIntHashMap collectorCounts,
+      final int maxBuckets)
+      throws IOException {
+    // TODO: Do we really need pointValues.getDocCount() == pointValues.size()
+    if (pointValues == null
+        || pointValues.getNumDimensions() != 1
+        || pointValues.getDocCount() != pointValues.size()
+        || ArrayUtil.getValue(pointValues.getBytesPerDimension()) == null) {
+      return false;

Review Comment:
   Let me be more specific. I'm questioning whether the `Expected numeric field, but
got doc-value type` exception we used to throw still makes sense as-is, or needs to
be adjusted, since we now have another case for `PointValues`: if we reach the
exception now, the `PointValues` path must not have worked out.
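
   In code, the ordering I'm asking about is roughly this (hypothetical names, only to
illustrate where the message might now be telling half the story):

```java
import org.apache.lucene.index.DocValuesType;
import org.apache.lucene.index.FieldInfo;

final class FieldCheckSketch {
  // Sketch only: once the PointValues bulk path exists, reaching this check means that
  // path already declined for the field, so the message below may need to say so.
  static void checkNumericField(FieldInfo fi, boolean handledByPointTree) {
    if (handledByPointTree) {
      return; // counts were already produced from the point tree
    }
    if (fi == null || fi.getDocValuesType() != DocValuesType.NUMERIC) {
      throw new IllegalStateException(
          "Expected numeric field, but got doc-value type: "
              + (fi == null ? "none" : fi.getDocValuesType()));
    }
  }
}
```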



-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: issues-unsubscr...@lucene.apache.org

For queries about this service, please contact Infrastructure at:
us...@infra.apache.org

