donnerpeter commented on code in PR #11909:
URL: https://github.com/apache/lucene/pull/11909#discussion_r1018359280
##########
lucene/analysis/common/src/java/org/apache/lucene/analysis/hunspell/NGramFragmentChecker.java:
##########
@@ -0,0 +1,181 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.lucene.analysis.hunspell;
+
+import java.util.BitSet;
+import java.util.Collection;
+import java.util.Locale;
+import java.util.function.Consumer;
+
+/**
+ * A {@link FragmentChecker} based on all character n-grams possible in a certain language, keeping
+ * them in a relatively memory-efficient, but probabilistic data structure.
+ *
+ * @see #fromAllSimpleWords for enumerating the whole dictionary automatically
+ * @see #fromWords for creating an instance from a precomputed set of all word forms or n-grams
+ */
+public class NGramFragmentChecker implements FragmentChecker {
+  private final int n;
+  private final BitSet hashes;
+
+  private NGramFragmentChecker(int n, BitSet hashes) {
+    if (n < 2) throw new IllegalArgumentException("N should be more >=2: " + n);
+
+    this.n = n;
+    this.hashes = hashes;
+
+    if (hashes.cardinality() > hashes.size() * 2 / 3) {
+      throw new IllegalArgumentException(
+          "Too many collisions, please report this to [email protected]");
+    }
+  }
+
+  int hashCount() {
+    return hashes.cardinality();
+  }
+
+  @Override
+  public boolean hasImpossibleFragmentAround(CharSequence word, int start, int end) {
+    if (word.length() < n) {
+      return false;
+    }
+    int firstIntersectingStart = Math.max(0, start - n + 1);
+    int lastIntersectingStart = Math.min(end - 1, word.length() - n);
+    for (int i = firstIntersectingStart; i <= lastIntersectingStart; i++) {
+      if (!hashes.get(Math.abs(lowCollisionHash(word, i, i + n) % hashes.size()))) {
+        return true;
+      }
+    }
+    return false;
+  }
+
+  private static int lowCollisionHash(CharSequence chars, int offset, int end) {
+    int result = 0;
+    for (int i = offset; i < end; i++) {
+      result = 239 * result + chars.charAt(i);
+    }
+    return result;
+  }
+
+  /**
+   * Iterate the whole dictionary, derive all word forms (using {@link WordFormGenerator}), vary the
+   * case to get all words acceptable by the spellchecker, and create a fragment checker based on
+   * their {@code n}-grams. Note that this enumerates only words derivable by suffixes and prefixes.
+   * If the language has compounds, some n-grams possible via those compounds can be missed. In the
+   * latter case, consider using {@link #fromWords}.
+   *
+   * @param n the length of n-grams
+   * @param dictionary the dictionary to traverse
+   * @param checkCanceled an object that's periodically called, allowing to interrupt the traversal
+   *     by throwing an exception
+   */
+  public static NGramFragmentChecker fromAllSimpleWords(
+      int n, Dictionary dictionary, Runnable checkCanceled) {
+    BitSet hashes = new BitSet(1 << (7 + n * 3));

Review Comment:
   It's from an experiment. I forgot to restrict it, thanks for the reminder! And yes, it's currently a Bloom filter with a single hash function; the implementation can be changed later.
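
   For context, here is a minimal, self-contained sketch of the single-hash-function Bloom filter idea referred to above. It mirrors the reviewed class (one bit per n-gram, the same 239-based rolling hash), but the names SimpleNGramBloom and hasUnseenNGram are hypothetical and not part of the PR:

   import java.util.BitSet;
   import java.util.List;

   // Illustrative only, not the PR's implementation: every n-gram of every known word
   // sets one bit; a clear bit later proves that n-gram was never added, while a set bit
   // may be a collision (false positive), which is acceptable for a spellchecking pre-filter.
   class SimpleNGramBloom {
     private final int n;
     private final BitSet bits;

     SimpleNGramBloom(int n, int bitCount) {
       this.n = n;
       this.bits = new BitSet(bitCount);
     }

     void addWord(CharSequence word) {
       for (int i = 0; i + n <= word.length(); i++) {
         bits.set(index(word, i));
       }
     }

     // True only if some n-gram overlapping [start, end) was never seen during addWord.
     boolean hasUnseenNGram(CharSequence word, int start, int end) {
       if (word.length() < n) return false;
       int first = Math.max(0, start - n + 1);
       int last = Math.min(end - 1, word.length() - n);
       for (int i = first; i <= last; i++) {
         if (!bits.get(index(word, i))) return true;
       }
       return false;
     }

     private int index(CharSequence chars, int offset) {
       int h = 0;
       for (int i = offset; i < offset + n; i++) {
         h = 239 * h + chars.charAt(i); // same rolling hash as lowCollisionHash above
       }
       return Math.abs(h % bits.size());
     }

     public static void main(String[] args) {
       SimpleNGramBloom bloom = new SimpleNGramBloom(3, 1 << 16);
       for (String w : List.of("hello", "world", "spell", "checker")) {
         bloom.addWord(w);
       }
       System.out.println(bloom.hasUnseenNGram("xqzqx", 0, 5)); // almost certainly true
       System.out.println(bloom.hasUnseenNGram("hello", 0, 5)); // false
     }
   }

   With a single hash function the false-positive rate is roughly the fill ratio of the bit set, which is presumably why the reviewed constructor rejects instances whose cardinality exceeds 2/3 of the bit set size.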
