maedhroz commented on code in PR #2409:
URL: https://github.com/apache/cassandra/pull/2409#discussion_r1230187587
##########
src/java/org/apache/cassandra/index/sai/disk/v1/bbtree/BlockBalancedTreeWriter.java:
##########
@@ -0,0 +1,779 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.cassandra.index.sai.disk.v1.bbtree;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
+import java.util.function.IntFunction;
+
+import com.google.common.base.MoreObjects;
+
+import org.apache.cassandra.index.sai.disk.io.RAMIndexOutput;
+import org.apache.cassandra.index.sai.disk.v1.SAICodecUtils;
+import org.apache.lucene.store.DataOutput;
+import org.apache.lucene.store.GrowableByteArrayDataOutput;
+import org.apache.lucene.store.IndexOutput;
+import org.apache.lucene.util.ArrayUtil;
+import org.apache.lucene.util.BytesRef;
+import org.apache.lucene.util.FutureArrays;
+import org.apache.lucene.util.IntroSorter;
+import org.apache.lucene.util.Sorter;
+import org.apache.lucene.util.bkd.BKDWriter;
+import org.apache.lucene.util.bkd.MutablePointsReaderUtils;
+
+/**
+ * This is a specialisation of the Lucene {@link BKDWriter} that only writes a single dimension
+ * balanced tree.
+ * <p>
+ * Recursively builds a block balanced tree to assign all incoming points to smaller
+ * and smaller rectangles (cells) until the number of points in a given
+ * rectangle is <= <code>maxPointsInLeafNode</code>. The tree is
+ * fully balanced, which means the leaf nodes will have between 50% and 100% of
+ * the requested <code>maxPointsInLeafNode</code>. Values that fall exactly
+ * on a cell boundary may be in either cell.
+ *
+ * <p>
+ * <b>NOTE</b>: This can write at most Integer.MAX_VALUE * <code>maxPointsInLeafNode</code> total points.
+ * <p>
+ * @see BKDWriter
+ */
+public class BlockBalancedTreeWriter
+{
+    // Enable to check that values are added to the tree in correct order and within bounds
+    private static final boolean DEBUG = true;
+
+    // Default maximum number of point in each leaf block
+    public static final int DEFAULT_MAX_POINTS_IN_LEAF_NODE = 1024;
+
+    private final int bytesPerValue;
+    private final BytesRef scratchBytesRef1 = new BytesRef();
+    private final int maxPointsInLeafNode;
+    private final byte[] minPackedValue;
+    private final byte[] maxPackedValue;
+    private long pointCount;
+    private final long maxDoc;
+
+    public BlockBalancedTreeWriter(long maxDoc, int bytesPerValue, int maxPointsInLeafNode)
+    {
+        if (maxPointsInLeafNode <= 0)
+            throw new IllegalArgumentException("maxPointsInLeafNode must be > 0; got " + maxPointsInLeafNode);
+        if (maxPointsInLeafNode > ArrayUtil.MAX_ARRAY_LENGTH)
+            throw new IllegalArgumentException("maxPointsInLeafNode must be <= ArrayUtil.MAX_ARRAY_LENGTH (= " +
+                                               ArrayUtil.MAX_ARRAY_LENGTH + "); got " + maxPointsInLeafNode);
+
+        this.maxPointsInLeafNode = maxPointsInLeafNode;
+        this.bytesPerValue = bytesPerValue;
+        this.maxDoc = maxDoc;
+
+        minPackedValue = new byte[bytesPerValue];
+        maxPackedValue = new byte[bytesPerValue];
+    }
+
+    public long getPointCount()
+    {
+        return pointCount;
+    }
+
+    public int getBytesPerValue()
+    {
+        return bytesPerValue;
+    }
+
+    public int getMaxPointsInLeafNode()
+    {
+        return maxPointsInLeafNode;
+    }
+
+    /**
+     * Write the point values from a {@link IntersectingPointValues}. The points can be reordered before writing
+     * to disk and does not use transient disk for reordering.
+     * <p>
+     * Visual representation of the disk format:
+     * <pre>
+     *
+     * +========+=======================================+==================+========+
+     * | HEADER |            LEAF BLOCK LIST            |  BALANCED TREE   | FOOTER |
+     * +========+================+=====+================+==================+========+
+     *          | LEAF BLOCK (0) | ... | LEAF BLOCK (N) | VALUES PER LEAF  |
+     *          +----------------+-----+----------------+------------------|
+     *          | ORDER INDEX    |                      | BYTES PER VALUE  |
+     *          +----------------+                      +------------------+
+     *          | PREFIX         |                      | NUMBER OF LEAVES |
+     *          +----------------+                      +------------------+
+     *          | VALUES         |                      | MINIMUM VALUE    |
+     *          +----------------+                      +------------------+
+     *                                                  | MAXIMUM VALUE    |
+     *                                                  +------------------+
+     *                                                  | TOTAL VALUES     |
+     *                                                  +------------------+
+     *                                                  | INDEX TREE       |
+     *                                                  +--------+---------+
+     *                                                  | LENGTH |  BYTES  |
+     *                                                  +--------+---------+
+     * </pre>
+     *
+     * @param treeOutput The {@link IndexOutput} to write the balanced tree to
+     * @param reader The {@link IntersectingPointValues} containing the values and rowIDs to be written
+     * @param callback The {@link Callback} used to record the leaf postings for each leaf
+     *
+     * @return The file pointer to the beginning of the balanced tree
+     */
+    public long writeTree(IndexOutput treeOutput, IntersectingPointValues reader,
+                          final Callback callback) throws IOException
+    {
+        SAICodecUtils.writeHeader(treeOutput);
+
+        // We are only ever dealing with one dimension, so we can sort the points in ascending order
+        // and write out the values
+        if (reader.needsSorting())
+            MutablePointsReaderUtils.sort(Math.toIntExact(maxDoc), bytesPerValue, reader, 0, Math.toIntExact(reader.size()));
+
+        TreeWriter treeWriter = new TreeWriter(treeOutput, callback);
+
+        reader.intersect((rowID, packedValue) -> treeWriter.add(packedValue, rowID));
+
+        pointCount = treeWriter.finish();
+
+        long filePointer = pointCount == 0 ? -1 : treeOutput.getFilePointer();
+
+        writeIndex(treeOutput, maxPointsInLeafNode, treeWriter.leafBlockStartValues, treeWriter.leafBlockFilePointer);
+
+        SAICodecUtils.writeFooter(treeOutput);
+
+        return filePointer;
+    }
+
+    private void writeIndex(IndexOutput out, int countPerLeaf, List<byte[]> leafBlockStartValues, List<Long> leafBlockFilePointer) throws IOException
+    {
+        int numInnerNodes = leafBlockStartValues.size();
+        byte[] splitPackedValues = new byte[(1 + numInnerNodes) * (1 + bytesPerValue)];
+        rotateToTree(1, 0, numInnerNodes, splitPackedValues, leafBlockStartValues);
+        long[] leafBlockFPs = leafBlockFilePointer.stream().mapToLong(l -> l).toArray();
+        byte[] packedIndex = packIndex(leafBlockFPs, splitPackedValues);
+
+        out.writeVInt(countPerLeaf);
+        out.writeVInt(bytesPerValue);
+
+        assert leafBlockFPs.length > 0;
+        out.writeVInt(leafBlockFPs.length);
+
+        out.writeBytes(minPackedValue, 0, bytesPerValue);
+        out.writeBytes(maxPackedValue, 0, bytesPerValue);
+
+        out.writeVLong(pointCount);
+
+        out.writeVInt(packedIndex.length);
+        out.writeBytes(packedIndex, 0, packedIndex.length);
+    }
+
+    private void rotateToTree(int nodeID, int offset, int count, byte[] index, List<byte[]> leafBlockStartValues)
+    {
+        if (count == 1)
+        {
+            // Leaf index node
+            System.arraycopy(leafBlockStartValues.get(offset), 0, index, nodeID * (1 + bytesPerValue) + 1, bytesPerValue);
+        }
+        else if (count > 1)
+        {
+            // Internal index node: binary partition of count
+            int countAtLevel = 1;
+            int totalCount = 0;
+            while (true)
+            {
+                int countLeft = count - totalCount;
+                if (countLeft <= countAtLevel)
+                {
+                    // This is the last level, possibly partially filled:
+                    int lastLeftCount = Math.min(countAtLevel / 2, countLeft);
+                    assert lastLeftCount >= 0;
+                    int leftHalf = (totalCount - 1) / 2 + lastLeftCount;
+
+                    int rootOffset = offset + leftHalf;
+
+                    System.arraycopy(leafBlockStartValues.get(rootOffset), 0, index, nodeID * (1 + bytesPerValue) + 1, bytesPerValue);
+
+                    // TODO: we could optimize/specialize, when we know it's simply fully balanced binary tree
+                    // under here, to save this while loop on each recursion
+
+                    // Recurse left
+                    rotateToTree(2 * nodeID, offset, leftHalf, index, leafBlockStartValues);
+
+                    // Recurse right
+                    rotateToTree(2 * nodeID + 1, rootOffset + 1, count - leftHalf - 1, index, leafBlockStartValues);
+                    return;
+                }
+                totalCount += countAtLevel;
+                countAtLevel *= 2;
+            }
+        }
+        else
+        {
+            assert count == 0;
+        }
+    }
+
+    /** Packs the two arrays, representing a balanced binary tree, into a compact byte[] structure. */
+    private byte[] packIndex(long[] leafBlockFPs, byte[] splitPackedValues) throws IOException
+    {
+        int numLeaves = leafBlockFPs.length;
+
+        // Possibly rotate the leaf block FPs, if the index is not a fully balanced binary tree (only happens
+        // if it was created by TreeWriter). In this case the leaf nodes may straddle the two bottom
+        // levels of the binary tree:
+        if (numLeaves > 1)
+        {
+            int levelCount = 2;
+            while (true)
+            {
+                if (numLeaves >= levelCount && numLeaves <= 2 * levelCount)
+                {
+                    int lastLevel = 2 * (numLeaves - levelCount);
+                    assert lastLevel >= 0;
+                    if (lastLevel != 0)
+                    {
+                        // Last level is partially filled, so we must rotate the leaf FPs to match. We do this here, after loading
+                        // at read-time, so that we can still delta code them on disk at write:
+                        long[] newLeafBlockFPs = new long[numLeaves];
+                        System.arraycopy(leafBlockFPs, lastLevel, newLeafBlockFPs, 0, leafBlockFPs.length - lastLevel);
+                        System.arraycopy(leafBlockFPs, 0, newLeafBlockFPs, leafBlockFPs.length - lastLevel, lastLevel);
+                        leafBlockFPs = newLeafBlockFPs;
+                    }
+                    break;
+                }
+
+                levelCount *= 2;
+            }
+        }
+
+        // Reused while packing the index
+        try (RAMIndexOutput writeBuffer = new RAMIndexOutput("PackedIndex"))
+        {
+            // This is the "file" we append the byte[] to:
+            List<byte[]> blocks = new ArrayList<>();
+            byte[] lastSplitValues = new byte[bytesPerValue];
+            int totalSize = recursePackIndex(writeBuffer, leafBlockFPs, splitPackedValues, 0, blocks, 1, lastSplitValues, new boolean[1], false);
+            // Compact the byte[] blocks into single byte index:
+            byte[] index = new byte[totalSize];
+            int upto = 0;
+            for (byte[] block : blocks)
+            {
+                System.arraycopy(block, 0, index, upto, block.length);
+                upto += block.length;
+            }
+            assert upto == totalSize;
+
+            return index;
+        }
+    }
+
+    /**
+     * lastSplitValues is per-dimension split value previously seen; we use this to prefix-code the split byte[] on each
+     * inner node
+     */
+    private int recursePackIndex(RAMIndexOutput writeBuffer, long[] leafBlockFPs, byte[] splitPackedValues, long minBlockFP, List<byte[]> blocks,
+                                 int nodeID, byte[] lastSplitValues, boolean[] negativeDeltas, boolean isLeft) throws IOException
+    {
+        if (nodeID >= leafBlockFPs.length)
+        {
+            int leafID = nodeID - leafBlockFPs.length;
+
+            // In the unbalanced case it's possible the left most node only has one child:
+            if (leafID < leafBlockFPs.length)
+            {
+                long delta = leafBlockFPs[leafID] - minBlockFP;
+                if (isLeft)
+                {
+                    assert delta == 0;
+                    return 0;
+                }
+                else
+                {
+                    assert nodeID == 1 || delta > 0 : "nodeID=" + nodeID;
+                    writeBuffer.writeVLong(delta);
+                    return appendBlock(writeBuffer, blocks);
+                }
+            }
+            else
+            {
+                return 0;

Review Comment:
   nit: Untested code path?
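On the reachability question above: the flagged branch only runs when leafID = nodeID - leafBlockFPs.length is itself at or past the last leaf. Below is a small standalone probe, not part of the PR (the class name PhantomLeafProbe is invented for illustration), that mimics only the implicit heap numbering the excerpt uses: inner nodes recurse to 2 * nodeID and 2 * nodeID + 1, and any nodeID at or beyond the leaf count is treated as a leaf. It ignores the file-pointer rotation and delta coding entirely and simply reports any leaf count up to 64 whose traversal maps a node past the last leaf.

// Standalone sketch: models only the node numbering of recursePackIndex, nothing else.
public class PhantomLeafProbe
{
    public static void main(String[] args)
    {
        for (int numLeaves = 1; numLeaves <= 64; numLeaves++)
        {
            if (visit(1, numLeaves))
                System.out.println("numLeaves=" + numLeaves + " reaches leafID >= numLeaves");
        }
        System.out.println("scan complete");
    }

    // Returns true if this subtree visits a leaf whose leafID lands past the last leaf,
    // i.e. the case handled by the "else { return 0; }" branch in the excerpt.
    private static boolean visit(int nodeID, int numLeaves)
    {
        if (nodeID >= numLeaves)
            return nodeID - numLeaves >= numLeaves;

        // Inner node: recurse to both children, as the excerpt does.
        return visit(2 * nodeID, numLeaves) || visit(2 * nodeID + 1, numLeaves);
    }
}

If the probe never reports a leaf count, that would back the suspicion that the branch is defensive rather than exercised, and an assertion or a dedicated unit test could make that explicit; if it does report one, that count is a natural seed for a regression test.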

