clintropolis commented on code in PR #12745:
URL: https://github.com/apache/druid/pull/12745#discussion_r914416372


##########
processing/src/main/java/org/apache/druid/frame/field/ComplexFieldWriter.java:
##########
@@ -0,0 +1,81 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.druid.frame.field;
+
+import org.apache.datasketches.memory.WritableMemory;
+import org.apache.druid.segment.BaseObjectColumnValueSelector;
+import org.apache.druid.segment.serde.ComplexMetricSerde;
+
+/**
+ * Wraps a {@link BaseObjectColumnValueSelector} and uses {@link 
ComplexMetricSerde#toBytes} to write complex objects.
+ */
+public class ComplexFieldWriter implements FieldWriter
+{
+  public static final byte NULL_BYTE = 0x00;

Review Comment:
   nit: you explained to me offline the reason for not using the constants from 
`NullHandling`, but it might be worth leaving a comment here about why - the 
values being flipped so that nulls come first in a compare. Also, it might be 
nice for these to be shared constants across all writers? Other field writers define 
their own copies of this, and the column writers just use inline `0` or `1` 
values...



##########
processing/src/main/java/org/apache/druid/frame/field/StringFieldReader.java:
##########
@@ -0,0 +1,276 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.druid.frame.field;
+
+import com.google.common.base.Predicate;
+import com.google.common.primitives.Ints;
+import org.apache.datasketches.memory.Memory;
+import org.apache.druid.frame.read.FrameReaderUtils;
+import org.apache.druid.java.util.common.ISE;
+import org.apache.druid.java.util.common.StringUtils;
+import org.apache.druid.query.extraction.ExtractionFn;
+import org.apache.druid.query.filter.ValueMatcher;
+import org.apache.druid.query.monomorphicprocessing.RuntimeShapeInspector;
+import org.apache.druid.segment.ColumnValueSelector;
+import org.apache.druid.segment.DimensionSelector;
+import org.apache.druid.segment.DimensionSelectorUtils;
+import org.apache.druid.segment.IdLookup;
+import org.apache.druid.segment.column.ColumnType;
+import org.apache.druid.segment.data.IndexedInts;
+import org.apache.druid.segment.data.RangeIndexedInts;
+
+import javax.annotation.Nullable;
+import java.nio.ByteBuffer;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.List;
+
+/**
+ * Reads fields written by {@link StringFieldWriter} or {@link 
StringArrayFieldWriter}.
+ */
+public class StringFieldReader implements FieldReader
+{
+  private final boolean asArray;
+
+  StringFieldReader(final boolean asArray)
+  {
+    this.asArray = asArray;
+  }
+
+  @Override
+  public ColumnValueSelector<?> makeColumnValueSelector(Memory memory, 
ReadableFieldPointer fieldPointer)
+  {
+    return new Selector(memory, fieldPointer, null, asArray);
+  }
+
+  @Override
+  public DimensionSelector makeDimensionSelector(
+      Memory memory,
+      ReadableFieldPointer fieldPointer,
+      @Nullable ExtractionFn extractionFn
+  )
+  {
+    if (asArray) {
+      throw new ISE("Cannot call makeDimensionSelector on field of type [%s]", 
ColumnType.STRING_ARRAY);
+    }
+
+    return new Selector(memory, fieldPointer, extractionFn, false);
+  }
+
+  @Override
+  public boolean isComparable()
+  {
+    return true;
+  }
+
+  private static class Selector implements DimensionSelector
+  {
+    private final Memory memory;
+    private final ReadableFieldPointer fieldPointer;
+    @Nullable
+    private final ExtractionFn extractionFn;
+    private final boolean asArray;
+
+    private long currentFieldPosition = -1;
+    private final RangeIndexedInts indexedInts = new RangeIndexedInts();
+    private final List<ByteBuffer> currentUtf8Strings = new ArrayList<>();
+
+    private Selector(
+        final Memory memory,
+        final ReadableFieldPointer fieldPointer,
+        @Nullable final ExtractionFn extractionFn,
+        final boolean asArray
+    )
+    {
+      this.memory = memory;
+      this.fieldPointer = fieldPointer;
+      this.extractionFn = extractionFn;
+      this.asArray = asArray;
+    }
+
+    @Nullable
+    @Override
+    public Object getObject()
+    {
+      final List<ByteBuffer> currentStrings = computeCurrentUtf8Strings();
+      final int size = currentStrings.size();
+
+      if (size == 0) {
+        return asArray ? Collections.emptyList() : null;
+      } else if (size == 1) {
+        return asArray ? Collections.singletonList(lookupName(0)) : 
lookupName(0);
+      } else {
+        final List<String> strings = new ArrayList<>(size);
+        for (int i = 0; i < size; i++) {
+          strings.add(lookupName(i));
+        }
+        return strings;
+      }
+    }
+
+    @Override
+    public IndexedInts getRow()
+    {
+      indexedInts.setSize(computeCurrentUtf8Strings().size());
+      return indexedInts;
+    }
+
+    @Nullable
+    @Override
+    public String lookupName(int id)
+    {
+      final ByteBuffer byteBuffer = computeCurrentUtf8Strings().get(id);
+      final String s = byteBuffer != null ? 
StringUtils.fromUtf8(byteBuffer.duplicate()) : null;
+      return extractionFn == null ? s : extractionFn.apply(s);
+    }
+
+    @Override
+    public boolean supportsLookupNameUtf8()
+    {
+      return extractionFn == null;
+    }
+
+    @Nullable
+    @Override
+    public ByteBuffer lookupNameUtf8(int id)
+    {
+      if (extractionFn != null) {
+        throw new ISE("Cannot use lookupNameUtf8 on this selector");
+      }
+
+      return computeCurrentUtf8Strings().get(id);
+    }
+
+    @Override
+    public int getValueCardinality()
+    {
+      return CARDINALITY_UNKNOWN;
+    }

Review Comment:
   I got a bit side-tracked with some other stuff and haven't got back to 
`DimensionSelector` yet, but still thinking about it in the back of my mind. I 
mainly want to do 2 things to it, 1) decouple the dictionary encoded selector 
part from strings so we can more easily make dictionary encoded selectors 
for other column types and use them in places, and 2) split out the 'use it 
like an array' usages from the dictionary encoded uses, since it's the reason we 
have to decorate these selectors with stuff like the cardinality and methods 
like 'nameLookupPossibleInAdvance', so that callers can check to see what type 
of selector they got so they know what they can do with it. I would agree the 
selector itself might not be the best place for this stuff to live, so will 
keep this in mind.



##########
processing/src/main/java/org/apache/druid/frame/file/FrameFileWriter.java:
##########
@@ -0,0 +1,216 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.druid.frame.file;
+
+import com.google.common.primitives.Ints;
+import org.apache.datasketches.memory.WritableMemory;
+import org.apache.druid.frame.Frame;
+import org.apache.druid.frame.allocation.AppendableMemory;
+import org.apache.druid.frame.allocation.HeapMemoryAllocator;
+import org.apache.druid.frame.allocation.MemoryRange;
+import org.apache.druid.io.Channels;
+import org.apache.druid.java.util.common.ISE;
+
+import javax.annotation.Nullable;
+import java.io.Closeable;
+import java.io.IOException;
+import java.nio.ByteBuffer;
+import java.nio.channels.WritableByteChannel;
+
+/**
+ * Writer for {@link FrameFile}. See that class for format information.
+ */
+public class FrameFileWriter implements Closeable
+{
+  public static final byte[] MAGIC = {(byte) 0xff, 0x01};
+  public static final byte MARKER_FRAME = (byte) 0x01;
+  public static final byte MARKER_NO_MORE_FRAMES = (byte) 0x02;
+  public static final int TRAILER_LENGTH = Integer.BYTES * 2;
+  public static final int NO_PARTITION = -1;
+
+  private final WritableByteChannel channel;
+  private final AppendableMemory tableOfContents;
+  private final AppendableMemory partitions;
+  private long bytesWritten = 0;
+  private int numFrames = 0;
+  private boolean usePartitions = true;
+  private ByteBuffer compressionBuffer;
+  private boolean closed = false;
+
+  private FrameFileWriter(
+      final WritableByteChannel channel,
+      @Nullable final ByteBuffer compressionBuffer,
+      final AppendableMemory tableOfContents,
+      final AppendableMemory partitions
+  )
+  {
+    this.channel = channel;
+    this.compressionBuffer = compressionBuffer;
+    this.tableOfContents = tableOfContents;
+    this.partitions = partitions;
+  }
+
+  /**
+   * Opens a writer for a particular channel.
+   *
+   * @param channel           destination channel
+   * @param compressionBuffer result of {@link Frame#compressionBufferSize} 
for the largest possible frame size that
+   *                          will be written to this file, or null to 
allocate buffers dynamically.
+   *                          Providing an explicit buffer here, if possible, 
improves performance.
+   */
+  public static FrameFileWriter open(final WritableByteChannel channel, 
@Nullable final ByteBuffer compressionBuffer)
+  {
+    // Unlimited allocator is for convenience. Only a few bytes per frame will 
be allocated.
+    final HeapMemoryAllocator allocator = HeapMemoryAllocator.unlimited();
+    return new FrameFileWriter(
+        channel,
+        compressionBuffer,
+        AppendableMemory.create(allocator),
+        AppendableMemory.create(allocator)
+    );
+  }
+
+  /**
+   * Write a frame.
+   *
+   * @param frame     the frame
+   * @param partition partition number for a partitioned frame file, or {@link 
#NO_PARTITION} for an unpartitioned file.
+   *                  Must be monotonically increasing.
+   */
+  public void writeFrame(final Frame frame, final int partition) throws 
IOException
+  {
+    if (numFrames == Integer.MAX_VALUE) {
+      throw new ISE("Too many frames");
+    }
+
+    if (partition < 0 && numFrames == 0) {
+      usePartitions = false;
+    }
+
+    if (partition >= 0 != usePartitions) {
+      throw new ISE("Cannot mix partitioned and non-partitioned data");
+    }
+
+    if (!tableOfContents.reserve(Long.BYTES)) {
+      // Not likely to happen due to allocator limit of Long.MAX_VALUE.
+      throw new ISE("Too many frames");
+    }
+
+    writeMagicIfNeeded();
+
+    Channels.writeFully(channel, ByteBuffer.wrap(new byte[]{MARKER_FRAME}));
+    bytesWritten++;
+    bytesWritten += frame.writeTo(channel, true, 
getCompressionBuffer(frame.numBytes()));
+
+    // Write *end* of frame to tableOfContents.
+    final MemoryRange<WritableMemory> tocCursor = tableOfContents.cursor();
+    tocCursor.memory().putLong(tocCursor.start(), bytesWritten);
+    tableOfContents.advanceCursor(Long.BYTES);
+
+    if (usePartitions) {
+      // Write new partition if needed.
+      int highestPartitionWritten = Ints.checkedCast(partitions.size() / 
Integer.BYTES) - 1;
+
+      if (partition < highestPartitionWritten) {
+        // Partition number cannot go backwards.
+        throw new ISE("Partition [%,d] < highest partition [%,d]", partition, 
highestPartitionWritten);
+      }
+
+      while (partition > highestPartitionWritten) {
+        if (!partitions.reserve(Integer.BYTES)) {
+          // Not likely to happen due to allocator limit of Long.MAX_VALUE. 
But, if this happens, the file is corrupt.
+          // Throw an error so the caller knows it is bad.
+          throw new ISE("Too many partitions");
+        }
+
+        final MemoryRange<WritableMemory> partitionCursor = 
partitions.cursor();
+        highestPartitionWritten++;
+        partitionCursor.memory().putInt(partitionCursor.start(), numFrames);
+        partitions.advanceCursor(Integer.BYTES);
+      }
+    }
+
+    numFrames++;
+  }
+
+  /**
+   * Stops writing this file and closes early. Readers will be able to detect 
that the file is truncated due to the
+   * lack of {@link #MARKER_NO_MORE_FRAMES}.
+   *
+   * After calling this method, {@link #close()} does nothing.
+   */
+  public void abort() throws IOException
+  {
+    if (!closed) {
+      partitions.close();
+      tableOfContents.close();
+      channel.close();
+      compressionBuffer = null;
+      closed = true;
+    }
+  }
+
+  @Override
+  public void close() throws IOException
+  {
+    if (closed) {
+      // Already closed things in abort().
+      return;
+    }
+
+    writeMagicIfNeeded();
+
+    if (!tableOfContents.reserve(TRAILER_LENGTH)) {
+      throw new ISE("Can't finish table of contents");
+    }
+    final MemoryRange<WritableMemory> tocCursor = tableOfContents.cursor();
+    tocCursor.memory().putInt(tocCursor.start(), numFrames);
+    tocCursor.memory().putInt(tocCursor.start() + Integer.BYTES, 
Ints.checkedCast(partitions.size() / Integer.BYTES));
+    tableOfContents.advanceCursor(TRAILER_LENGTH);
+    channel.write(ByteBuffer.wrap(new byte[]{MARKER_NO_MORE_FRAMES}));
+    partitions.writeTo(channel);
+    partitions.close();
+    tableOfContents.writeTo(channel);
+    tableOfContents.close();
+    channel.close();
+    compressionBuffer = null;
+    closed = true;
+  }
+
+  private void writeMagicIfNeeded() throws IOException
+  {
+    if (numFrames == 0) {
+      Channels.writeFully(channel, ByteBuffer.wrap(MAGIC));
+      bytesWritten += MAGIC.length;
+    }
+  }
+
+  private ByteBuffer getCompressionBuffer(final long frameSize)
+  {
+    final int requiredSize = Frame.compressionBufferSize(frameSize);
+
+    if (compressionBuffer == null || compressionBuffer.capacity() < 
requiredSize) {
+      // Re-allocate a larger buffer.
+      compressionBuffer = ByteBuffer.allocate(requiredSize);

Review Comment:
   I wonder if this should use the compression buffer allocation stuff, or 
did you not want to deal with tracking closing stuff?
   
   I'm unsure exactly if some compression strategies require specific buffer 
types, I know this came up recently in zstd PR #12408 about it needing direct 
buffers, but I recall that we adjusted that to work with heap memory too. I don't 
fully remember if lzf has strong opinions on buffer type or not, and I guess it 
doesn't really matter too much right now since stuff is fixed to use lz4.



-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: [email protected]

For queries about this service, please contact Infrastructure at:
[email protected]


---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]

Reply via email to