mqliang commented on a change in pull request #6710:
URL: https://github.com/apache/incubator-pinot/pull/6710#discussion_r604447733
##########
File path: pinot-core/src/main/java/org/apache/pinot/core/common/datatable/DataTableImplV3.java
##########
@@ -0,0 +1,397 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.pinot.core.common.datatable;
+
+import com.google.common.primitives.Ints;
+import com.google.common.primitives.Longs;
+import java.io.ByteArrayInputStream;
+import java.io.ByteArrayOutputStream;
+import java.io.DataInputStream;
+import java.io.DataOutputStream;
+import java.io.IOException;
+import java.nio.ByteBuffer;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.Optional;
+import org.apache.pinot.common.response.ProcessingException;
+import org.apache.pinot.common.utils.DataSchema;
+import org.apache.pinot.common.utils.StringUtil;
+import org.apache.pinot.core.query.request.context.ThreadTimer;
+
+import static org.apache.pinot.common.utils.DataTable.MetadataKeys.THREAD_CPU_TIME_NS;
+import static org.apache.pinot.core.common.datatable.DataTableBuilder.VERSION_3;
+
+
+/**
+ * Datatable V3 implementation.
+ * The layout of a serialized V3 datatable looks like:
+ * +-----------------------------------------------+
+ * | 13 integers of header:                        |
+ * | VERSION                                       |
+ * | NUM_ROWS                                      |
+ * | NUM_COLUMNS                                   |
+ * | EXCEPTIONS SECTION START OFFSET               |
+ * | EXCEPTIONS SECTION LENGTH                     |
+ * | DICTIONARY_MAP SECTION START OFFSET           |
+ * | DICTIONARY_MAP SECTION LENGTH                 |
+ * | DATA_SCHEMA SECTION START OFFSET              |
+ * | DATA_SCHEMA SECTION LENGTH                    |
+ * | FIXED_SIZE_DATA SECTION START OFFSET          |
+ * | FIXED_SIZE_DATA SECTION LENGTH                |
+ * | VARIABLE_SIZE_DATA SECTION START OFFSET       |
+ * | VARIABLE_SIZE_DATA SECTION LENGTH             |
+ * +-----------------------------------------------+
+ * | EXCEPTIONS SECTION                            |
+ * +-----------------------------------------------+
+ * | DICTIONARY_MAP SECTION                        |
+ * +-----------------------------------------------+
+ * | DATA_SCHEMA SECTION                           |
+ * +-----------------------------------------------+
+ * | FIXED_SIZE_DATA SECTION                       |
+ * +-----------------------------------------------+
+ * | VARIABLE_SIZE_DATA SECTION                    |
+ * +-----------------------------------------------+
+ * | METADATA LENGTH                               |
+ * | METADATA SECTION                              |
+ * +-----------------------------------------------+
+ */
+public class DataTableImplV3 extends DataTableImplBase {
+  private static final int HEADER_SIZE = Integer.BYTES * 13;
+  // _exceptions stores exceptions as a map of errorCode -> errorMessage
+  private final Map<Integer, String> _exceptions;
+
+  /**
+   * Construct data table with results. (Server side)
+   */
+  public DataTableImplV3(int numRows, DataSchema dataSchema, Map<String, Map<Integer, String>> dictionaryMap,
+      byte[] fixedSizeDataBytes, byte[] variableSizeDataBytes) {
+    super(numRows, dataSchema, dictionaryMap, fixedSizeDataBytes, variableSizeDataBytes);
+    _exceptions = new HashMap<>();
+  }
+
+  /**
+   * Construct empty data table. (Server side)
+   */
+  public DataTableImplV3() {
+    super();
+    _exceptions = new HashMap<>();
+  }
+
+  /**
+   * Construct data table from byte array. (Broker side)
+   */
+  public DataTableImplV3(ByteBuffer byteBuffer)
+      throws IOException {
+    // Read header. Note: the VERSION integer has already been consumed by the
+    // caller in order to select this implementation.
+    _numRows = byteBuffer.getInt();
+    _numColumns = byteBuffer.getInt();
+    int exceptionsStart = byteBuffer.getInt();
+    int exceptionsLength = byteBuffer.getInt();
+    int dictionaryMapStart = byteBuffer.getInt();
+    int dictionaryMapLength = byteBuffer.getInt();
+    int dataSchemaStart = byteBuffer.getInt();
+    int dataSchemaLength = byteBuffer.getInt();
+    int fixedSizeDataStart = byteBuffer.getInt();
+    int fixedSizeDataLength = byteBuffer.getInt();
+    int variableSizeDataStart = byteBuffer.getInt();
+    int variableSizeDataLength = byteBuffer.getInt();
+
+    // Read exceptions.
+    if (exceptionsLength != 0) {
+      byte[] exceptionsBytes = new byte[exceptionsLength];
+      byteBuffer.position(exceptionsStart);
+      byteBuffer.get(exceptionsBytes);
+      _exceptions = deserializeExceptions(exceptionsBytes);
+    } else {
+      _exceptions = new HashMap<>();
+    }
+
+    // Read dictionary.
+    if (dictionaryMapLength != 0) {
+      byte[] dictionaryMapBytes = new byte[dictionaryMapLength];
+      byteBuffer.position(dictionaryMapStart);
+      byteBuffer.get(dictionaryMapBytes);
+      _dictionaryMap = deserializeDictionaryMap(dictionaryMapBytes);
+    } else {
+      _dictionaryMap = null;
+    }
+
+    // Read data schema.
+    if (dataSchemaLength != 0) {
+      byte[] schemaBytes = new byte[dataSchemaLength];
+      byteBuffer.position(dataSchemaStart);
+      byteBuffer.get(schemaBytes);
+      _dataSchema = DataSchema.fromBytes(schemaBytes);
+      _columnOffsets = new int[_dataSchema.size()];
+      _rowSizeInBytes = DataTableUtils.computeColumnOffsets(_dataSchema, _columnOffsets);
+    } else {
+      _dataSchema = null;
+      _columnOffsets = null;
+      _rowSizeInBytes = 0;
+    }
+
+    // Read fixed size data.
+    if (fixedSizeDataLength != 0) {
+      _fixedSizeDataBytes = new byte[fixedSizeDataLength];
+      byteBuffer.position(fixedSizeDataStart);
+      byteBuffer.get(_fixedSizeDataBytes);
+      _fixedSizeData = ByteBuffer.wrap(_fixedSizeDataBytes);
+    } else {
+      _fixedSizeDataBytes = null;
+      _fixedSizeData = null;
+    }
+
+    // Read variable size data.
+    if (variableSizeDataLength != 0) {
+      _variableSizeDataBytes = new byte[variableSizeDataLength];
+      byteBuffer.position(variableSizeDataStart);
+      byteBuffer.get(_variableSizeDataBytes);
+      _variableSizeData = ByteBuffer.wrap(_variableSizeDataBytes);
+    } else {
+      _variableSizeDataBytes = null;
+      _variableSizeData = null;
+    }
+
+    // Read metadata.
+    int metadataLength = byteBuffer.getInt();
+    byte[] metadataBytes = new byte[metadataLength];
+    byteBuffer.get(metadataBytes);
+    _metadata = deserializeMetadata(metadataBytes);
+  }
+
+  @Override
+  public void addException(ProcessingException processingException) {
+    _exceptions.put(processingException.getErrorCode(), processingException.getMessage());
+  }
+
+  @Override
+  public Map<Integer, String> getExceptions() {
+    return _exceptions;
+  }
+
+  @Override
+  public byte[] toBytes()
+      throws IOException {
+    ThreadTimer threadTimer = new ThreadTimer();
+    threadTimer.start();
+
+    ByteArrayOutputStream byteArrayOutputStream = new ByteArrayOutputStream();
+    DataOutputStream dataOutputStream = new DataOutputStream(byteArrayOutputStream);
+    dataOutputStream.writeInt(VERSION_3);
+    dataOutputStream.writeInt(_numRows);
+    dataOutputStream.writeInt(_numColumns);
+    int dataOffset = HEADER_SIZE;
+
+    // Write exceptions section offset (START|SIZE).
+    dataOutputStream.writeInt(dataOffset);
+    byte[] exceptionsBytes = serializeExceptions();
+    dataOutputStream.writeInt(exceptionsBytes.length);
+    dataOffset += exceptionsBytes.length;
+
+    // Write dictionary map section offset (START|SIZE).
+    dataOutputStream.writeInt(dataOffset);
+    byte[] dictionaryMapBytes = null;
+    if (_dictionaryMap != null) {
+      dictionaryMapBytes = serializeDictionaryMap(_dictionaryMap);
+      dataOutputStream.writeInt(dictionaryMapBytes.length);
+      dataOffset += dictionaryMapBytes.length;
+    } else {
+      dataOutputStream.writeInt(0);
+    }
+
+    // Write data schema section offset (START|SIZE).
+    dataOutputStream.writeInt(dataOffset);
+    byte[] dataSchemaBytes = null;
+    if (_dataSchema != null) {
+      dataSchemaBytes = _dataSchema.toBytes();
+      dataOutputStream.writeInt(dataSchemaBytes.length);
+      dataOffset += dataSchemaBytes.length;
+    } else {
+      dataOutputStream.writeInt(0);
+    }
+
+    // Write fixed size data section offset (START|SIZE).
+    dataOutputStream.writeInt(dataOffset);
+    if (_fixedSizeDataBytes != null) {
+      dataOutputStream.writeInt(_fixedSizeDataBytes.length);
+      dataOffset += _fixedSizeDataBytes.length;
+    } else {
+      dataOutputStream.writeInt(0);
+    }
+
+    // Write variable size data section offset (START|SIZE).
+    dataOutputStream.writeInt(dataOffset);
+    if (_variableSizeDataBytes != null) {
+      dataOutputStream.writeInt(_variableSizeDataBytes.length);
+    } else {
+      dataOutputStream.writeInt(0);
+    }
+
+    // Write actual data.
+    // Write exceptions bytes.
+    dataOutputStream.write(exceptionsBytes);
+    // Write dictionary map bytes.
+    if (dictionaryMapBytes != null) {
+      dataOutputStream.write(dictionaryMapBytes);
+    }
+    // Write data schema bytes.
+    if (dataSchemaBytes != null) {
+      dataOutputStream.write(dataSchemaBytes);
+    }
+    // Write fixed size data bytes.
+    if (_fixedSizeDataBytes != null) {
+      dataOutputStream.write(_fixedSizeDataBytes);
+    }
+    // Write variable size data bytes.
+    if (_variableSizeDataBytes != null) {
+      dataOutputStream.write(_variableSizeDataBytes);
+    }
+
+    // Update the value of "threadCpuTimeNs" to account for data table serialization time.
+    long responseSerializationCpuTimeNs = threadTimer.stopAndGetThreadTimeNs();
+    // TODO: Currently we log/emit a single total thread CPU time covering both query execution and data table
+    //   serialization. Figure out a way to log/emit them separately, probably by providing an API on the DataTable
+    //   to get/set the query context, which is supposed to be used on the server side only.
+    long threadCpuTimeNs =
+        Long.parseLong(getMetadata().getOrDefault(THREAD_CPU_TIME_NS.getName(), "0")) + responseSerializationCpuTimeNs;
+    getMetadata().put(THREAD_CPU_TIME_NS.getName(), String.valueOf(threadCpuTimeNs));
+
+    // Write metadata length and bytes.
+    byte[] metadataBytes = serializeMetadata();
+    dataOutputStream.writeInt(metadataBytes.length);
+    dataOutputStream.write(metadataBytes);
+
+    return byteArrayOutputStream.toByteArray();
+  }
+
+  /**
+   * Serialize metadata section to bytes.
+   * Format of the bytes looks like:
+   * [numEntries, bytesOfKV1, bytesOfKV2, bytesOfKV3]

Review comment:
   Oh, actually the length of the metadata section is written outside of this function; it's written by the caller. So the description `[numEntries, bytesOfKV1, bytesOfKV2, bytesOfKV3]` here is correct. Added comments at the caller to highlight the length-writing logic.
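   To make the split under discussion concrete, here is a minimal, self-contained sketch of the convention: the section body carries only the entry count and the key/value bytes, while the caller (`toBytes`) prepends the `METADATA LENGTH` field. The class name and the plain string key/value encoding are illustrative assumptions; the PR's actual `serializeMetadata` may encode entries more compactly, but the division of responsibility is the point shown.

```java
import java.io.ByteArrayOutputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.util.LinkedHashMap;
import java.util.Map;

public class MetadataSectionSketch {
  // Section body: [numEntries, bytesOfKV1, bytesOfKV2, ...]. Deliberately no
  // length prefix here; the caller owns the METADATA LENGTH field.
  static byte[] serializeMetadata(Map<String, String> metadata) throws IOException {
    ByteArrayOutputStream out = new ByteArrayOutputStream();
    DataOutputStream dos = new DataOutputStream(out);
    dos.writeInt(metadata.size());
    for (Map.Entry<String, String> entry : metadata.entrySet()) {
      byte[] key = entry.getKey().getBytes(StandardCharsets.UTF_8);
      dos.writeInt(key.length);
      dos.write(key);
      byte[] value = entry.getValue().getBytes(StandardCharsets.UTF_8);
      dos.writeInt(value.length);
      dos.write(value);
    }
    return out.toByteArray();
  }

  public static void main(String[] args) throws IOException {
    Map<String, String> metadata = new LinkedHashMap<>();
    metadata.put("threadCpuTimeNs", "12345");
    byte[] body = serializeMetadata(metadata);

    ByteArrayOutputStream stream = new ByteArrayOutputStream();
    DataOutputStream dos = new DataOutputStream(stream);
    dos.writeInt(body.length); // METADATA LENGTH, written by the caller
    dos.write(body);           // METADATA SECTION
    System.out.println("metadata section + length prefix = " + stream.size() + " bytes");
  }
}
```

   Keeping the length out of the section body means the deserializer can parse whatever byte slice it is handed, while the reader of the whole stream still knows how many bytes to slice off for the section.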
##########
File path: pinot-core/src/main/java/org/apache/pinot/core/common/datatable/DataTableImplBase.java
##########
@@ -0,0 +1,322 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.pinot.core.common.datatable;
+
+import com.google.common.primitives.Ints;
+import com.google.common.primitives.Longs;
+import java.io.ByteArrayInputStream;
+import java.io.ByteArrayOutputStream;
+import java.io.DataInputStream;
+import java.io.DataOutputStream;
+import java.io.IOException;
+import java.nio.ByteBuffer;
+import java.util.HashMap;
+import java.util.Map;
+import org.apache.commons.lang3.StringUtils;
+import org.apache.pinot.common.utils.DataSchema;
+import org.apache.pinot.common.utils.DataTable;
+import org.apache.pinot.common.utils.StringUtil;
+import org.apache.pinot.core.common.ObjectSerDeUtils;
+import org.apache.pinot.spi.utils.ByteArray;
+import org.apache.pinot.spi.utils.BytesUtils;
+
+
+public abstract class DataTableImplBase implements DataTable {
+  protected int _numRows;
+  protected int _numColumns;
+  protected DataSchema _dataSchema;
+  protected int[] _columnOffsets;
+  protected int _rowSizeInBytes;
+  protected Map<String, Map<Integer, String>> _dictionaryMap;
+  protected byte[] _fixedSizeDataBytes;
+  protected ByteBuffer _fixedSizeData;
+  protected byte[] _variableSizeDataBytes;
+  protected ByteBuffer _variableSizeData;
+  protected Map<String, String> _metadata;
+
+  public DataTableImplBase(int numRows, DataSchema dataSchema, Map<String, Map<Integer, String>> dictionaryMap,

Review comment:
   done

##########
File path: pinot-core/src/main/java/org/apache/pinot/core/common/datatable/DataTableBuilder.java
##########
@@ -91,11 +94,16 @@ private ByteBuffer _currentRowDataByteBuffer;
 
   public DataTableBuilder(DataSchema dataSchema) {
+    _version = VERSION_3;

Review comment:
   done
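As context for the `_version = VERSION_3` default above: the first integer `toBytes()` writes is the version, which lets the receiving side dispatch to the matching decoder. Below is a minimal sketch of that dispatch; the factory name and the `DataTableImplV2` branch are assumptions for illustration, since only the `DataTableImplV3(ByteBuffer)` constructor appears in this PR.

```java
package org.apache.pinot.core.common.datatable;

import java.io.IOException;
import java.nio.ByteBuffer;
import org.apache.pinot.common.utils.DataTable;

public class DataTableFactorySketch {
  private static final int VERSION_2 = 2;
  private static final int VERSION_3 = 3;

  // The version integer leads the serialized form, so it is consumed here
  // before the version-specific constructor reads the rest of the header.
  public static DataTable getDataTable(ByteBuffer byteBuffer) throws IOException {
    int version = byteBuffer.getInt();
    switch (version) {
      case VERSION_2:
        return new DataTableImplV2(byteBuffer); // assumed V2 counterpart, not shown in this PR
      case VERSION_3:
        return new DataTableImplV3(byteBuffer);
      default:
        throw new UnsupportedOperationException("Unsupported data table version: " + version);
    }
  }
}
```

Stamping the version in the builder and branching on it at deserialization time is a common way to keep mixed-version clusters working during rolling upgrades.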
--
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

For queries about this service, please contact Infrastructure at:
[email protected]

---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]
