kishoreg commented on a change in pull request #7920:
URL: https://github.com/apache/pinot/pull/7920#discussion_r771895480
##########
File path:
pinot-segment-local/src/main/java/org/apache/pinot/segment/local/segment/index/readers/forward/FixedByteChunkSVForwardIndexReader.java
##########
@@ -48,6 +52,179 @@ public ChunkReaderContext createContext() {
}
}
+ @Override
+ public void fillValues(int[] docIds, int length, int[] values,
ChunkReaderContext context) {
Review comment:
Can we name it readValues to maintain symmetry with readDictIds and the
other read methods?
##########
File path:
pinot-segment-local/src/main/java/org/apache/pinot/segment/local/segment/index/readers/forward/FixedByteChunkSVForwardIndexReader.java
##########
@@ -48,6 +52,179 @@ public ChunkReaderContext createContext() {
}
}
+ @Override
+ public void fillValues(int[] docIds, int length, int[] values,
ChunkReaderContext context) {
+ int range = docIds[length - 1] - docIds[0];
+ if (!_isCompressed && range == length - 1) {
+ switch (getValueType().getStoredType()) {
+ case INT: {
+ int minOffset = docIds[0] * Integer.BYTES;
+ IntBuffer buffer = _rawData.toDirectByteBuffer(minOffset, length *
Integer.BYTES).asIntBuffer();
+ buffer.get(values, 0, length);
+ }
+ break;
+ case LONG: {
+ int minOffset = docIds[0] * Long.BYTES;
+ LongBuffer buffer = _rawData.toDirectByteBuffer(minOffset, length *
Long.BYTES).asLongBuffer();
+ for (int i = 0; i < length; i++) {
+ values[i] = (int) buffer.get(i);
+ }
+ }
+ break;
+ case FLOAT: {
+ int minOffset = docIds[0] * Float.BYTES;
+ FloatBuffer buffer = _rawData.toDirectByteBuffer(minOffset, length *
Float.BYTES).asFloatBuffer();
+ for (int i = 0; i < length; i++) {
+ values[i] = (int) buffer.get(i);
+ }
+ }
+ break;
+ case DOUBLE: {
+ int minOffset = docIds[0] * Double.BYTES;
+ DoubleBuffer buffer = _rawData.toDirectByteBuffer(minOffset, length
* Double.BYTES).asDoubleBuffer();
+ for (int i = 0; i < length; i++) {
+ values[i] = (int) buffer.get(i);
+ }
+ }
+ break;
+ default:
+ throw new IllegalArgumentException();
+ }
+ } else {
+ super.fillValues(docIds, length, values, context);
+ }
+ }
+
+ @Override
+ public void fillValues(int[] docIds, int length, long[] values,
ChunkReaderContext context) {
+ int range = docIds[length - 1] - docIds[0];
+ if (!_isCompressed && range == length - 1) {
Review comment:
Can we add a comment here noting that this is a contiguous block read?
It might also be better to extract this condition into a separate method,
isContiguous, to improve readability.
--
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
To unsubscribe, e-mail: [email protected]
For queries about this service, please contact Infrastructure at:
[email protected]
---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]