Github user nongli commented on a diff in the pull request:
https://github.com/apache/spark/pull/10908#discussion_r51462281
--- Diff:
sql/core/src/main/java/org/apache/spark/sql/execution/datasources/parquet/VectorizedRleValuesReader.java
---
@@ -180,6 +188,152 @@ public void readIntegers(int total, ColumnVector c,
int rowId, int level,
}
}
+ // TODO: can this code duplication be removed without a perf penalty?
+ public void readBytes(int total, ColumnVector c,
+ int rowId, int level, VectorizedValuesReader data)
{
+ int left = total;
+ while (left > 0) {
+ if (this.currentCount == 0) this.readNextGroup();
+ int n = Math.min(left, this.currentCount);
+ switch (mode) {
+ case RLE:
+ if (currentValue == level) {
+ data.readBytes(n, c, rowId);
+ c.putNotNulls(rowId, n);
+ } else {
+ c.putNulls(rowId, n);
+ }
+ break;
+ case PACKED:
+ for (int i = 0; i < n; ++i) {
+ if (currentBuffer[currentBufferIdx++] == level) {
+ c.putByte(rowId + i, data.readByte());
+ c.putNotNull(rowId + i);
+ } else {
+ c.putNull(rowId + i);
+ }
+ }
+ break;
+ }
+ rowId += n;
+ left -= n;
+ currentCount -= n;
+ }
+ }
+
+ public void readLongs(int total, ColumnVector c, int rowId, int level,
+ VectorizedValuesReader data) {
+ int left = total;
+ while (left > 0) {
+ if (this.currentCount == 0) this.readNextGroup();
+ int n = Math.min(left, this.currentCount);
+ switch (mode) {
+ case RLE:
+ if (currentValue == level) {
+ data.readLongs(n, c, rowId);
+ c.putNotNulls(rowId, n);
+ } else {
+ c.putNulls(rowId, n);
+ }
+ break;
+ case PACKED:
+ for (int i = 0; i < n; ++i) {
+ if (currentBuffer[currentBufferIdx++] == level) {
+ c.putLong(rowId + i, data.readLong());
+ c.putNotNull(rowId + i);
+ } else {
+ c.putNull(rowId + i);
+ }
+ }
+ break;
+ }
+ rowId += n;
+ left -= n;
+ currentCount -= n;
+ }
+ }
+
+ public void readBinarys(int total, ColumnVector c, int rowId, int level,
+ VectorizedValuesReader data) {
+ int left = total;
+ while (left > 0) {
+ if (this.currentCount == 0) this.readNextGroup();
+ int n = Math.min(left, this.currentCount);
+ switch (mode) {
+ case RLE:
+ if (currentValue == level) {
+ c.putNotNulls(rowId, n);
+ data.readBinary(n, c, rowId);
+ } else {
+ c.putNulls(rowId, n);
+ }
+ break;
+ case PACKED:
+ for (int i = 0; i < n; ++i) {
+ if (currentBuffer[currentBufferIdx++] == level) {
+ c.putNotNull(rowId + i);
+ data.readBinary(1, c, rowId);
+ } else {
+ c.putNull(rowId + i);
+ }
+ }
+ break;
+ }
+ rowId += n;
+ left -= n;
+ currentCount -= n;
+ }
+ }
+
+
+ // This is used for decoding dictionary IDs (as opposed to definition
levels).
+ @Override
+ public void readIntegers(int total, ColumnVector c, int rowId) {
+ int left = total;
+ while (left > 0) {
+ if (this.currentCount == 0) this.readNextGroup();
+ int n = Math.min(left, this.currentCount);
+ switch (mode) {
+ case RLE:
+ c.putInts(rowId, n, currentValue);
+ break;
+ case PACKED:
+ c.putInts(rowId, n, currentBuffer, currentBufferIdx);
+ currentBufferIdx += n;
+ break;
+ }
+ rowId += n;
+ left -= n;
+ currentCount -= n;
+ }
+ }
+
+ @Override
+ public byte readByte() {
+ throw new UnsupportedOperationException("only readInts is valid.");
--- End diff --
This should be readInts. The only valid read* API that doesn't also decode
definition levels is the one used to decode dictionary IDs, which are always
ints. I updated the comment for readIntegers() to try to capture this.
---
If your project is set up for it, you can reply to this email and have your
reply appear on GitHub as well. If your project does not have this feature
enabled and wishes so, or if the feature is enabled but not working, please
contact infrastructure at [email protected] or file a JIRA ticket
with INFRA.
---
---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]