openinx commented on a change in pull request #3248:
URL: https://github.com/apache/iceberg/pull/3248#discussion_r726980379
##########
File path: spark/src/main/java/org/apache/iceberg/spark/data/SparkOrcValueWriters.java
##########
@@ -19,245 +19,130 @@
package org.apache.iceberg.spark.data;
+import java.util.List;
import java.util.stream.Stream;
-import org.apache.iceberg.DoubleFieldMetrics;
import org.apache.iceberg.FieldMetrics;
-import org.apache.iceberg.FloatFieldMetrics;
+import org.apache.iceberg.orc.OrcValueWriter;
+import org.apache.orc.TypeDescription;
import org.apache.orc.storage.common.type.HiveDecimal;
import org.apache.orc.storage.ql.exec.vector.BytesColumnVector;
import org.apache.orc.storage.ql.exec.vector.ColumnVector;
import org.apache.orc.storage.ql.exec.vector.DecimalColumnVector;
-import org.apache.orc.storage.ql.exec.vector.DoubleColumnVector;
import org.apache.orc.storage.ql.exec.vector.ListColumnVector;
-import org.apache.orc.storage.ql.exec.vector.LongColumnVector;
import org.apache.orc.storage.ql.exec.vector.MapColumnVector;
import org.apache.orc.storage.ql.exec.vector.TimestampColumnVector;
-import org.apache.spark.sql.catalyst.expressions.SpecializedGetters;
import org.apache.spark.sql.catalyst.util.ArrayData;
import org.apache.spark.sql.catalyst.util.MapData;
+import org.apache.spark.sql.types.Decimal;
+import org.apache.spark.unsafe.types.UTF8String;
class SparkOrcValueWriters {
  private SparkOrcValueWriters() {
  }
-  static SparkOrcValueWriter booleans() {
-    return BooleanWriter.INSTANCE;
-  }
-
-  static SparkOrcValueWriter bytes() {
-    return ByteWriter.INSTANCE;
-  }
-
-  static SparkOrcValueWriter shorts() {
-    return ShortWriter.INSTANCE;
-  }
-
-  static SparkOrcValueWriter ints() {
-    return IntWriter.INSTANCE;
-  }
-
-  static SparkOrcValueWriter longs() {
-    return LongWriter.INSTANCE;
-  }
-
-  static SparkOrcValueWriter floats(int id) {
-    return new FloatWriter(id);
-  }
-
-  static SparkOrcValueWriter doubles(int id) {
-    return new DoubleWriter(id);
-  }
-
-  static SparkOrcValueWriter byteArrays() {
-    return BytesWriter.INSTANCE;
-  }
-
-  static SparkOrcValueWriter strings() {
+  static OrcValueWriter<?> strings() {
     return StringWriter.INSTANCE;
   }
-  static SparkOrcValueWriter timestampTz() {
+  static OrcValueWriter<?> timestampTz() {
     return TimestampTzWriter.INSTANCE;
   }
-  static SparkOrcValueWriter decimal(int precision, int scale) {
+  static OrcValueWriter<?> decimal(int precision, int scale) {
     if (precision <= 18) {
-      return new Decimal18Writer(precision, scale);
+      return new Decimal18Writer(scale);
     } else {
-      return new Decimal38Writer(precision, scale);
-    }
-  }
-
-  static SparkOrcValueWriter list(SparkOrcValueWriter element) {
-    return new ListWriter(element);
-  }
-
-  static SparkOrcValueWriter map(SparkOrcValueWriter keyWriter, SparkOrcValueWriter valueWriter) {
-    return new MapWriter(keyWriter, valueWriter);
-  }
-
-  private static class BooleanWriter implements SparkOrcValueWriter {
-    private static final BooleanWriter INSTANCE = new BooleanWriter();
-
-    @Override
-    public void nonNullWrite(int rowId, int column, SpecializedGetters data, ColumnVector output) {
-      ((LongColumnVector) output).vector[rowId] = data.getBoolean(column) ? 1 : 0;
-    }
-  }
-
-  private static class ByteWriter implements SparkOrcValueWriter {
-    private static final ByteWriter INSTANCE = new ByteWriter();
-
-    @Override
-    public void nonNullWrite(int rowId, int column, SpecializedGetters data, ColumnVector output) {
-      ((LongColumnVector) output).vector[rowId] = data.getByte(column);
-    }
-  }
-
-  private static class ShortWriter implements SparkOrcValueWriter {
-    private static final ShortWriter INSTANCE = new ShortWriter();
-
-    @Override
-    public void nonNullWrite(int rowId, int column, SpecializedGetters data, ColumnVector output) {
-      ((LongColumnVector) output).vector[rowId] = data.getShort(column);
+      return new Decimal38Writer();
     }
   }
-  private static class IntWriter implements SparkOrcValueWriter {
-    private static final IntWriter INSTANCE = new IntWriter();
-
-    @Override
-    public void nonNullWrite(int rowId, int column, SpecializedGetters data, ColumnVector output) {
-      ((LongColumnVector) output).vector[rowId] = data.getInt(column);
-    }
+  static OrcValueWriter<?> list(OrcValueWriter<?> element, List<TypeDescription> orcType) {
+    return new ListWriter(element, orcType);
   }
-  private static class LongWriter implements SparkOrcValueWriter {
-    private static final LongWriter INSTANCE = new LongWriter();
-
-    @Override
-    public void nonNullWrite(int rowId, int column, SpecializedGetters data, ColumnVector output) {
-      ((LongColumnVector) output).vector[rowId] = data.getLong(column);
-    }
+  static OrcValueWriter<?> map(OrcValueWriter<?> keyWriter, OrcValueWriter<?> valueWriter,
+      List<TypeDescription> orcType) {
Review comment:
Why do we need the `orcType` in this map constructor?
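For context: after this change the writers consume plain Java values instead of reading through `SpecializedGetters`, so a container writer has to pull each key/value out of Spark's untyped `ArrayData`/`MapData` itself, and which accessor to call depends on the child type. Below is a minimal, hypothetical sketch of why the constructor might want the ORC child types; `FieldGetterSketch`, `FieldGetter`, `createFieldGetter`, and `mapWriterSetup` are illustrative names, not necessarily this PR's actual API:

```java
import java.util.List;
import org.apache.orc.TypeDescription;
import org.apache.spark.sql.catalyst.util.ArrayData;

class FieldGetterSketch {
  /** Reads position {@code pos} of an ArrayData as a boxed Java value. */
  interface FieldGetter<T> {
    T get(ArrayData data, int pos);
  }

  /** Picks the right ArrayData accessor from the ORC child type (abridged to three cases). */
  static FieldGetter<?> createFieldGetter(TypeDescription type) {
    switch (type.getCategory()) {
      case LONG:
        return (data, pos) -> data.getLong(pos);
      case DOUBLE:
        return (data, pos) -> data.getDouble(pos);
      case STRING:
        return (data, pos) -> data.getUTF8String(pos);
      default:
        throw new UnsupportedOperationException("Unhandled type: " + type);
    }
  }

  /** An ORC map type has exactly two children: one getter each for key and value. */
  static void mapWriterSetup(List<TypeDescription> mapChildren) {
    FieldGetter<?> keyGetter = createFieldGetter(mapChildren.get(0));
    FieldGetter<?> valueGetter = createFieldGetter(mapChildren.get(1));
    // keyGetter/valueGetter would then feed keyWriter/valueWriter per map entry.
  }
}
```

If that is the intent, a map writer needs two child types (key and value) while a list writer needs one, which would explain passing a `List<TypeDescription>` rather than a single `TypeDescription`.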