yihua commented on code in PR #13882:
URL: https://github.com/apache/hudi/pull/13882#discussion_r2345522286
##########
hudi-client/hudi-spark-client/src/main/java/org/apache/hudi/io/storage/row/HoodieRowParquetWriteSupport.java:
##########
@@ -18,47 +18,149 @@
package org.apache.hudi.io.storage.row;
+import org.apache.hudi.HoodieSparkUtils;
+import org.apache.hudi.SparkAdapterSupport;
+import org.apache.hudi.avro.HoodieAvroUtils;
import org.apache.hudi.avro.HoodieBloomFilterWriteSupport;
import org.apache.hudi.common.bloom.BloomFilter;
import org.apache.hudi.common.config.HoodieConfig;
import org.apache.hudi.common.config.HoodieStorageConfig;
import org.apache.hudi.common.util.Option;
import org.apache.hudi.common.util.ReflectionUtils;
+import org.apache.hudi.config.HoodieWriteConfig;
+import org.apache.hudi.internal.schema.convert.AvroInternalSchemaConverter;
+import org.apache.hudi.internal.schema.utils.SerDeHelper;
+import org.apache.avro.LogicalType;
+import org.apache.avro.LogicalTypes;
+import org.apache.avro.Schema;
import org.apache.hadoop.conf.Configuration;
+import org.apache.parquet.avro.AvroSchemaConverter;
+import org.apache.parquet.avro.AvroWriteSupport;
import org.apache.parquet.hadoop.api.WriteSupport;
+import org.apache.parquet.io.api.Binary;
+import org.apache.parquet.io.api.RecordConsumer;
+import org.apache.parquet.schema.MessageType;
+import org.apache.spark.sql.HoodieInternalRowUtils;
+import org.apache.spark.sql.catalyst.InternalRow;
+import org.apache.spark.sql.catalyst.expressions.SpecializedGetters;
+import org.apache.spark.sql.catalyst.util.ArrayData;
+import org.apache.spark.sql.catalyst.util.DateTimeUtils;
+import org.apache.spark.sql.catalyst.util.MapData;
+import org.apache.spark.sql.execution.datasources.DataSourceUtils;
import org.apache.spark.sql.execution.datasources.parquet.ParquetWriteSupport;
+import org.apache.spark.sql.internal.SQLConf;
+import org.apache.spark.sql.types.ArrayType;
+import org.apache.spark.sql.types.DataType;
+import org.apache.spark.sql.types.DataTypes;
+import org.apache.spark.sql.types.Decimal;
+import org.apache.spark.sql.types.DecimalType;
+import org.apache.spark.sql.types.MapType;
import org.apache.spark.sql.types.StructType;
+import org.apache.spark.sql.types.TimestampNTZType;
import org.apache.spark.unsafe.types.UTF8String;
+import org.apache.spark.util.VersionUtils;
import java.util.Collections;
+import java.util.HashMap;
import java.util.Map;
+import java.util.stream.IntStream;
+import scala.Enumeration;
+import scala.Function1;
+
+import static org.apache.hudi.avro.AvroSchemaUtils.resolveNullableSchema;
import static org.apache.hudi.common.config.HoodieStorageConfig.PARQUET_FIELD_ID_WRITE_ENABLED;
/**
- * Hoodie Write Support for directly writing Row to Parquet.
+ * Hoodie Write Support for directly writing Row to Parquet and adding the Hudi bloom index to the file metadata.
+ * The implementation is adapted from Spark's {@link org.apache.spark.sql.execution.datasources.parquet.ParquetWriteSupport} but has the following major differences:
+ * <ul>
+ * <li>Decimals are always written with the legacy format to ensure compatibility with parquet-avro and other readers</li>
+ * <li>Writing 2-level or 3-level lists is configurable with either the Hudi option, hoodie.parquet.writelegacyformat.enabled, or the parquet-avro option, parquet.avro.write-old-list-structure,
+ * to ensure consistency across writer paths.</li>
+ * <li>The precision of timestamps is determined by the Hudi writer schema instead of relying on Spark configuration</li>
+ * </ul>
*/
-public class HoodieRowParquetWriteSupport extends ParquetWriteSupport {
+public class HoodieRowParquetWriteSupport extends WriteSupport<InternalRow> {
+ private static final Schema MAP_KEY_SCHEMA = Schema.create(Schema.Type.STRING);
+ private static final String MAP_REPEATED_NAME = "key_value";
+ private static final String MAP_KEY_NAME = "key";
+ private static final String MAP_VALUE_NAME = "value";
private final Configuration hadoopConf;
private final Option<HoodieBloomFilterWriteSupport<UTF8String>> bloomFilterWriteSupportOpt;
+ private final byte[] decimalBuffer = new byte[Decimal.minBytesForPrecision()[DecimalType.MAX_PRECISION()]];
+ private final Enumeration.Value datetimeRebaseMode = (Enumeration.Value) SparkAdapterSupport.getSparkAdapter().getDateTimeRebaseMode();
+ private final Function1<Object, Object> dateRebaseFunction = DataSourceUtils.createDateRebaseFuncInWrite(datetimeRebaseMode, "Parquet");
+ private final Function1<Object, Object> timestampRebaseFunction = DataSourceUtils.createTimestampRebaseFuncInWrite(datetimeRebaseMode, "Parquet");
+ private RecordConsumer recordConsumer;
+ private final boolean writeLegacyListFormat;
+ private final ValueWriter[] rootFieldWriters;
+ private final Schema avroSchema;
- public HoodieRowParquetWriteSupport(Configuration conf, StructType structType, Option<BloomFilter> bloomFilterOpt, HoodieConfig config) {
+ public HoodieRowParquetWriteSupport(Configuration conf, StructType schema, Option<BloomFilter> bloomFilterOpt, HoodieWriteConfig config) {
    Configuration hadoopConf = new Configuration(conf);
-   hadoopConf.set("spark.sql.parquet.writeLegacyFormat", config.getStringOrDefault(HoodieStorageConfig.PARQUET_WRITE_LEGACY_FORMAT_ENABLED, "false"));
+   String writeLegacyFormatEnabled = config.getStringOrDefault(HoodieStorageConfig.PARQUET_WRITE_LEGACY_FORMAT_ENABLED, "false");
+   hadoopConf.set("spark.sql.parquet.writeLegacyFormat", writeLegacyFormatEnabled);
    hadoopConf.set("spark.sql.parquet.outputTimestampType", config.getStringOrDefault(HoodieStorageConfig.PARQUET_OUTPUT_TIMESTAMP_TYPE));
    hadoopConf.set("spark.sql.parquet.fieldId.write.enabled", config.getStringOrDefault(PARQUET_FIELD_ID_WRITE_ENABLED));
- setSchema(structType, hadoopConf);
-
+ this.writeLegacyListFormat = Boolean.parseBoolean(writeLegacyFormatEnabled)
+     || Boolean.parseBoolean(config.getStringOrDefault(AvroWriteSupport.WRITE_OLD_LIST_STRUCTURE, "false"));
+ this.avroSchema = SerDeHelper.fromJson(config.getInternalSchema()).map(internalSchema -> AvroInternalSchemaConverter.convert(internalSchema, "spark_schema"))
+     .orElseGet(() -> {
+       Schema parsedSchema = new Schema.Parser().parse(config.getWriteSchema());
+       if (config.populateMetaFields()) {
+         return HoodieAvroUtils.addMetadataFields(parsedSchema, config.allowOperationMetadataField());
+       }
+       return parsedSchema;
+     });
+
+ ParquetWriteSupport.setSchema(HoodieInternalRowUtils.getCachedSchema(avroSchema), hadoopConf);
+ this.rootFieldWriters = getFieldWriters(schema, avroSchema);
  this.hadoopConf = hadoopConf;
  this.bloomFilterWriteSupportOpt = bloomFilterOpt.map(HoodieBloomFilterRowWriteSupport::new);
}
+ private ValueWriter[] getFieldWriters(StructType schema, Schema avroSchema) {
+ Map<String, Integer> fieldNameToIndex = new HashMap<>();
+ for (int i = 0; i < schema.fields().length; i++) {
+ fieldNameToIndex.put(schema.fields()[i].name(), i);
+ }
+ return IntStream.range(0, avroSchema.getFields().size()).mapToObj(i -> {
+ Schema.Field field = avroSchema.getFields().get(i);
+ Integer structIndex = fieldNameToIndex.get(field.name());
+ return makeWriter(field.schema(), schema.fields()[structIndex].dataType());
+ }).toArray(ValueWriter[]::new);
+ }
+
public Configuration getHadoopConf() {
return hadoopConf;
}
+ @Override
+ public WriteContext init(Configuration configuration) {
+ Map<String, String> metadata = new HashMap<>();
+ metadata.put("org.apache.spark.version", VersionUtils.shortVersion(HoodieSparkUtils.getSparkVersion()));
Review Comment:
Do we want to add a custom metadata field here indicating the Hudi version/write support, so we know the Parquet file was written with `HoodieRowParquetWriteSupport`?
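For example, a minimal sketch of such an entry next to the Spark version line; the key name `org.apache.hudi.write.support` is a made-up placeholder, not an existing Hudi constant:

```java
// Hypothetical footer entries (key names are assumptions, not Hudi constants);
// they would let readers identify the writer path from the Parquet footer alone.
metadata.put("org.apache.spark.version", VersionUtils.shortVersion(HoodieSparkUtils.getSparkVersion()));
metadata.put("org.apache.hudi.write.support", HoodieRowParquetWriteSupport.class.getSimpleName());
```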
##########
hudi-client/hudi-spark-client/src/main/java/org/apache/hudi/io/storage/row/HoodieRowParquetWriteSupport.java:
##########
@@ -73,6 +175,173 @@ public void add(UTF8String recordKey) {
bloomFilterWriteSupport.addKey(recordKey));
}
+ @FunctionalInterface
+ private interface ValueWriter {
+ void write(SpecializedGetters row, int ordinal);
+ }
+
+ private void consumeMessage(Runnable writer) {
+ recordConsumer.startMessage();
+ writer.run();
+ recordConsumer.endMessage();
+ }
+
+ private void consumeGroup(Runnable writer) {
+ recordConsumer.startGroup();
+ writer.run();
+ recordConsumer.endGroup();
+ }
+
+ private void consumeField(String field, int index, Runnable writer) {
+ recordConsumer.startField(field, index);
+ writer.run();
+ recordConsumer.endField(field, index);
+ }
+
+ private void writeFields(InternalRow row, Schema schema, ValueWriter[] fieldWriters) {
+ for (int i = 0; i < fieldWriters.length; i++) {
+ int index = i;
+ if (!row.isNullAt(i)) {
+ Schema.Field field = schema.getFields().get(index);
+ consumeField(field.name(), index, () -> fieldWriters[index].write(row, index));
+ }
+ }
+ }
+
+ private ValueWriter makeWriter(Schema avroSchema, DataType dataType) {
+ Schema resolvedSchema = resolveNullableSchema(avroSchema);
+ Schema.Type type = resolvedSchema.getType();
+ LogicalType logicalType = resolvedSchema.getLogicalType();
+ switch (type) {
+ case BOOLEAN:
+ return (row, ordinal) -> recordConsumer.addBoolean(row.getBoolean(ordinal));
+ case INT:
+ if (logicalType != null) {
+ if (logicalType.getName().equals(LogicalTypes.date().getName())) {
+ return (row, ordinal) -> recordConsumer.addInteger((Integer) dateRebaseFunction.apply(row.getInt(ordinal)));
+ }
+ }
+ return (row, ordinal) -> recordConsumer.addInteger(row.getInt(ordinal));
+ case LONG:
+ if (logicalType != null) {
+ if (logicalType.getName().equals(LogicalTypes.timestampMillis().getName())) {
+ return (row, ordinal) -> recordConsumer.addLong(DateTimeUtils.microsToMillis((long) timestampRebaseFunction.apply(row.getLong(ordinal))));
Review Comment:
Does this assume that the input row uses micros for `TimestampType`?
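For reference, Spark's `InternalRow` stores `TimestampType` as microseconds since the epoch; if that holds here, the `microsToMillis` conversion is correct. A restatement of the lambda with hypothetical local names, just to spell out the units:

```java
// Spark InternalRow carries TimestampType as microseconds since the epoch.
long micros = row.getLong(ordinal);                                  // micros straight from Spark
long rebasedMicros = (long) timestampRebaseFunction.apply(micros);   // calendar rebase, still micros
recordConsumer.addLong(DateTimeUtils.microsToMillis(rebasedMicros)); // micros -> millis for timestamp-millis
```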
##########
hudi-client/hudi-spark-client/src/main/java/org/apache/hudi/io/storage/row/HoodieRowParquetWriteSupport.java:
##########
@@ -73,6 +175,173 @@ public void add(UTF8String recordKey) {
bloomFilterWriteSupport.addKey(recordKey));
}
+ @FunctionalInterface
+ private interface ValueWriter {
+ void write(SpecializedGetters row, int ordinal);
+ }
+
+ private void consumeMessage(Runnable writer) {
+ recordConsumer.startMessage();
+ writer.run();
+ recordConsumer.endMessage();
+ }
+
+ private void consumeGroup(Runnable writer) {
+ recordConsumer.startGroup();
+ writer.run();
+ recordConsumer.endGroup();
+ }
+
+ private void consumeField(String field, int index, Runnable writer) {
+ recordConsumer.startField(field, index);
+ writer.run();
+ recordConsumer.endField(field, index);
+ }
+
+ private void writeFields(InternalRow row, Schema schema, ValueWriter[] fieldWriters) {
+ for (int i = 0; i < fieldWriters.length; i++) {
+ int index = i;
+ if (!row.isNullAt(i)) {
+ Schema.Field field = schema.getFields().get(index);
+ consumeField(field.name(), index, () -> fieldWriters[index].write(row, index));
+ }
+ }
+ }
+
+ private ValueWriter makeWriter(Schema avroSchema, DataType dataType) {
+ Schema resolvedSchema = resolveNullableSchema(avroSchema);
+ Schema.Type type = resolvedSchema.getType();
+ LogicalType logicalType = resolvedSchema.getLogicalType();
+ switch (type) {
+ case BOOLEAN:
+ return (row, ordinal) -> recordConsumer.addBoolean(row.getBoolean(ordinal));
+ case INT:
Review Comment:
Should `time-micros` be handled too?
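Per the Avro spec, `time-micros` annotates a `long`, so it would belong under the `LONG` case rather than here. A minimal sketch, assuming the Spark side already carries the value as a long of microseconds after midnight (Spark has no native TIME type, so that representation is an assumption):

```java
// Hypothetical branch for the LONG case; assumes the incoming long is already
// microseconds after midnight, so it can be passed through unchanged.
if (logicalType.getName().equals(LogicalTypes.timeMicros().getName())) {
  return (row, ordinal) -> recordConsumer.addLong(row.getLong(ordinal));
}
```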
##########
hudi-client/hudi-spark-client/src/main/java/org/apache/hudi/io/storage/row/HoodieRowParquetWriteSupport.java:
##########
@@ -73,6 +175,173 @@ public void add(UTF8String recordKey) {
bloomFilterWriteSupport.addKey(recordKey));
}
+ @FunctionalInterface
+ private interface ValueWriter {
+ void write(SpecializedGetters row, int ordinal);
+ }
+
+ private void consumeMessage(Runnable writer) {
+ recordConsumer.startMessage();
+ writer.run();
+ recordConsumer.endMessage();
+ }
+
+ private void consumeGroup(Runnable writer) {
+ recordConsumer.startGroup();
+ writer.run();
+ recordConsumer.endGroup();
+ }
+
+ private void consumeField(String field, int index, Runnable writer) {
+ recordConsumer.startField(field, index);
+ writer.run();
+ recordConsumer.endField(field, index);
+ }
+
+ private void writeFields(InternalRow row, Schema schema, ValueWriter[] fieldWriters) {
+ for (int i = 0; i < fieldWriters.length; i++) {
+ int index = i;
+ if (!row.isNullAt(i)) {
+ Schema.Field field = schema.getFields().get(index);
+ consumeField(field.name(), index, () -> fieldWriters[index].write(row, index));
+ }
+ }
+ }
+
+ private ValueWriter makeWriter(Schema avroSchema, DataType dataType) {
+ Schema resolvedSchema = resolveNullableSchema(avroSchema);
+ Schema.Type type = resolvedSchema.getType();
+ LogicalType logicalType = resolvedSchema.getLogicalType();
+ switch (type) {
+ case BOOLEAN:
+ return (row, ordinal) -> recordConsumer.addBoolean(row.getBoolean(ordinal));
+ case INT:
Review Comment:
Based on the Avro spec, the `time-millis` logical type annotates an Avro `int`, so should `time-millis` be handled here?
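A minimal sketch of such a branch, assuming the value arrives as a plain int of milliseconds after midnight (again an assumption, since Spark has no native TIME type):

```java
// Hypothetical branch for the INT case; per the Avro spec, time-millis counts
// milliseconds after midnight and is assumed here to arrive as a plain int.
if (logicalType.getName().equals(LogicalTypes.timeMillis().getName())) {
  return (row, ordinal) -> recordConsumer.addInteger(row.getInt(ordinal));
}
```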