JingsongLi commented on a change in pull request #17542:
URL: https://github.com/apache/flink/pull/17542#discussion_r761612653
##########
File path:
flink-formats/flink-parquet/src/main/java/org/apache/flink/formats/parquet/row/ParquetRowDataWriter.java
##########
@@ -165,6 +187,11 @@ public void write(RowData row, int ordinal) {
        public void write(RowData row, int ordinal) {
            recordConsumer.addLong(row.getLong(ordinal));
        }
+
+        @Override
+        public void write(ArrayData arrayData, int ordinal) {
Review comment:
It is better to add a method:
```
private void writeLong(long value) {
    recordConsumer.addLong(value);
}
```
Then these two methods can reuse the same logic.
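For example, a sketch of how the two `write` overloads could then delegate (the surrounding inner writer class and its `recordConsumer` field are assumed from the diff above):
```
@Override
public void write(RowData row, int ordinal) {
    writeLong(row.getLong(ordinal));
}

@Override
public void write(ArrayData arrayData, int ordinal) {
    writeLong(arrayData.getLong(ordinal));
}

private void writeLong(long value) {
    recordConsumer.addLong(value);
}
```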
##########
File path:
flink-formats/flink-parquet/src/test/java/org/apache/flink/formats/parquet/row/ParquetRowDataWriterTest.java
##########
@@ -158,6 +190,65 @@ private void innerTest(Configuration conf, boolean utcTimestamp) throws IOExcept
        Assert.assertEquals(number, cnt);
    }
+    public void complexTypeTest(Configuration conf, boolean utcTimestamp) throws Exception {
+        Path path = new Path(TEMPORARY_FOLDER.newFolder().getPath(), UUID.randomUUID().toString());
+        int number = 1000;
+        List<Row> rows = new ArrayList<>(number);
+        Map<String, String> mapData = new HashMap<>();
+        mapData.put("k1", "v1");
+        mapData.put(null, "v2");
+        mapData.put("k2", null);
+
+        for (int i = 0; i < number; i++) {
+            Integer v = i;
+            rows.add(Row.of(new Integer[] {v}, mapData, Row.of(String.valueOf(v), v)));
+        }
+
+        ParquetWriterFactory<RowData> factory =
+                ParquetRowDataBuilder.createWriterFactory(ROW_TYPE_COMPLEX, conf, utcTimestamp);
+        BulkWriter<RowData> writer =
+                factory.create(path.getFileSystem().create(path, FileSystem.WriteMode.OVERWRITE));
+        for (int i = 0; i < number; i++) {
+            writer.addElement(CONVERTER_COMPLEX.toInternal(rows.get(i)));
+        }
+        writer.flush();
+        writer.finish();
+
+        File file = new File(path.getPath());
+        final List<Row> fileContent = readParquetFile(file);
+        assertEquals(rows, fileContent);
+    }
+
+    private static List<Row> readParquetFile(File file) throws IOException {
+        InputFile inFile =
+                HadoopInputFile.fromPath(
+                        new org.apache.hadoop.fs.Path(file.toURI()), new Configuration());
+
+        ArrayList<Row> results = new ArrayList<>();
+        try (ParquetReader<GenericRecord> reader =
+                AvroParquetReader.<GenericRecord>builder(inFile).build()) {
+            GenericRecord next;
+            while ((next = reader.read()) != null) {
+                Integer c0 = (Integer) ((ArrayList<GenericData.Record>) next.get(0)).get(0).get(0);
+                HashMap<Utf8, Utf8> map = ((HashMap<Utf8, Utf8>) next.get(1));
+                String c21 = ((GenericData.Record) next.get(2)).get(0).toString();
+                Integer c22 = (Integer) ((GenericData.Record) next.get(2)).get(1);
+
+                Map<String, String> c1 = new HashMap<>();
+                for (Utf8 key : map.keySet()) {
+                    String k = Strings.isEmpty(key) ? null : key.toString();
Review comment:
Does writing a null key come back as an empty string? Is this expected?
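If the intent is that a null key should round-trip as null rather than come back as an empty string, a hypothetical assertion in the test could make that explicit:
```
// hypothetical check: the null key survives the round trip as null,
// instead of the reader normalizing "" back to null
Map<String, String> expected = new HashMap<>();
expected.put("k1", "v1");
expected.put(null, "v2");
expected.put("k2", null);
Assert.assertEquals(expected, c1);
```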
##########
File path:
flink-formats/flink-parquet/src/main/java/org/apache/flink/formats/parquet/row/ParquetRowDataWriter.java
##########
@@ -224,6 +277,153 @@ private TimestampWriter(int precision) {
        public void write(RowData row, int ordinal) {
            recordConsumer.addBinary(timestampToInt96(row.getTimestamp(ordinal, precision)));
        }
+
+        @Override
+        public void write(ArrayData arrayData, int ordinal) {
+            recordConsumer.addBinary(timestampToInt96(arrayData.getTimestamp(ordinal, precision)));
+        }
+    }
+
+    /** Writes a map field to Parquet; both key and value are nullable. */
+    private class MapWriter implements FieldWriter {
+
+        private String repeatedGroupName;
+        private String keyName, valueName;
+        private FieldWriter keyWriter, valueWriter;
+
+        private MapWriter(LogicalType keyType, LogicalType valueType, GroupType groupType) {
+            // Get the internal map structure (MAP_KEY_VALUE)
+            GroupType repeatedType = groupType.getType(0).asGroupType();
+            this.repeatedGroupName = repeatedType.getName();
+
+            // Get key element information
+            Type type = repeatedType.getType(0);
+            this.keyName = type.getName();
+            this.keyWriter = createWriter(keyType, type);
+
+            // Get value element information
+            Type valuetype = repeatedType.getType(1);
+            this.valueName = valuetype.getName();
+            this.valueWriter = createWriter(valueType, valuetype);
+        }
+
+        @Override
+        public void write(RowData row, int ordinal) {
+            recordConsumer.startGroup();
+
+            MapData mapData = row.getMap(ordinal);
+
+            if (mapData != null && mapData.size() > 0) {
+                recordConsumer.startField(repeatedGroupName, 0);
+
+                ArrayData keyArray = mapData.keyArray();
+                ArrayData valueArray = mapData.valueArray();
+                for (int i = 0; i < keyArray.size(); i++) {
+                    recordConsumer.startGroup();
+                    // write key element
+                    recordConsumer.startField(keyName, 0);
+                    keyWriter.write(keyArray, i);
+                    recordConsumer.endField(keyName, 0);
+                    // write value element
+                    recordConsumer.startField(valueName, 1);
Review comment:
Maybe there should be an `if (!valueArray.isNullAt(i))` check here?
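A sketch of the suggested guard, using the field names from the diff above (skipping the field entirely for a null value is an assumption about the intended Parquet layout for optional fields):
```
if (!valueArray.isNullAt(i)) {
    recordConsumer.startField(valueName, 1);
    valueWriter.write(valueArray, i);
    recordConsumer.endField(valueName, 1);
}
```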
##########
File path:
flink-formats/flink-parquet/src/main/java/org/apache/flink/formats/parquet/row/ParquetRowDataWriter.java
##########
@@ -224,6 +277,153 @@ private TimestampWriter(int precision) {
        public void write(RowData row, int ordinal) {
            recordConsumer.addBinary(timestampToInt96(row.getTimestamp(ordinal, precision)));
        }
+
+        @Override
+        public void write(ArrayData arrayData, int ordinal) {
+            recordConsumer.addBinary(timestampToInt96(arrayData.getTimestamp(ordinal, precision)));
+        }
+    }
+
+    /** Writes a map field to Parquet; both key and value are nullable. */
+    private class MapWriter implements FieldWriter {
+
+        private String repeatedGroupName;
+        private String keyName, valueName;
+        private FieldWriter keyWriter, valueWriter;
+
+        private MapWriter(LogicalType keyType, LogicalType valueType, GroupType groupType) {
+            // Get the internal map structure (MAP_KEY_VALUE)
+            GroupType repeatedType = groupType.getType(0).asGroupType();
+            this.repeatedGroupName = repeatedType.getName();
+
+            // Get key element information
+            Type type = repeatedType.getType(0);
+            this.keyName = type.getName();
+            this.keyWriter = createWriter(keyType, type);
+
+            // Get value element information
+            Type valuetype = repeatedType.getType(1);
+            this.valueName = valuetype.getName();
+            this.valueWriter = createWriter(valueType, valuetype);
+        }
+
+        @Override
+        public void write(RowData row, int ordinal) {
+            recordConsumer.startGroup();
+
+            MapData mapData = row.getMap(ordinal);
+
+            if (mapData != null && mapData.size() > 0) {
+                recordConsumer.startField(repeatedGroupName, 0);
+
+                ArrayData keyArray = mapData.keyArray();
+                ArrayData valueArray = mapData.valueArray();
+                for (int i = 0; i < keyArray.size(); i++) {
+                    recordConsumer.startGroup();
+                    // write key element
+                    recordConsumer.startField(keyName, 0);
Review comment:
Maybe there should be an `if (!keyArray.isNullAt(i))` check here?
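Similarly, a sketch of the suggested key guard (same caveat as for the value above; whether a missing key is even representable depends on whether the key field is declared optional in the Parquet schema):
```
if (!keyArray.isNullAt(i)) {
    recordConsumer.startField(keyName, 0);
    keyWriter.write(keyArray, i);
    recordConsumer.endField(keyName, 0);
}
```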
--
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
To unsubscribe, e-mail: [email protected]
For queries about this service, please contact Infrastructure at:
[email protected]