linliu-code commented on code in PR #13300:
URL: https://github.com/apache/hudi/pull/13300#discussion_r2098383010
##########
hudi-common/src/main/java/org/apache/hudi/metadata/HoodieBackedTableMetadata.java:
##########
@@ -325,6 +430,81 @@ private Map<String, HoodieRecord<HoodieMetadataPayload>>
lookupKeysFromFileSlice
}
}
+ private Map<String, HoodieRecord<HoodieMetadataPayload>>
lookupKeysWithFileGroupReader(String partitionName,
+
List<String> keys,
+
FileSlice fileSlice) {
+ try {
+ // Sort it here once so that we don't need to sort individually for base
file and for each individual log files.
+ List<String> sortedKeys = new ArrayList<>(keys);
+ // So we use the natural order to sort.
+ Collections.sort(sortedKeys);
+ Option<HoodieInstant> latestMetadataInstant =
+
metadataMetaClient.getActiveTimeline().filterCompletedInstants().lastInstant();
+ String latestMetadataInstantTime =
+
latestMetadataInstant.map(HoodieInstant::requestedTime).orElse(SOLO_COMMIT_TIMESTAMP);
+ Schema schema =
HoodieAvroUtils.addMetadataFields(HoodieMetadataRecord.getClassSchema());
+ // Only those log files which have a corresponding completed instant on
the dataset should be read
+ // This is because the metadata table is updated before the dataset
instants are committed.
+ Set<String> validInstantTimestamps = getValidInstantTimestamps();
+ InstantRange instantRange = InstantRange.builder()
+ .rangeType(InstantRange.RangeType.EXACT_MATCH)
+ .explicitInstants(validInstantTimestamps).build();
+
+ HoodieFileGroupReader fileGroupReader = getFileGroupReader(
+ metadataMetaClient.getTableConfig(),
+ metadataMetaClient.getBasePath().toString(),
+ latestMetadataInstantTime,
+ fileSlice,
+ schema,
+ schema,
+ Option.empty(),
+ metadataMetaClient,
+ new TypedProperties(),
+ Collections.emptyList());
+ fileGroupReader.setInstantRange(Option.of(instantRange));
+ fileGroupReader.initRecordIterators();
+ ClosableIterator it = fileGroupReader.getClosableIterator();
+ Map<String, HoodieRecord<HoodieMetadataPayload>> records = new
HashMap<>();
+ while (it.hasNext()) {
+ HoodieMetadataRecord r = transform((GenericRecord) it.next());
+ // Remove bad results.
+ if (!keys.contains(r.getKey())) {
+ continue;
+ }
+ HoodieMetadataPayload payload = new HoodieMetadataPayload(r,
r.getKey());
+ HoodieKey key = new HoodieKey(r.getKey(), partitionName);
+ HoodieAvroRecord record = new HoodieAvroRecord(key, payload);
+ records.put(key.getRecordKey(), record);
+ }
+ return records;
+ } catch (IOException e) {
+ throw new HoodieIOException("Error merging records from metadata table
for " + keys.size() + " keys : ", e);
+ }
+ }
+
+ /**
+ * This is a temporary solution. We should create a reader to generate
HoodieMetadataRecord
+ * record directly.
+ * TODO: We should have a specific hfile writer for SpecialRecord instead of
GenericRecord.
+ * @param from
+ * @return
+ * @throws IOException
+ */
+ private static HoodieMetadataRecord transform(GenericRecord from) throws
IOException {
Review Comment:
I found that the metadata writer writes `GenericRecord` rather than
`HoodieMetadataRecord`, so I now use `GenericRecord` directly.
--
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
To unsubscribe, e-mail: [email protected]
For queries about this service, please contact Infrastructure at:
[email protected]