nsivabalan commented on code in PR #17505:
URL: https://github.com/apache/hudi/pull/17505#discussion_r2738656835
##########
hudi-client/hudi-spark-client/src/main/java/org/apache/hudi/client/utils/SparkValidatorUtils.java:
##########
@@ -146,14 +149,37 @@ public static Dataset<Row> getRecordsFromCommittedFiles(SQLContext sqlContext,
             sqlContext.emptyDataFrame().rdd(), newStructTypeSchema);
       }
     }
-    return readRecordsForBaseFiles(sqlContext, committedFiles);
+    return readRecordsForBaseFiles(sqlContext, committedFiles, table);
   }
 
   /**
    * Get records from specified list of data files.
    */
-  public static Dataset<Row> readRecordsForBaseFiles(SQLContext sqlContext, List<String> baseFilePaths) {
-    return sqlContext.read().parquet(JavaScalaConverters.convertJavaListToScalaSeq(baseFilePaths));
+  public static Dataset<Row> readRecordsForBaseFiles(SQLContext sqlContext, List<String> baseFilePaths,
+                                                     HoodieTable table) {
+    final HoodieSchema readerSchema;
+    String schemaStr = table.getConfig().getWriteSchema();
+    boolean isPopulateMetaFieldsEnabled = table.getConfig().populateMetaFields();
+    if (!StringUtils.isNullOrEmpty(schemaStr)) {
+      Schema schema = new Schema.Parser().parse(table.getConfig().getWriteSchema());
+      readerSchema = HoodieSchema.fromAvroSchema(
+          isPopulateMetaFieldsEnabled ? HoodieAvroUtils.addMetadataFields(schema) : schema);
+    } else {
+      LOG.warn("Schema not found from write config, defaulting to parsing schema from latest commit.");
+      try {
+        readerSchema = new TableSchemaResolver(table.getMetaClient()).getTableSchema();
+      } catch (Exception e) {
+        LOG.error(String.format("Failed parsing schema from latest commit with exception %s, "
Review Comment:
should this be `warn` level?
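
For illustration, a minimal sketch of what the downgrade could look like, limited to the fallback branch of `readRecordsForBaseFiles` (the message text and the rethrow are placeholders, not the PR's actual code; since `readerSchema` is `final`, the catch block has to rethrow something):

```java
try {
  readerSchema = new TableSchemaResolver(table.getMetaClient()).getTableSchema();
} catch (Exception e) {
  // Log at warn rather than error: the failure is surfaced to the caller via the
  // rethrow below, so the warn entry only records the fallback attempt that failed.
  LOG.warn("Failed parsing schema from latest commit for pre-commit validation", e);
  // Illustrative rethrow; the exception type used in the PR may differ.
  throw new HoodieValidationException("Unable to resolve reader schema from the latest commit", e);
}
```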
##########
hudi-spark-datasource/hudi-spark/src/test/java/org/apache/hudi/client/functional/TestHoodieClientOnCopyOnWriteStorage.java:
##########
@@ -1171,6 +1173,57 @@ public void testInflightClusteringRollbackWhenUpdatesAllowed(boolean rollbackPen
     }
   }
 
+  @Test
+  public void testWriteSchemaUsageOnPreCommitValidators() throws Exception {
+    // Create first commit on DEFAULT_FIRST_PARTITION_PATH partition with basic schema.
+    String schemaStr = TRIP_EXAMPLE_SCHEMA;
+    HoodieWriteConfig writeConfig =
+        getConfigBuilder(schemaStr)
+            .withCompactionConfig(HoodieCompactionConfig.newBuilder()
+                .compactionSmallFileSize(0)
+                .build())
+            .build();
+    SparkRDDWriteClient client = getHoodieWriteClient(writeConfig);
+    HoodieTestDataGenerator dataGen =
+        new HoodieTestDataGenerator(new String[]{DEFAULT_FIRST_PARTITION_PATH});
+    String firstCommit = client.startCommit();
+    List<HoodieRecord> firstBatch = dataGen.generateInsertsAsPerSchema(firstCommit, 10, schemaStr);
+    JavaRDD<HoodieRecord> records = context.getJavaSparkContext().parallelize(firstBatch, 1);
+    JavaRDD<WriteStatus> writeStatuses = client.insert(records, firstCommit);
+    client.commit(firstCommit, writeStatuses);
+
+    // Now create another commit on DEFAULT_SECOND_PARTITION_PATH partition
+    // with a schema that contains new columns.
+    String secondCommit = client.startCommit();
+    String latestSchemaStr = TRIP_EXAMPLE_SCHEMA_EVOLVED_1;
+    List<HoodieRecord> secondBatch = dataGen.generateInsertsAsPerSchema(secondCommit, 10, latestSchemaStr);
+    records = context.getJavaSparkContext().parallelize(secondBatch, 1);
+    writeStatuses = client.insert(records, secondCommit);
+    client.commit(secondCommit, writeStatuses);
+
+    // Create a clustering commit on the DEFAULT_FIRST_PARTITION_PATH partition.
+    // Pass in SqlQueryEqualityPreCommitValidator as the precommit validator and check
+    // equality of the hashed columns, including the evolved extra_column1 column.
+    HoodiePreCommitValidatorConfig preCommitValidatorConfig = HoodiePreCommitValidatorConfig
+        .newBuilder()
+        .withPreCommitValidator(SqlQueryEqualityPreCommitValidator.class.getName())
+        .withPrecommitValidatorEqualitySqlQueries(
+            "select "
+                + "sum(hash(driver)) as hash_driver, "
+                + "sum(hash(rider)) as hash_rider, "
+                + "sum(hash(extra_column1)) as hash_extra_column1 "
+                + "from <TABLE_NAME>")
+        .build();
+    HoodieWriteConfig clusteringWriteConfig =
+        getConfigBuilder(latestSchemaStr)
+            .withClusteringConfig(createClusteringBuilder(true, 1).build())
+            .withPreCommitValidatorConfig(preCommitValidatorConfig)
+            .build();
+    SparkRDDWriteClient clusteringWriter = getHoodieWriteClient(clusteringWriteConfig);
+    Option<String> clusteringInstant = clusteringWriter.scheduleClustering(Option.empty());
+    assertTrue(clusteringInstant.isPresent());
+    clusteringWriter.cluster(clusteringInstant.get());
Review Comment:
is this test failing w/ the fix in the source code?
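
If the goal is to show the test actually exercises the fix, one option is to run it once with the source change reverted and assert the failure mode explicitly. A sketch only, assuming the validator failure surfaces from cluster() as a HoodieValidationException (the exception type and the JUnit 5 assertThrows usage are assumptions, not taken from this PR):

```java
// Hypothetical pre-fix assertion: if the reader falls back to the latest commit schema
// instead of the write schema, the equality query on extra_column1 is expected to fail
// the validator and abort the clustering commit.
assertThrows(HoodieValidationException.class,
    () -> clusteringWriter.cluster(clusteringInstant.get()));
```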
--
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
To unsubscribe, e-mail: [email protected]
For queries about this service, please contact Infrastructure at:
[email protected]