the-other-tim-brown commented on code in PR #768:
URL: https://github.com/apache/incubator-xtable/pull/768#discussion_r2838188437
##########
xtable-core/src/test/java/org/apache/xtable/parquet/ITParquetConversionSource.java:
##########
@@ -184,77 +200,43 @@ private ConversionSourceProvider<?>
getConversionSourceProvider(String sourceTab
}
}
- @ParameterizedTest
- @MethodSource("provideArgsForFileNonPartitionTesting")
- public void testFileNonPartitionedData(
- TableFormatPartitionDataHolder tableFormatPartitionDataHolder) throws
URISyntaxException {
- String tableName = getTableName();
- String sourceTableFormat =
tableFormatPartitionDataHolder.getSourceTableFormat();
- List<String> targetTableFormats =
tableFormatPartitionDataHolder.getTargetTableFormats();
- String xTablePartitionConfig =
tableFormatPartitionDataHolder.getXTablePartitionConfig();
- ConversionSourceProvider<?> conversionSourceProvider =
- getConversionSourceProvider(sourceTableFormat);
-
- List<Row> data =
- Arrays.asList(
- RowFactory.create(1, "Alice", true, 30.1, new
Timestamp(System.currentTimeMillis())),
- RowFactory.create(
- 2, "Bob", false, 24.6, new
Timestamp(System.currentTimeMillis() + 1000)),
- RowFactory.create(
- 3, "Charlie", true, 35.2, new
Timestamp(System.currentTimeMillis() + 2000)),
- RowFactory.create(
- 4, "David", false, 29.5, new
Timestamp(System.currentTimeMillis() + 3000)),
- RowFactory.create(
- 5, "Eve", true, 22.2, new Timestamp(System.currentTimeMillis()
+ 4000)));
-
- schema =
- DataTypes.createStructType(
- new StructField[] {
- DataTypes.createStructField("id", DataTypes.IntegerType, false),
- DataTypes.createStructField("name", DataTypes.StringType, false),
- DataTypes.createStructField("hasSiblings",
DataTypes.BooleanType, false),
- DataTypes.createStructField("age", DataTypes.DoubleType, false),
- DataTypes.createStructField(
- "timestamp",
- DataTypes.TimestampType,
- false,
- new MetadataBuilder().putString("precision",
"millis").build())
- });
- Dataset<Row> df = sparkSession.createDataFrame(data, schema);
- String dataPath = tempDir.toAbsolutePath().toString() +
"/non_partitioned_data";
- df.write().mode(SaveMode.Overwrite).parquet(dataPath);
- GenericTable table;
- table =
- GenericTable.getInstance(
- tableName, Paths.get(dataPath), sparkSession, jsc,
sourceTableFormat, false);
- try (GenericTable tableToClose = table) {
- ConversionConfig conversionConfig =
- getTableSyncConfig(
- sourceTableFormat,
- SyncMode.FULL,
- tableName,
- table,
- targetTableFormats,
- xTablePartitionConfig,
- null);
- ConversionController conversionController =
- new ConversionController(jsc.hadoopConfiguration());
- conversionController.sync(conversionConfig, conversionSourceProvider);
- checkDatasetEquivalenceWithFilter(sourceTableFormat, tableToClose,
targetTableFormats, false);
+ private void cleanupTargetMetadata(String dataPath, List<String> formats) {
Review Comment:
The test should not be updated to hide the bug; the bug needs to be fixed in
the feature code itself.
--
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
To unsubscribe, e-mail: [email protected]
For queries about this service, please contact Infrastructure at:
[email protected]