RussellSpitzer commented on a change in pull request #3973:
URL: https://github.com/apache/iceberg/pull/3973#discussion_r810396577
##########
File path: spark/v3.2/spark/src/main/java/org/apache/iceberg/spark/procedures/AddFilesProcedure.java
##########
@@ -169,27 +177,27 @@ private void importFileTable(Table table, Path tableLocation, String format, Map
       // Build a Global Partition for the source
       SparkPartition partition = new SparkPartition(Collections.emptyMap(), tableLocation.toString(), format);
-      importPartitions(table, ImmutableList.of(partition), checkDuplicateFiles);
+      importPartitions(table, ImmutableList.of(partition), checkDuplicateFiles, parallelism);
     } else {
       Preconditions.checkArgument(!partitions.isEmpty(),
           "Cannot find any matching partitions in table %s", partitions);
-      importPartitions(table, partitions, checkDuplicateFiles);
+      importPartitions(table, partitions, checkDuplicateFiles, parallelism);
     }
   }

   private void importCatalogTable(Table table, Identifier sourceIdent, Map<String, String> partitionFilter,
-                                  boolean checkDuplicateFiles) {
+                                  boolean checkDuplicateFiles, int parallelism) {
     String stagingLocation = getMetadataLocation(table);
     TableIdentifier sourceTableIdentifier = Spark3Util.toV1TableIdentifier(sourceIdent);
     SparkTableUtil.importSparkTable(spark(), sourceTableIdentifier, table, stagingLocation, partitionFilter,
-        checkDuplicateFiles);
+        checkDuplicateFiles, parallelism);
   }

   private void importPartitions(Table table, List<SparkTableUtil.SparkPartition> partitions,
-                                boolean checkDuplicateFiles) {
+                                boolean checkDuplicateFiles, int parallelism) {
     String stagingLocation = getMetadataLocation(table);
     SparkTableUtil.importSparkPartitions(spark(), partitions, table, table.spec(), stagingLocation,
-        checkDuplicateFiles);
+        checkDuplicateFiles, parallelism);
Review comment:
nit: format
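
For illustration only, here is one way the wrapped call could be laid out; this is a sketch based solely on the method names and parameters visible in the diff above, not necessarily the project's exact formatting rules:

```java
  private void importPartitions(Table table, List<SparkTableUtil.SparkPartition> partitions,
                                boolean checkDuplicateFiles, int parallelism) {
    String stagingLocation = getMetadataLocation(table);
    // Keep the continuation line aligned with the call's standard indent
    // and pass the new parallelism argument last, matching the other call sites.
    SparkTableUtil.importSparkPartitions(
        spark(), partitions, table, table.spec(), stagingLocation, checkDuplicateFiles, parallelism);
  }
```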
--
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
To unsubscribe, e-mail: [email protected]
For queries about this service, please contact Infrastructure at:
[email protected]