tomstepp commented on code in PR #37463:
URL: https://github.com/apache/beam/pull/37463#discussion_r2755585476
##########
sdks/java/core/src/test/java/org/apache/beam/sdk/io/FileIOTest.java:
##########
@@ -547,4 +565,77 @@ public void testFileIoDynamicNaming() throws IOException {
"Output file shard 0 exists after pipeline completes",
new File(outputFileName + "-0").exists());
}
+
+  @Test
+  @Category({NeedsRunner.class, UsesUnboundedPCollections.class})
+  public void testWriteUnboundedWithCustomBatchParameters() throws IOException {
+    File root = tmpFolder.getRoot();
+    List<String> inputs = Arrays.asList("one", "two", "three", "four", "five", "six");
+
+    PTransform<PCollection<String>, PCollection<String>> transform =
+        Window.<String>into(FixedWindows.of(Duration.standardSeconds(10)))
+            .triggering(AfterWatermark.pastEndOfWindow())
+            .withAllowedLateness(Duration.ZERO)
+            .discardingFiredPanes();
+
+    FileIO.Write<Void, String> write =
+        FileIO.<String>write()
+            .via(TextIO.sink())
+            .to(root.getAbsolutePath())
+            .withPrefix("output")
+            .withSuffix(".txt")
+            .withAutoSharding()
+            .withBatchSize(CUSTOM_FILE_TRIGGERING_RECORD_COUNT)
+            .withBatchSizeBytes(CUSTOM_FILE_TRIGGERING_BYTE_COUNT)
+            .withBatchMaxBufferingDuration(CUSTOM_FILE_TRIGGERING_RECORD_BUFFERING_DURATION);
+
+    // Prepare timestamps for the elements.
+    List<Long> timestamps = new ArrayList<>();
+    for (long i = 0; i < inputs.size(); i++) {
+      timestamps.add(i + 1);
+    }
+
+    p.apply(Create.timestamped(inputs, timestamps).withCoder(StringUtf8Coder.of()))
+        .setIsBoundedInternal(IsBounded.UNBOUNDED)
+        .apply(transform)
+        .apply(write);
+    p.run().waitUntilFinish();
+
+    // Verify that the custom batch parameters are set.
+    assertEquals(CUSTOM_FILE_TRIGGERING_RECORD_COUNT, write.getBatchSize().intValue());
+    assertEquals(CUSTOM_FILE_TRIGGERING_BYTE_COUNT, write.getBatchSizeBytes().intValue());
+    assertEquals(
+        CUSTOM_FILE_TRIGGERING_RECORD_BUFFERING_DURATION, write.getBatchMaxBufferingDuration());
+
+    checkFileContents(root, "output", inputs);
Review Comment:
Could we check that the limits are propagated and actually used in processing? For example, we could set a batch size value small enough that two batches/files are created instead of one, and verify that.
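
A rough sketch of that check (illustrative only; it reuses the `FileSystems.match` pattern from `checkFileContents`, and assumes the configured batch size is smaller than the six input elements):

```java
// Hypothetical follow-up assertion: with six inputs and a record-count batch
// size of, say, 3, the sink should roll over and emit more than one file.
String pattern = new File(root, "output").getAbsolutePath() + "*";
List<MatchResult.Metadata> outputs =
    FileSystems.match(Collections.singletonList(pattern)).get(0).metadata();
assertTrue(
    "Expected the batch size limit to split output across multiple files",
    outputs.size() > 1);
```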
##########
sdks/java/core/src/test/java/org/apache/beam/sdk/io/FileIOTest.java:
##########
@@ -547,4 +565,77 @@ public void testFileIoDynamicNaming() throws IOException {
"Output file shard 0 exists after pipeline completes",
new File(outputFileName + "-0").exists());
}
+
+  @Test
+  @Category({NeedsRunner.class, UsesUnboundedPCollections.class})
+  public void testWriteUnboundedWithCustomBatchParameters() throws IOException {
+    File root = tmpFolder.getRoot();
+    List<String> inputs = Arrays.asList("one", "two", "three", "four", "five", "six");
+
+    PTransform<PCollection<String>, PCollection<String>> transform =
+        Window.<String>into(FixedWindows.of(Duration.standardSeconds(10)))
+            .triggering(AfterWatermark.pastEndOfWindow())
+            .withAllowedLateness(Duration.ZERO)
+            .discardingFiredPanes();
+
+    FileIO.Write<Void, String> write =
+        FileIO.<String>write()
+            .via(TextIO.sink())
+            .to(root.getAbsolutePath())
+            .withPrefix("output")
+            .withSuffix(".txt")
+            .withAutoSharding()
+            .withBatchSize(CUSTOM_FILE_TRIGGERING_RECORD_COUNT)
+            .withBatchSizeBytes(CUSTOM_FILE_TRIGGERING_BYTE_COUNT)
+            .withBatchMaxBufferingDuration(CUSTOM_FILE_TRIGGERING_RECORD_BUFFERING_DURATION);
+
+    // Prepare timestamps for the elements.
+    List<Long> timestamps = new ArrayList<>();
+    for (long i = 0; i < inputs.size(); i++) {
+      timestamps.add(i + 1);
+    }
+
+    p.apply(Create.timestamped(inputs, timestamps).withCoder(StringUtf8Coder.of()))
+        .setIsBoundedInternal(IsBounded.UNBOUNDED)
+        .apply(transform)
+        .apply(write);
+    p.run().waitUntilFinish();
+
+    // Verify that the custom batch parameters are set.
+    assertEquals(CUSTOM_FILE_TRIGGERING_RECORD_COUNT, write.getBatchSize().intValue());
+    assertEquals(CUSTOM_FILE_TRIGGERING_BYTE_COUNT, write.getBatchSizeBytes().intValue());
+    assertEquals(
+        CUSTOM_FILE_TRIGGERING_RECORD_BUFFERING_DURATION, write.getBatchMaxBufferingDuration());
+
+    checkFileContents(root, "output", inputs);
+  }
+
+  static void checkFileContents(File rootDir, String prefix, List<String> inputs)
+      throws IOException {
+    List<File> outputFiles = Lists.newArrayList();
+    final String pattern = new File(rootDir, prefix).getAbsolutePath() + "*";
+    List<Metadata> metadata =
+        FileSystems.match(Collections.singletonList(pattern)).get(0).metadata();
+    for (Metadata meta : metadata) {
+      outputFiles.add(new File(meta.resourceId().toString()));
+    }
+    assertFalse("Should have produced at least 1 output file", outputFiles.isEmpty());
+
+    List<String> actual = Lists.newArrayList();
+    for (File outputFile : outputFiles) {
+      List<String> actualShard = Lists.newArrayList();
+      try (BufferedReader reader =
+          Files.newBufferedReader(outputFile.toPath(), StandardCharsets.UTF_8)) {
+        for (;;) {
Review Comment:
This may be clearer as a while loop with a condition like `while (line != null)`.
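
Something like this, assuming the loop body reads lines into `actualShard` (sketch, not tested):

```java
// Read until readLine() signals end-of-file by returning null.
String line = reader.readLine();
while (line != null) {
  actualShard.add(line);
  line = reader.readLine();
}
```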
##########
sdks/java/core/src/main/java/org/apache/beam/sdk/io/FileIO.java:
##########
@@ -74,10 +74,10 @@
import org.apache.beam.sdk.values.PCollectionView;
import org.apache.beam.sdk.values.TypeDescriptor;
import org.apache.beam.sdk.values.TypeDescriptors;
-import org.apache.beam.vendor.guava.v32_1_2_jre.com.google.common.annotations.VisibleForTesting;
-import org.apache.beam.vendor.guava.v32_1_2_jre.com.google.common.base.MoreObjects;
-import org.apache.beam.vendor.guava.v32_1_2_jre.com.google.common.base.Objects;
-import org.apache.beam.vendor.guava.v32_1_2_jre.com.google.common.collect.Lists;
+import com.google.common.annotations.VisibleForTesting;
+import com.google.common.base.MoreObjects;
+import com.google.common.base.Objects;
+import com.google.common.collect.Lists;
Review Comment:
I believe Beam intentionally uses vendored Guava, so we should revert this import change. @Abacn can confirm.
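
For reference, the vendored imports this diff removes, which reverting would restore:

```java
import org.apache.beam.vendor.guava.v32_1_2_jre.com.google.common.annotations.VisibleForTesting;
import org.apache.beam.vendor.guava.v32_1_2_jre.com.google.common.base.MoreObjects;
import org.apache.beam.vendor.guava.v32_1_2_jre.com.google.common.base.Objects;
import org.apache.beam.vendor.guava.v32_1_2_jre.com.google.common.collect.Lists;
```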
--
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
To unsubscribe, e-mail: [email protected]
For queries about this service, please contact Infrastructure at:
[email protected]