robertwb commented on a change in pull request #11148: [BEAM-8335] Adds a
streaming wordcount integration test
URL: https://github.com/apache/beam/pull/11148#discussion_r397536367
##########
File path:
sdks/python/apache_beam/runners/interactive/interactive_runner_test.py
##########
@@ -147,6 +150,97 @@ def process(self, element):
]
self.assertEqual(actual_reified, expected_reified)
+ def test_streaming_wordcount(self):
+ class WordExtractingDoFn(beam.DoFn):
+ def process(self, element):
+ text_line = element.strip()
+ words = text_line.split()
+ return words
+
+ # Add the TestStream so that it can be cached.
+ ib.options.capturable_sources.add(TestStream)
+ ib.options.capture_duration = timedelta(seconds=1)
+
+ p = beam.Pipeline(
+ runner=interactive_runner.InteractiveRunner(),
+ options=StandardOptions(streaming=True))
+
+ data = (
+ p
+ | TestStream()
+ .advance_watermark_to(0)
+ .advance_processing_time(1)
+ .add_elements(['to', 'be', 'or', 'not', 'to', 'be'])
+ .advance_watermark_to(20)
+ .advance_processing_time(1)
+ .add_elements(['to', 'be', 'or', 'not', 'to', 'be'])
+ .advance_watermark_to(40)
+ .advance_processing_time(1)
+ .add_elements(['to', 'be', 'or', 'not', 'to', 'be'])
+ | beam.WindowInto(beam.window.FixedWindows(10))) # yapf: disable
+
+ counts = (
+ data
+ | 'split' >> beam.ParDo(WordExtractingDoFn())
+ | 'pair_with_one' >> beam.Map(lambda x: (x, 1))
+ | 'group' >> beam.GroupByKey()
+ | 'count' >> beam.Map(lambda wordones: (wordones[0],
sum(wordones[1]))))
+
+ # Watch the local scope for Interactive Beam so that referenced
PCollections
+ # will be cached.
+ ib.watch(locals())
+
+ # This is normally done in the interactive_utils when a transform is
+ # applied but needs an IPython environment. So we manually run this here.
+ ie.current_env().track_user_pipelines()
+
+ # This tests that the data was correctly cached.
+ pane_info = PaneInfo(True, True, PaneInfoTiming.UNKNOWN, 0, 0)
+ expected_data_df = pd.DataFrame(
+ [('to', 0, [beam.window.IntervalWindow(0, 10)], pane_info),
Review comment:
Thanks, that's much better--now I can read it and know the answer is right.
(Duplicates would have been OK, and the data set could be smaller still and
still provide the right coverage, but this is fine.)
LGTM
----------------------------------------------------------------
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
For queries about this service, please contact Infrastructure at:
[email protected]
With regards,
Apache Git Services