y1chi commented on a change in pull request #12709:
URL: https://github.com/apache/beam/pull/12709#discussion_r483288419
##########
File path:
sdks/python/apache_beam/testing/benchmarks/nexmark/nexmark_launcher.py
##########
@@ -200,46 +233,128 @@ def generate_events(self):
logging.info('Finished event generation.')
+ def read_from_file(self):
+ return (
+ self.pipeline
+ | 'reading_from_file' >> beam.io.ReadFromText(self.args.input)
+ | 'deserialization' >> beam.ParDo(nexmark_util.ParseJsonEventFn())
+ | 'timestamping' >>
+ beam.Map(lambda e: window.TimestampedValue(e, e.date_time)))
+
+ def read_from_pubsub(self):
# Read from PubSub into a PCollection.
- if self.args.subscription_name:
- raw_events = self.pipeline | 'ReadPubSub' >> beam.io.ReadFromPubSub(
- subscription=sub.full_name)
+ if self.subscription_name:
+ raw_events = self.pipeline | 'ReadPubSub_sub' >> beam.io.ReadFromPubSub(
+ subscription=self.subscription_name,
+ with_attributes=True,
+ id_label='id',
+ timestamp_attribute='timestamp')
else:
- raw_events = self.pipeline | 'ReadPubSub' >> beam.io.ReadFromPubSub(
- topic=topic.full_name)
- raw_events = (
+ raw_events = self.pipeline | 'ReadPubSub_topic' >>
beam.io.ReadFromPubSub(
+ topic=self.topic_name,
+ with_attributes=True,
+ id_label='id',
+ timestamp_attribute='timestamp')
+ events = (
raw_events
- | 'deserialization' >> beam.ParDo(nexmark_util.ParseJsonEvnetFn())
- | 'timestamping' >>
- beam.Map(lambda e: window.TimestampedValue(e, e.date_time)))
- return raw_events
+ | 'pubsub_unwrap' >> beam.Map(lambda m: m.data)
+ | 'deserialization' >> beam.ParDo(nexmark_util.ParseJsonEventFn()))
+ return events
def run_query(self, query, query_args, query_errors):
try:
- self.parse_args()
self.pipeline = beam.Pipeline(options=self.pipeline_options)
nexmark_util.setup_coder()
event_monitor = Monitor('.events', 'event')
result_monitor = Monitor('.results', 'result')
- events = self.generate_events()
+ if self.streaming:
+ if self.pubsub_mode != 'SUBSCRIBE_ONLY':
+ self.generate_events()
+ if self.pubsub_mode == 'PUBLISH_ONLY':
+ return
+ events = self.read_from_pubsub()
+ else:
+ events = self.read_from_file()
+
events = events | 'event_monitor' >> beam.ParDo(event_monitor.doFn)
output = query.load(events, query_args)
output | 'result_monitor' >> beam.ParDo(result_monitor.doFn) # pylint:
disable=expression-not-assigned
result = self.pipeline.run()
- job_duration = (
-
self.pipeline_options.view_as(TestOptions).wait_until_finish_duration)
- if self.pipeline_options.view_as(StandardOptions).runner ==
'DataflowRunner': # pylint: disable=line-too-long
- result.wait_until_finish(duration=job_duration)
- result.cancel()
+ if self.runner == 'DataflowRunner':
+ result.wait_until_finish(duration=self.wait_until_finish_duration)
Review comment:
The wait_until_finish duration is an overall timeout; we don't need to wait
for the Python pipeline to spin up, and I don't think wait_until_finish was
meant for that.
----------------------------------------------------------------
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
For queries about this service, please contact Infrastructure at:
[email protected]