TheNeuralBit commented on code in PR #17671:
URL: https://github.com/apache/beam/pull/17671#discussion_r876045980
##########
sdks/python/apache_beam/ml/inference/base_test.py:
##########
@@ -133,14 +133,14 @@ def test_timing_metrics(self):
MetricsFilter().with_name('inference_batch_latency_micro_secs')))
Review Comment:
I think this metric should be renamed if we're changing the precision: the name still says `micro_secs`, but the values would now be in milliseconds.
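For example (names hypothetical, and assuming we standardize on milliseconds), the metric and this test filter would need to change together:

```python
from apache_beam.metrics.metric import Metrics, MetricsFilter

# Hypothetical renamed metric; 'BeamML_RunInference' stands in for
# whatever get_metrics_namespace() actually returns.
latency = Metrics.distribution(
    'BeamML_RunInference', 'inference_batch_latency_milli_secs')

# The filter in test_timing_metrics would then follow the rename:
MetricsFilter().with_name('inference_batch_latency_milli_secs')
```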
##########
sdks/python/apache_beam/ml/inference/base.py:
##########
@@ -150,27 +155,24 @@ def update(
class _RunInferenceDoFn(beam.DoFn):
"""A DoFn implementation generic to frameworks."""
- def __init__(self, model_loader: ModelLoader, clock=None):
+ def __init__(self, model_loader: ModelLoader, clock):
self._model_loader = model_loader
self._inference_runner = model_loader.get_inference_runner()
self._shared_model_handle = shared.Shared()
self._metrics_collector = _MetricsCollector(
self._inference_runner.get_metrics_namespace())
self._clock = clock
- if not clock:
- self._clock = _ClockFactory.make_clock()
self._model = None
def _load_model(self):
def load():
"""Function for constructing shared LoadedModel."""
memory_before = _get_current_process_memory_in_bytes()
- start_time = self._clock.get_current_time_in_microseconds()
+ start_time = _to_milliseconds(self._clock.time())
model = self._model_loader.load_model()
- end_time = self._clock.get_current_time_in_microseconds()
+ end_time = _to_milliseconds(self._clock.time())
Review Comment:
A couple of questions here:
- Why change the units to milliseconds?
- Maybe we should use [`time_ns`](https://docs.python.org/3/library/time.html#time.time_ns) instead? The `time` docs say `time_ns` should be preferred to avoid precision loss. It's Python 3.7+ only, but we've dropped support for 3.6 now.
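To make that concrete, here's a rough sketch of the `time_ns`-based timing (the `_to_micros` helper is hypothetical; I'm keeping microseconds so the existing metric name stays accurate):

```python
import time

def _to_micros(time_ns: int) -> int:
  """Converts a time.time_ns()-style nanosecond timestamp to microseconds."""
  return time_ns // 1_000

# Timing in load(), with self._clock assumed to mirror the stdlib time module:
start_time = _to_micros(time.time_ns())
# model = self._model_loader.load_model()  # the work being timed
end_time = _to_micros(time.time_ns())
load_latency_micros = end_time - start_time
```

Since `time_ns` returns an int, the integer division avoids the float rounding you get from multiplying `time.time()` out to microseconds.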
--
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
To unsubscribe, e-mail: [email protected]
For queries about this service, please contact Infrastructure at:
[email protected]