rohdesamuel commented on a change in pull request #12415:
URL: https://github.com/apache/beam/pull/12415#discussion_r473308012



##########
File path: sdks/python/apache_beam/runners/interactive/recording_manager.py
##########
@@ -0,0 +1,329 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+from __future__ import absolute_import
+
+import logging
+import threading
+import time
+import warnings
+
+import apache_beam as beam
+from apache_beam.runners.interactive import background_caching_job as bcj
+from apache_beam.runners.interactive import interactive_environment as ie
+from apache_beam.runners.interactive import interactive_runner as ir
+from apache_beam.runners.interactive import pipeline_fragment as pf
+from apache_beam.runners.interactive import pipeline_instrument as pi
+from apache_beam.runners.interactive import utils
+from apache_beam.runners.interactive.options.capture_limiters import CountLimiter
+from apache_beam.runners.interactive.options.capture_limiters import ProcessingTimeLimiter
+
# Module-level logger, named after this module per the logging convention.
_LOGGER = logging.getLogger(__name__)

# Local alias for the pipeline state constants used throughout this module.
PipelineState = beam.runners.runner.PipelineState
+
+
class ElementStream:
  """A stream of elements from a given PCollection."""
  def __init__(
      self,
      pcoll,  # type: beam.pvalue.PCollection
      var,  # type: str
      cache_key,  # type: str
      max_n,  # type: int
      max_duration_secs  # type: float
      ):
    """Initializes the stream.

    Args:
      pcoll: The PCollection whose cached elements this stream yields.
      var: The variable name that defined the PCollection.
      cache_key: The key identifying this PCollection in the cache manager.
      max_n: Maximum number of elements to yield from the cache.
      max_duration_secs: Maximum processing time to spend reading elements.
    """
    self._pcoll = pcoll
    self._cache_key = cache_key
    self._pipeline = pcoll.pipeline
    self._var = var
    self._n = max_n
    self._duration_secs = max_duration_secs

    # A small state variable that when True, indicates that no more new
    # elements will be yielded if read() is called again.
    self._done = False

  def var(self):
    # type: () -> str

    """Returns the variable name that defined this PCollection."""
    return self._var

  def display_id(self, suffix):
    # type: (str) -> str

    """Returns a unique id able to be displayed in a web browser."""
    return utils.obfuscate(self._cache_key, suffix)

  def is_computed(self):
    # type: () -> bool

    """Returns True if no more elements will be recorded."""
    return self._pcoll in ie.current_env().computed_pcollections

  def is_done(self):
    # type: () -> bool

    """Returns True if no more new elements will be yielded."""
    return self._done

  def read(self, tail=True):
    # type: (bool) -> Any

    """Reads and yields the elements currently recorded in the cache.

    Args:
      tail: If True, keeps reading new elements as they arrive until a
        limiter triggers or the pipeline terminates.

    Yields:
      Decoded elements (with window info) read from the cache.
    """

    # Get the cache manager and wait until the file exists.
    cache_manager = ie.current_env().get_cache_manager(self._pipeline)
    while not cache_manager.exists('full', self._cache_key):
      time.sleep(0.5)

    # Retrieve the coder for the particular PCollection which will be used to
    # decode elements read from cache.
    coder = cache_manager.load_pcoder('full', self._cache_key)

    # Read the elements from the cache, bounded by both an element count and
    # a processing-time budget.
    limiters = [
        CountLimiter(self._n), ProcessingTimeLimiter(self._duration_secs)
    ]
    reader, _ = cache_manager.read(
        'full', self._cache_key, limiters=limiters, tail=tail)

    # Because a single TestStreamFileRecord can yield multiple elements, we
    # limit the count again here in the to_element_list call.
    for e in utils.to_element_list(
        reader, coder, include_window_info=True, n=self._n):
      yield e

    # A limiter being triggered means that we have fulfilled the user's
    # request. This implies that reading from the cache again won't yield any
    # new elements. WLOG, this applies to the user pipeline being terminated.
    if (any(l.is_triggered() for l in limiters) or
        ie.current_env().is_terminated(self._pipeline)):
      self._done = True

Review comment:
       In that case, it means that there are still more elements to be read 
from the cache.
   
   If the pipeline isn't finished (and the limiters aren't triggered), the 
cache will yield an incomplete set of data. Thus, if a user of the 
ElementStream were to read from the cache again, the cache would yield more 
results. 
   
   There are two ways to exit from reading from cache: limiters are triggered, 
or all elements in the cache have been read. Just because all the elements in 
the cache have been read doesn't imply that the pipeline is done nor that the 
limiters have been triggered.




----------------------------------------------------------------
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

For queries about this service, please contact Infrastructure at:
[email protected]


Reply via email to