Github user tliron commented on a diff in the pull request:
https://github.com/apache/incubator-ariatosca/pull/207#discussion_r153006931
--- Diff: aria/parser/consumption/presentation.py ---
@@ -31,47 +32,33 @@ class Read(Consumer):
instances.
It supports agnostic raw data composition for presenters that have
- ``_get_import_locations`` and ``_merge_import``.
+ ``_get_import_locations``, ``_validate_import``, and ``_merge_import``.
To improve performance, loaders are called asynchronously on separate
threads.
Note that parsing may internally trigger more than one
loading/reading/presentation
cycle, for example if the agnostic raw data has dependencies that must
also be parsed.
"""
- def consume(self):
- if self.context.presentation.location is None:
-            self.context.validation.report('Presentation consumer: missing location')
-            return
-
- presenter = None
- imported_presentations = None
+ def __init__(self, context):
+ super(Read, self).__init__(context)
+ self._cache = {}
-        executor = FixedThreadPoolExecutor(size=self.context.presentation.threads,
-                                           timeout=self.context.presentation.timeout)
-        executor.print_exceptions = self.context.presentation.print_exceptions
- try:
-            presenter = self._present(self.context.presentation.location, None, None, executor)
- executor.drain()
-
- # Handle exceptions
- for e in executor.exceptions:
- self._handle_exception(e)
+ def consume(self):
+ # Present the main location and all imports recursively
+ main, results = self._present_all()
- imported_presentations = executor.returns
- finally:
- executor.close()
+ # Merge presentations
+ main.merge(results, self.context)
- # Merge imports
-        if (imported_presentations is not None) and hasattr(presenter, '_merge_import'):
- for imported_presentation in imported_presentations:
- okay = True
- if hasattr(presenter, '_validate_import'):
-                    okay = presenter._validate_import(self.context, imported_presentation)
- if okay:
- presenter._merge_import(imported_presentation)
+ # Cache merged presentations
+ if self.context.presentation.cache:
+ for result in results:
+ result.cache()
--- End diff --
Yes, it needs to be global for high-efficiency concurrent testing: there,
we have many threads each creating their own contexts and consumers, but we
want them to all enjoy the cached results.
---