GitHub user tliron commented on a diff in the pull request:
https://github.com/apache/incubator-ariatosca/pull/207#discussion_r153008314
--- Diff: aria/parser/consumption/presentation.py ---
@@ -86,52 +73,193 @@ def dump(self):
self.context.presentation.presenter._dump(self.context)
def _handle_exception(self, e):
- if isinstance(e, AlreadyReadException):
+ if isinstance(e, _Skip):
return
super(Read, self)._handle_exception(e)
- def _present(self, location, origin_location, presenter_class,
executor):
+ def _present_all(self):
+ location = self.context.presentation.location
+
+ if location is None:
+ self.context.validation.report('Read consumer: missing
location')
+ return
+
+ executor = self.context.presentation.create_executor()
+ try:
+ # This call may recursively submit tasks to the executor if
there are imports
+ main = self._present(location, None, None, executor)
+
+ # Wait for all tasks to complete
+ executor.drain()
+
+ # Handle exceptions
+ for e in executor.exceptions:
+ self._handle_exception(e)
+
+ results = executor.returns or []
+ finally:
+ executor.close()
+
+ results.insert(0, main)
+
+ return main, results
+
+ def _present(self, location, origin_canonical_location,
origin_presenter_class, executor):
# Link the context to this thread
self.context.set_thread_local()
- raw = self._read(location, origin_location)
+ # Canonicalize the location
+ if self.context.reading.reader is None:
+ loader, canonical_location = self._create_loader(location,
origin_canonical_location)
+ else:
+ # If a reader is specified in the context then we skip loading
+ loader = None
+ canonical_location = location
+
+ # Skip self imports
+ if canonical_location == origin_canonical_location:
+ raise _Skip()
+
+ if self.context.presentation.cache:
+ # Is the presentation in the global cache?
+ try:
+ presentation = PRESENTATION_CACHE[canonical_location]
+ return _Result(presentation, canonical_location,
origin_canonical_location)
+ except KeyError:
+ pass
+
+ try:
+ # Is the presentation in the local cache?
+ presentation = self._cache[canonical_location]
+ return _Result(presentation, canonical_location,
origin_canonical_location)
+ except KeyError:
+ pass
+
+ # Create and cache new presentation
+ presentation = self._create_presentation(canonical_location,
loader, origin_presenter_class)
+ self._cache[canonical_location] = presentation
+ # Submit imports to executor
+ if hasattr(presentation, '_get_import_locations'):
+ import_locations =
presentation._get_import_locations(self.context)
+ if import_locations:
+ for import_location in import_locations:
+ import_location = UriLocation(import_location)
+ executor.submit(self._present, import_location,
canonical_location,
+ presentation.__class__, executor)
+
+ return _Result(presentation, canonical_location,
origin_canonical_location)
+
+ def _create_loader(self, location, origin_canonical_location):
+ loader =
self.context.loading.loader_source.get_loader(self.context.loading, location,
+
origin_canonical_location)
+
+ canonical_location = None
+
+ if origin_canonical_location is not None:
--- End diff --
Perhaps the goal wasn't clear here: a "location" is relative, while a
"canonical_location" is globally absolute. So the cache key has to be a
combination of both — hence a tuple. I will add documentation to clarify this.
---