ashb commented on a change in pull request #11589:
URL: https://github.com/apache/airflow/pull/11589#discussion_r512897303
##########
File path: airflow/models/dagrun.py
##########
@@ -380,27 +395,21 @@ def update_state(
self.last_scheduling_decision = start_dttm
dag = self.get_dag()
- ready_tis: List[TI] = []
- tis = list(self.get_task_instances(session=session,
state=State.task_states + (State.SHUTDOWN,)))
- self.log.debug("number of tis tasks for %s: %s task(s)", self,
len(tis))
- for ti in tis:
- ti.task = dag.get_task(ti.task_id)
+ info = self.task_instance_scheduling_decisions(session)
+
+ tis = info['tis']
+ schedulable_tis = info['schedulable_tis']
+ changed_tis = info['changed_tis']
+ finished_tasks = info['finished_tasks']
+ unfinished_tasks = info['unfinished_tasks']
- unfinished_tasks = [t for t in tis if t.state in State.unfinished]
- finished_tasks = [t for t in tis if t.state in State.finished]
none_depends_on_past = all(not t.task.depends_on_past for t in
unfinished_tasks)
none_task_concurrency = all(t.task.task_concurrency is None for t in
unfinished_tasks)
- if unfinished_tasks:
- scheduleable_tasks = [ut for ut in unfinished_tasks if ut.state in
SCHEDULEABLE_STATES]
- self.log.debug(
- "number of scheduleable tasks for %s: %s task(s)",
- self, len(scheduleable_tasks))
- ready_tis, changed_tis = self._get_ready_tis(scheduleable_tasks,
finished_tasks, session)
- self.log.debug("ready tis length for %s: %s task(s)", self,
len(ready_tis))
- if none_depends_on_past and none_task_concurrency:
- # small speed up
- are_runnable_tasks = ready_tis or self._are_premature_tis(
- unfinished_tasks, finished_tasks, session) or changed_tis
+
+ if unfinished_tasks and none_depends_on_past and none_task_concurrency:
+ # small speed up
Review comment:
Bad/lazy refactor most likely.
----------------------------------------------------------------
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
For queries about this service, please contact Infrastructure at:
users@infra.apache.org