jedcunningham commented on code in PR #43520:
URL: https://github.com/apache/airflow/pull/43520#discussion_r1844644649
##########
airflow/jobs/scheduler_job_runner.py:
##########
@@ -1767,48 +1780,137 @@ def _send_dag_callbacks_to_processor(self, dag: DAG, callback: DagCallbackReques
self.log.debug("callback is empty")
@provide_session
- def _fail_tasks_stuck_in_queued(self, session: Session = NEW_SESSION) -> None:
+ def _handle_tasks_stuck_in_queued(self, session: Session = NEW_SESSION) -> None:
"""
- Mark tasks stuck in queued for longer than `task_queued_timeout` as failed.
+
+ Handle the scenario where a task is queued for longer than `task_queued_timeout`.
Tasks can get stuck in queued for a wide variety of reasons (e.g. celery loses
track of a task, a cluster can't further scale up its workers, etc.), but tasks
- should not be stuck in queued for a long time. This will mark tasks stuck in
- queued for longer than `self._task_queued_timeout` as failed. If the task has
- available retries, it will be retried.
+ should not be stuck in queued for a long time.
+
+ Originally, we simply marked a task as failed when it was stuck in queued for
+ too long. We found that this led to suboptimal outcomes as ideally we would like "failed"
+ to mean that a task was unable to run, instead of it meaning that we were unable to run the task.
+
+ As a compromise between always failing a stuck task and always rescheduling a stuck task (which could
Review Comment:
We don't say what the compromise is. Should reword this a bit.
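One possible completion of the sentence, inferred from the requeue logic later in this diff (suggested wording only, not the author's):

```python
# Suggested docstring rewording (illustrative; `_num_stuck_queued_retries`
# is the retry cap used in the requeue logic below):
"""
As a compromise between always failing a stuck task and always rescheduling it
(which could leave a task stuck in queued forever without informing the user),
we requeue a stuck task up to `_num_stuck_queued_retries` times before finally
marking it failed.
"""
```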
##########
providers/src/airflow/providers/celery/executors/celery_executor.py:
##########
@@ -433,31 +434,34 @@ def try_adopt_task_instances(self, tis: Sequence[TaskInstance]) -> Sequence[Task
return not_adopted_tis
+ @deprecated(
+ reason="Replaced by function `revoke_task`. Upgrade airflow core to
make this go away.",
Review Comment:
hmm, I wonder if we should instead not deprecate this, but leave a comment that it can be removed when the min airflow version for providers is 2.11?
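A minimal sketch of the comment-based alternative the reviewer suggests, assuming the method body is otherwise left untouched (wording and placement are illustrative, not part of the PR):

```python
from collections.abc import Sequence

from airflow.models.taskinstance import TaskInstance


class CeleryExecutor:  # fragment of the real executor class
    # NOTE: kept undeprecated on purpose so users don't see warnings.
    # Remove this method once the providers' minimum supported Airflow
    # version is 2.11, where core calls `revoke_task` instead.
    def cleanup_stuck_queued_tasks(self, tis: Sequence[TaskInstance]) -> Sequence[str]:
        ...
```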
##########
airflow/jobs/scheduler_job_runner.py:
##########
@@ -1767,48 +1780,137 @@ def _send_dag_callbacks_to_processor(self, dag: DAG, callback: DagCallbackReques
self.log.debug("callback is empty")
@provide_session
- def _fail_tasks_stuck_in_queued(self, session: Session = NEW_SESSION) -> None:
+ def _handle_tasks_stuck_in_queued(self, session: Session = NEW_SESSION) -> None:
"""
- Mark tasks stuck in queued for longer than `task_queued_timeout` as failed.
+
+ Handle the scenario where a task is queued for longer than `task_queued_timeout`.
Tasks can get stuck in queued for a wide variety of reasons (e.g. celery loses
track of a task, a cluster can't further scale up its workers, etc.), but tasks
- should not be stuck in queued for a long time. This will mark tasks stuck in
- queued for longer than `self._task_queued_timeout` as failed. If the task has
- available retries, it will be retried.
+ should not be stuck in queued for a long time.
+
+ Originally, we simply marked a task as failed when it was stuck in queued for
+ too long. We found that this led to suboptimal outcomes as ideally we would like "failed"
+ to mean that a task was unable to run, instead of it meaning that we were unable to run the task.
+
+ As a compromise between always failing a stuck task and always rescheduling a stuck task (which could
+ lead to tasks being stuck in queued forever without informing the user).
"""
- self.log.debug("Calling SchedulerJob._fail_tasks_stuck_in_queued
method")
+ tasks_stuck_in_queued = self._get_tis_stuck_in_queued(session)
+ for executor, stuck_tis in self._executor_to_tis(tasks_stuck_in_queued).items():
+ try:
+ for ti in stuck_tis:
+ executor.revoke_task(ti=ti)
+ self._maybe_requeue_stuck_ti(
+ ti=ti,
+ session=session,
+ )
+ except NotImplementedError:
+ # this block only gets entered if the executor has not implemented `revoke_task`.
+ # in which case, we try the fallback logic
+ # todo: remove the call to _stuck_in_queued_backcompat_logic in airflow 3.0.
+ # after 3.0, `cleanup_stuck_queued_tasks` will be removed, so we should
+ # just continue immediately.
+ self._stuck_in_queued_backcompat_logic(executor, stuck_tis)
+ continue
- tasks_stuck_in_queued = session.scalars(
+ def _get_tis_stuck_in_queued(self, session) -> Iterable[TaskInstance]:
+ """Query db for TIs that are stuck in queued."""
+ return session.scalars(
select(TI).where(
TI.state == TaskInstanceState.QUEUED,
TI.queued_dttm < (timezone.utcnow() - timedelta(seconds=self._task_queued_timeout)),
TI.queued_by_job_id == self.job.id,
)
- ).all()
+ )
- for executor, stuck_tis in self._executor_to_tis(tasks_stuck_in_queued).items():
- try:
- cleaned_up_task_instances = set(executor.cleanup_stuck_queued_tasks(tis=stuck_tis))
- for ti in stuck_tis:
- if repr(ti) in cleaned_up_task_instances:
- self.log.warning(
- "Marking task instance %s stuck in queued as
failed. "
- "If the task instance has available retries, it
will be retried.",
- ti,
- )
- session.add(
- Log(
- event="stuck in queued",
- task_instance=ti.key,
- extra=(
- "Task will be marked as failed. If the
task instance has "
- "available retries, it will be retried."
- ),
- )
- )
- except NotImplementedError:
- self.log.debug("Executor doesn't support cleanup of stuck
queued tasks. Skipping.")
+ def _maybe_requeue_stuck_ti(self, *, ti, session):
+ """
+ Requeue task if it has not been attempted too many times.
+
+ Otherwise, fail it.
+ """
+ num_times_stuck = self._get_num_times_stuck_in_queued(ti, session)
+ if num_times_stuck < self._num_stuck_queued_retries:
+ self.log.info("Task stuck in queued; will try to requeue.
task_id=%s", ti.task_id)
+ session.add(
+ Log(
+ event=TASK_STUCK_IN_QUEUED_RESCHEDULE_EVENT,
+ task_instance=ti.key,
+ extra=(
+ f"Task was in queued state for longer than
{self._task_queued_timeout} "
+ "seconds; task state will be set back to scheduled."
+ ),
+ )
+ )
+ self._reschedule_stuck_task(ti)
+ else:
+ self.log.info(
+ "Task requeue attempts exceeded max; marking failed.
task_instance=%s",
+ ti,
+ )
+ session.add(
+ Log(
+ event="stuck in queued tries exceeded",
+ task_instance=ti.key,
+ extra=f"Task was requeued more than
{self._num_stuck_queued_retries} times and will be failed.",
+ )
+ )
+ ti.set_state(TaskInstanceState.FAILED, session=session)
+
+ @deprecated(
+ reason="This is backcompat layer for older executor interface. Should
be removed in 3.0",
+ category=RemovedInAirflow3Warning,
+ action="ignore",
+ )
+ def _stuck_in_queued_backcompat_logic(self, executor, stuck_tis):
+ """
+ Try to invoke stuck in queued cleanup for older executor interface.
+
+ TODO: remove in airflow 3.0
+
+ Here we handle the case where the executor pre-dates the interface change that
+ introduced `cleanup_tasks_stuck_in_queued` and deprecated `cleanup_stuck_queued_tasks`.
+
+ """
+ with suppress(NotImplementedError):
+ for ti_repr in executor.cleanup_stuck_queued_tasks(tis=stuck_tis):
+ self.log.warning(
+ "Task instance %s stuck in queued. May be set to failed.",
Review Comment:
If we are calling `cleanup_stuck_queued_tasks`, won't it be set to failed, not "may be"?