potiuk commented on a change in pull request #10956:
URL: https://github.com/apache/airflow/pull/10956#discussion_r492048248



##########
File path: airflow/jobs/backfill_job.py
##########
@@ -629,6 +629,7 @@ def _per_task_process(task, key, ti, session=None):  # pylint: disable=too-many-
             _dag_runs = ti_status.active_runs[:]
             for run in _dag_runs:
                 run.update_state(session=session)
+                session.merge(run)

Review comment:
       Can this be a separate PR?

##########
File path: airflow/jobs/scheduler_job.py
##########
@@ -1179,39 +832,50 @@ def __get_concurrency_maps(
 
     # pylint: disable=too-many-locals,too-many-statements
     @provide_session
-    def _find_executable_task_instances(
-        self,
-        simple_dag_bag: SimpleDagBag,
-        session: Session = None
-    ) -> List[TI]:
+    def _executable_task_instances_to_queued(self, max_tis: int, session: Session = None) -> List[TI]:
         """
         Finds TIs that are ready for execution with respect to pool limits,
         dag concurrency, executor state, and priority.
 
-        :param simple_dag_bag: TaskInstances associated with DAGs in the
-            simple_dag_bag will be fetched from the DB and executed
-        :type simple_dag_bag: airflow.utils.dag_processing.SimpleDagBag
+        :param max_tis: Maximum number of TIs to queue in this loop.
+        :type max_tis: int
         :return: list[airflow.models.TaskInstance]
         """
         executable_tis: List[TI] = []
 
+        # Get the pool settings. We get a lock on the pool rows, treating this as a "critical section"
+        # Throws an exception if lock cannot be obtained, rather than blocking
+        pools = models.Pool.slots_stats(with_for_update=nowait(session), session=session)
+
+        # If the pools are full, there is no point doing anything!
+        # If _somehow_ the pool is overfull, don't let the limit go negative - it breaks SQL
+        pool_slots_free = max(0, sum(map(operator.itemgetter('open'), pools.values())))
+
+        if pool_slots_free == 0:
+            self.log.debug("All pools are full!")
+            return executable_tis
+
+        max_tis = min(max_tis, pool_slots_free)
+
         # Get all task instances associated with scheduled
         # DagRuns which are not backfilled, in the given states,
         # and the dag is not paused
         task_instances_to_examine: List[TI] = (
             session
             .query(TI)
-            .filter(TI.dag_id.in_(simple_dag_bag.dag_ids))
-            .outerjoin(
-                DR, and_(DR.dag_id == TI.dag_id, DR.execution_date == TI.execution_date)
-            )
-            .filter(or_(DR.run_id.is_(None), DR.run_type != DagRunType.BACKFILL_JOB.value))
-            .outerjoin(DM, DM.dag_id == TI.dag_id)
-            .filter(or_(DM.dag_id.is_(None), not_(DM.is_paused)))
+            .outerjoin(TI.dag_run)
+            .filter(or_(DR.run_id.is_(None),
+                        DR.run_type != DagRunType.BACKFILL_JOB.value))
+            .join(TI.dag_model)
+            .filter(not_(DM.is_paused))
             .filter(TI.state == State.SCHEDULED)
+            .options(selectinload('dag_model'))
+            .limit(max_tis)
+            .with_for_update(of=TI, **skip_locked(session=session))

Review comment:
       Seems that "with_for_update of " is missing in 5.7? Seems that in this 
case TI Aand DR tables both will be locked in this case? I guess it is 
something that we should also worry about? Seems that in this case we might hit 
some limitations on parallelism in 5.7.
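
For reference, a minimal sketch of how the dialect differences could be papered over, assuming helpers shaped like the `nowait`/`skip_locked` calls used in this diff. The bodies below are an illustration, not the PR's actual implementation: they only return the extra `with_for_update()` kwargs when the backend understands them, and otherwise fall back to a plain `FOR UPDATE` with the coarser locking discussed above.

```python
# Hypothetical sketch - not the PR's airflow.utils.sqlalchemy code.
# Assumption: MySQL before 8.0 (e.g. 5.7) supports neither SKIP LOCKED,
# NOWAIT, nor FOR UPDATE OF, so no extra kwargs are returned there.
from typing import Any, Dict

from sqlalchemy.orm import Session


def skip_locked(session: Session) -> Dict[str, Any]:
    """Kwargs for Query.with_for_update() enabling SKIP LOCKED where supported."""
    dialect = session.bind.dialect
    if dialect.name != "mysql" or dialect.supports_for_update_of:
        # PostgreSQL and MySQL 8+ can skip rows another scheduler has locked.
        return {"skip_locked": True}
    return {}


def nowait(session: Session) -> Dict[str, Any]:
    """Kwargs for Query.with_for_update() enabling NOWAIT where supported."""
    dialect = session.bind.dialect
    if dialect.name != "mysql" or dialect.supports_for_update_of:
        return {"nowait": True}
    return {}
```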

##########
File path: airflow/jobs/scheduler_job.py
##########
@@ -1052,6 +703,8 @@ def __init__(
         self.max_tis_per_query: int = conf.getint('scheduler', 'max_tis_per_query')
         self.processor_agent: Optional[DagFileProcessorAgent] = None
 
+        self.dagbag = DagBag(read_dags_from_db=True)

Review comment:
       Should we separate the DagBag(read_dags_from_db=True) mode out from the DagBag functionality that reads files? Seems that DagBag(read_dags_from_db=True) is a pass-through to retrieve a DAG via its id in a lazy way?
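
To illustrate that pass-through behaviour, a rough usage sketch (illustrative only; the dag id is made up and the caching details are assumptions): with read_dags_from_db=True no DAG files are parsed, and get_dag() lazily deserializes the DAG from the serialized-DAG table on first access.

```python
# Illustrative sketch of the DB-backed, lazy lookup path - not PR code.
from airflow.models.dagbag import DagBag

# Assumption: this constructor arm does no file parsing at all.
dagbag = DagBag(read_dags_from_db=True)

# The DAG is fetched from the metadata DB (serialized_dag table) on demand
# the first time it is requested, then cached in the DagBag instance.
dag = dagbag.get_dag("example_dag_id")  # hypothetical dag id
if dag is not None:
    print(dag.dag_id, len(dag.tasks))
```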

##########
File path: airflow/jobs/scheduler_job.py
##########
@@ -1705,62 +1305,216 @@ def _run_scheduler_loop(self) -> None:
             loop_duration = loop_end_time - loop_start_time
             self.log.debug("Ran scheduling loop in %.2f seconds", 
loop_duration)
 
-            if not is_unit_test:
+            if not is_unit_test and not num_queued_tis and not num_finished_events:
+                # If the scheduler is doing things, don't sleep. This means when there is work to do, the
+                # scheduler will run "as quick as possible", but when it's stopped, it can sleep, dropping CPU
+                # usage when "idle"
                 time.sleep(self._processor_poll_interval)
 
-            if self.processor_agent.done:
+            if self.num_runs > 0 and loop_count >= self.num_runs and self.processor_agent.done:
                 self.log.info(
-                    "Exiting scheduler loop as all files have been processed 
%d times", self.num_runs
+                    "Exiting scheduler loop as requested number of runs (%d - 
got to %d) has been reached",
+                    self.num_runs, loop_count,
                 )
                 break
 
-    def _validate_and_run_task_instances(self, simple_dag_bag: SimpleDagBag) -> bool:
-        if simple_dag_bag.serialized_dags:
+    def _do_scheduling(self, session) -> int:
+        """
+        This function is where the main scheduling decisions take place. It:
+
+        - Creates any necessary DAG runs by examining the next_dagrun_create_after column of DagModel
+
+        - Finds the "next n oldest" running DAG Runs to examine for scheduling 
(n=20 by default) and tries to
+          progress state (TIs to SCHEDULED, or DagRuns to SUCCESS/FAILURE etc)
+
+          By "next oldest", we mean hasn't been examined/scheduled in the most 
time.
+
+        - Then, via a Critical Section (locking the rows of the Pool model) we queue tasks, and then send them
+          to the executor.
+
+          See docs of _critical_section_execute_task_instances for more.
+
+        :return: Number of TIs enqueued in this iteration
+        :rtype: int
+        """
+        try:
+            from sqlalchemy import event
+            expected_commit = False
+
+            # Put a check in place to make sure we don't commit unexpectedly
+            @event.listens_for(session.bind, 'commit')
+            def validate_commit(_):
+                nonlocal expected_commit
+                if expected_commit:
+                    expected_commit = False
+                    return
+                raise RuntimeError("UNEXPECTED COMMIT - THIS WILL BREAK HA 
LOCKS!")

Review comment:
       Is this something we plan to keep? We seem to catch Exceptions in various places - do we want to actually kill the scheduler in this case, or just log an error?
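
If the consensus is to keep the guard but not kill the scheduler, a hedged sketch of the log-only variant (same "commit" event listener as in the diff above, wrapped in a helper so it is self-contained; the helper name and return value are made up for illustration):

```python
# Illustrative alternative to the RuntimeError guard above: log an error
# instead of raising. Everything here is a sketch, not the PR's code.
import logging

from sqlalchemy import event
from sqlalchemy.orm import Session

log = logging.getLogger(__name__)


def guard_unexpected_commits(session: Session):
    """Attach a listener that logs (rather than raises) on unexpected commits.

    Returns a callable the caller invokes just before an intentional commit.
    """
    state = {"expected": False}

    @event.listens_for(session.bind, "commit")
    def validate_commit(_):
        if state["expected"]:
            state["expected"] = False
            return
        # An unexpected commit releases the row locks protecting the HA
        # critical section, so make it very visible - but keep running.
        log.error("Unexpected COMMIT - this will break HA locks!")

    def expect_commit():
        state["expected"] = True

    return expect_commit
```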

##########
File path: airflow/executors/base_executor.py
##########
@@ -69,16 +70,16 @@ def start(self):  # pragma: no cover
         """
 
     def queue_command(self,
-                      simple_task_instance: SimpleTaskInstance,

Review comment:
       Good one!




----------------------------------------------------------------
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

For queries about this service, please contact Infrastructure at:
[email protected]

