potiuk commented on a change in pull request #10956:
URL: https://github.com/apache/airflow/pull/10956#discussion_r492188003



##########
File path: airflow/jobs/scheduler_job.py
##########
@@ -1179,39 +832,50 @@ def __get_concurrency_maps(
 
     # pylint: disable=too-many-locals,too-many-statements
     @provide_session
-    def _find_executable_task_instances(
-        self,
-        simple_dag_bag: SimpleDagBag,
-        session: Session = None
-    ) -> List[TI]:
+    def _executable_task_instances_to_queued(self, max_tis: int, session: 
Session = None) -> List[TI]:
         """
         Finds TIs that are ready for execution with respect to pool limits,
         dag concurrency, executor state, and priority.
 
-        :param simple_dag_bag: TaskInstances associated with DAGs in the
-            simple_dag_bag will be fetched from the DB and executed
-        :type simple_dag_bag: airflow.utils.dag_processing.SimpleDagBag
+        :param max_tis: Maximum number of TIs to queue in this loop.
+        :type max_tis: int
         :return: list[airflow.models.TaskInstance]
         """
         executable_tis: List[TI] = []
 
+        # Get the pool settings. We get a lock on the pool rows, treating this 
as a "critical section"
+        # Throws an exception if lock cannot be obtained, rather than blocking
+        pools = models.Pool.slots_stats(with_for_update=nowait(session), 
session=session)
+
+        # If the pools are full, there is no point doing anything!
+        # If _somehow_ the pool is overfull, don't let the limit go negative - 
it breaks SQL
+        pool_slots_free = max(0, sum(map(operator.itemgetter('open'), 
pools.values())))
+
+        if pool_slots_free == 0:
+            self.log.debug("All pools are full!")
+            return executable_tis
+
+        max_tis = min(max_tis, pool_slots_free)
+
         # Get all task instances associated with scheduled
         # DagRuns which are not backfilled, in the given states,
         # and the dag is not paused
         task_instances_to_examine: List[TI] = (
             session
             .query(TI)
-            .filter(TI.dag_id.in_(simple_dag_bag.dag_ids))
-            .outerjoin(
-                DR, and_(DR.dag_id == TI.dag_id, DR.execution_date == 
TI.execution_date)
-            )
-            .filter(or_(DR.run_id.is_(None), DR.run_type != 
DagRunType.BACKFILL_JOB.value))
-            .outerjoin(DM, DM.dag_id == TI.dag_id)
-            .filter(or_(DM.dag_id.is_(None), not_(DM.is_paused)))
+            .outerjoin(TI.dag_run)
+            .filter(or_(DR.run_id.is_(None),
+                        DR.run_type != DagRunType.BACKFILL_JOB.value))
+            .join(TI.dag_model)
+            .filter(not_(DM.is_paused))
             .filter(TI.state == State.SCHEDULED)
+            .options(selectinload('dag_model'))
+            .limit(max_tis)
+            .with_for_update(of=TI, **skip_locked(session=session))

Review comment:
      I see. I still have to wrap my head around the locking consequences here. I understand that the only tables locked here will be TI and DR; the DM is loaded via "selectinload", so it is likely not locked.
   
    I really just want to understand the consequences and risks. I think at the end we should come up with some "cautious" recommendations for the users. After looking at the potential issues and gains, my current feeling (though it needs a bit deeper thinking and testing) is that:
   
    * MySQL 5.7 -> go with 1 scheduler (and an escape hatch if possible/easy to implement). Experiment if you're adventurous, but expect moderate gains and a higher risk of concurrency-related problems.
    * MySQL 8 -> should work fine and bring the expected gains, with a much lower risk of concurrency problems.
   
   




----------------------------------------------------------------
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

For queries about this service, please contact Infrastructure at:
[email protected]


Reply via email to