uranusjr commented on code in PR #32645:
URL: https://github.com/apache/airflow/pull/32645#discussion_r1268969464


##########
airflow/models/trigger.py:
##########
@@ -117,17 +117,24 @@ def clean_unused(cls, session: Session = NEW_SESSION) -> None:
         # Update all task instances with trigger IDs that are not DEFERRED to remove them
         for attempt in run_with_db_retries():
             with attempt:
-                session.query(TaskInstance).filter(
-                    TaskInstance.state != TaskInstanceState.DEFERRED, TaskInstance.trigger_id.isnot(None)
-                ).update({TaskInstance.trigger_id: None})
+                session.execute(
+                    update(TaskInstance)
+                    .where(
+                        TaskInstance.state != TaskInstanceState.DEFERRED, TaskInstance.trigger_id.is_not(None)
+                    )
+                    .values(trigger_id=None)
+                )
+
         # Get all triggers that have no task instances depending on them...
         ids = [
             trigger_id
-            for (trigger_id,) in (
-                session.query(cls.id)
-                .join(TaskInstance, cls.id == TaskInstance.trigger_id, isouter=True)
-                .group_by(cls.id)
-                .having(func.count(TaskInstance.trigger_id) == 0)
+            for trigger_id in (
+                session.scalars(
+                    select(cls.id)
+                    .join(TaskInstance, cls.id == TaskInstance.trigger_id, isouter=True)
+                    .group_by(cls.id)
+                    .having(func.count(TaskInstance.trigger_id) == 0)
+                )
             )
         ]

Review Comment:
   This is equivalent to `ids = session.scalars(...).all()`



-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: [email protected]

For queries about this service, please contact Infrastructure at:
[email protected]

Reply via email to