dstandish commented on code in PR #37016:
URL: https://github.com/apache/airflow/pull/37016#discussion_r1496734118
##########
airflow/jobs/scheduler_job_runner.py:
##########
@@ -1247,59 +1240,68 @@ def _create_dag_runs_dataset_triggered(
             # create a new one. This is so that in the next Scheduling loop we try to create new runs
             # instead of falling in a loop of Integrity Error.
             exec_date = exec_dates[dag.dag_id]
-            if (dag.dag_id, exec_date) not in existing_dagruns:
-                previous_dag_run = session.scalar(
-                    select(DagRun)
-                    .where(
-                        DagRun.dag_id == dag.dag_id,
-                        DagRun.execution_date < exec_date,
-                        DagRun.run_type == DagRunType.DATASET_TRIGGERED,
-                    )
-                    .order_by(DagRun.execution_date.desc())
+            curr_date_query = select(
+                exists(
+                    select(literal_column("1"))
+                    .where(DagRun.dag_id == dag.dag_id, DagRun.execution_date == exec_date)
                     .limit(1)
                 )
-                dataset_event_filters = [
-                    DagScheduleDatasetReference.dag_id == dag.dag_id,
-                    DatasetEvent.timestamp <= exec_date,
-                ]
-                if previous_dag_run:
-                    dataset_event_filters.append(DatasetEvent.timestamp > previous_dag_run.execution_date)
-
-                dataset_events = session.scalars(
-                    select(DatasetEvent)
-                    .join(
-                        DagScheduleDatasetReference,
-                        DatasetEvent.dataset_id == DagScheduleDatasetReference.dataset_id,
-                    )
-                    .join(DatasetEvent.source_dag_run)
-                    .where(*dataset_event_filters)
-                ).all()
-
-                data_interval = dag.timetable.data_interval_for_events(exec_date, dataset_events)
-                run_id = dag.timetable.generate_run_id(
-                    run_type=DagRunType.DATASET_TRIGGERED,
-                    logical_date=exec_date,
-                    data_interval=data_interval,
-                    session=session,
-                    events=dataset_events,
-                )
+            )
+            if session.scalar(curr_date_query):  # dag already exists
+                continue
-                dag_run = dag.create_dagrun(
-                    run_id=run_id,
-                    run_type=DagRunType.DATASET_TRIGGERED,
-                    execution_date=exec_date,
-                    data_interval=data_interval,
-                    state=DagRunState.QUEUED,
-                    external_trigger=False,
-                    session=session,
-                    dag_hash=dag_hash,
-                    creating_job_id=self.job.id,
+            prev_exec_date = session.scalar(
+                select(DagRun.execution_date)
+                .where(
+                    DagRun.dag_id == dag.dag_id,
+                    DagRun.execution_date < exec_date,
+                    DagRun.run_type == DagRunType.DATASET_TRIGGERED,
                 )
-                Stats.incr("dataset.triggered_dagruns")
-                dag_run.consumed_dataset_events.extend(dataset_events)
-                session.execute(
-                    delete(DatasetDagRunQueue).where(DatasetDagRunQueue.target_dag_id == dag_run.dag_id)
+                .order_by(DagRun.execution_date.desc())
+                .limit(1)
+            )
+            dataset_event_filters = [
+                DagScheduleDatasetReference.dag_id == dag.dag_id,
+                DatasetEvent.timestamp <= exec_date,
+            ]
+            if prev_exec_date:
+                dataset_event_filters.append(DatasetEvent.timestamp > prev_exec_date)
+
+            dataset_events = session.scalars(
+                select(DatasetEvent)
+                .join(
+                    DagScheduleDatasetReference,
+                    DatasetEvent.dataset_id == DagScheduleDatasetReference.dataset_id,
                 )
+                .join(DatasetEvent.source_dag_run)
+                .where(*dataset_event_filters)
Review Comment:
yeah i'll leave this one alone for now.
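
For anyone skimming the thread, here is a minimal, self-contained sketch of the two query patterns the new code relies on: a `SELECT EXISTS(SELECT 1 ...)` pre-check instead of consulting an in-memory set of existing runs, and a single-column scalar fetch instead of loading the whole previous `DagRun` row. The `Run` model, the in-memory SQLite engine, and the literal dates below are illustrative stand-ins, not the actual Airflow models or scheduler code.

```python
# Illustrative sketch only -- hypothetical Run model, not Airflow's DagRun.
from datetime import datetime, timezone

from sqlalchemy import Column, DateTime, Integer, String, create_engine, exists, literal_column, select
from sqlalchemy.orm import Session, declarative_base

Base = declarative_base()


class Run(Base):
    """Hypothetical stand-in for DagRun, just to keep the example runnable."""

    __tablename__ = "run"
    id = Column(Integer, primary_key=True)
    dag_id = Column(String, nullable=False)
    execution_date = Column(DateTime, nullable=False)


engine = create_engine("sqlite://")  # throwaway in-memory database
Base.metadata.create_all(engine)

with Session(engine) as session:
    dag_id = "example_dag"
    exec_date = datetime(2024, 1, 1, tzinfo=timezone.utc)

    # Pattern 1: SELECT EXISTS (SELECT 1 ... LIMIT 1) -- asks the database whether a
    # run for this (dag_id, execution_date) already exists, without fetching the row.
    run_exists = session.scalar(
        select(
            exists(
                select(literal_column("1"))
                .where(Run.dag_id == dag_id, Run.execution_date == exec_date)
                .limit(1)
            )
        )
    )
    if run_exists:
        pass  # mirrors the `continue` in the diff: skip creating a duplicate run

    # Pattern 2: fetch only the latest earlier execution_date as a scalar, instead of
    # loading the full previous run object just to read one attribute from it.
    prev_exec_date = session.scalar(
        select(Run.execution_date)
        .where(Run.dag_id == dag_id, Run.execution_date < exec_date)
        .order_by(Run.execution_date.desc())
        .limit(1)
    )
```

Both queries let the database answer a narrow question (does a matching row exist; what is the latest earlier execution_date) without materializing full ORM objects, which is the point of the change in the diff above.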
--
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
To unsubscribe, e-mail: [email protected]
For queries about this service, please contact Infrastructure at:
[email protected]