VladaZakharova commented on code in PR #29260:
URL: https://github.com/apache/airflow/pull/29260#discussion_r1095885932


##########
airflow/sensors/external_task.py:
##########
@@ -360,6 +362,93 @@ def _handle_execution_date_fn(self, context) -> Any:
         return kwargs_callable(logical_date, **kwargs)
 
 
+class ExternalTaskAsyncSensor(ExternalTaskSensor):
+    """
+    Waits for a different DAG, task group, or task to complete for a specific logical date.
+
+    If both `external_task_group_id` and `external_task_id` are ``None`` (default), the sensor
+    waits for the DAG.
+    Values for `external_task_group_id` and `external_task_id` can't be set at the same time.
+
+    By default, the ExternalTaskAsyncSensor will wait for the external task to
+    succeed, at which point it will also succeed. However, by default it will
+    *not* fail if the external task fails, but will continue to check the status
+    until the sensor times out (thus giving you time to retry the external task
+    without also having to clear the sensor).
+
+    It is possible to alter the default behavior by setting states which
+    cause the sensor to fail, e.g. by setting ``allowed_states=[State.FAILED]``
+    and ``failed_states=[State.SUCCESS]`` you will flip the behaviour to get a
+    sensor which goes green when the external task *fails* and immediately goes
+    red if the external task *succeeds*!
+
+    Note that ``soft_fail`` is respected when examining the failed_states. Thus
+    if the external task enters a failed state and ``soft_fail == True`` the
+    sensor will _skip_ rather than fail. As a result, setting ``soft_fail=True``
+    and ``failed_states=[State.SKIPPED]`` will result in the sensor skipping if
+    the external task skips.
+
+    :param external_dag_id: The dag_id that contains the task you want to
+        wait for
+    :param external_task_id: The task_id that contains the task you want to
+        wait for.
+    :param external_task_ids: The list of task_ids that you want to wait for.
+        If ``None`` (default value) the sensor waits for the DAG. Either
+        external_task_id or external_task_ids can be passed to
+        ExternalTaskSensor, but not both.
+    :param allowed_states: Iterable of allowed states, default is ``['success']``
+    :param failed_states: Iterable of failed or dis-allowed states, default is ``None``
+    :param execution_delta: time difference with the previous execution to
+        look at, the default is the same logical date as the current task or DAG.
+        For yesterday, use [positive!] datetime.timedelta(days=1). Either
+        execution_delta or execution_date_fn can be passed to
+        ExternalTaskSensor, but not both.
+    :param execution_date_fn: function that receives the current execution's logical date as the first
+        positional argument and optionally any number of keyword arguments available in the
+        context dictionary, and returns the desired logical dates to query.
+        Either execution_delta or execution_date_fn can be passed to ExternalTaskSensor,
+        but not both.
+    :param check_existence: Set to `True` to check if the external task exists (when
+        external_task_id is not None) or check if the DAG to wait for exists (when
+        external_task_id is None), and immediately cease waiting if the external task
+        or DAG does not exist (default value: False).
+    :param polling_interval_seconds: Time (seconds) to wait between calls to check the run status.
+    """
+
+    def __init__(
+        self,
+        external_task_id: str | None = None,
+        external_task_ids: Collection[str] | None = None,
+        polling_interval_seconds: int = 10,
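
For context, a minimal usage sketch of the new sensor might look like the following. The DAG and task ids (`upstream_dag`, `upstream_task`, `downstream_dag`) are illustrative only, and the import path assumes the class is exposed from `airflow/sensors/external_task.py` as in this diff:

```python
from datetime import datetime, timedelta

from airflow import DAG
from airflow.sensors.external_task import ExternalTaskAsyncSensor  # path assumed from this diff
from airflow.utils.state import State

with DAG(
    dag_id="downstream_dag",  # illustrative id
    start_date=datetime(2023, 1, 1),
    schedule="@daily",
    catchup=False,
):
    # Wait until "upstream_task" in "upstream_dag" succeeds for the logical
    # date one day earlier (execution_delta is positive for "yesterday").
    wait_for_upstream = ExternalTaskAsyncSensor(
        task_id="wait_for_upstream",
        external_dag_id="upstream_dag",
        external_task_id="upstream_task",
        execution_delta=timedelta(days=1),
        allowed_states=[State.SUCCESS],
        failed_states=[State.FAILED],
        polling_interval_seconds=30,  # how often the run status is re-checked
        timeout=60 * 60,  # inherited from BaseSensorOperator: give up after an hour
    )
```

Swapping `allowed_states` and `failed_states` flips the behaviour exactly as the docstring describes, and `execution_date_fn` can replace `execution_delta` when the target logical date is not a fixed offset.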

Review Comment:
   Sure, thank you, I have updated the code with this name.



-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: [email protected]

For queries about this service, please contact Infrastructure at:
[email protected]