vandonr-amz commented on code in PR #31018:
URL: https://github.com/apache/airflow/pull/31018#discussion_r1221899088
##########
airflow/providers/amazon/aws/sensors/s3.py:
##########
@@ -144,32 +145,39 @@ def execute(self, context: Context) -> None:
super().execute(context)
else:
if not self.poke(context=context):
- self.defer(
- timeout=timedelta(seconds=self.timeout),
- trigger=S3KeyTrigger(
- bucket_name=cast(str, self.bucket_name),
- bucket_key=self.bucket_key,
- wildcard_match=self.wildcard_match,
- check_fn=self.check_fn,
- aws_conn_id=self.aws_conn_id,
- verify=self.verify,
- poke_interval=self.poke_interval,
- ),
- method_name="execute_complete",
- )
+ self._defer()
+
+ def _defer(self) -> None:
+ """Check for a keys in s3 and defers using the triggerer."""
Review Comment:
This is not really what it's doing; it only does the "defers using the
triggerer" part.
Maybe you meant to include the `if not self.poke(context=context):` check in
there?
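For example, something along these lines (a sketch only, reusing the kwargs
from the original `defer` call; the context parameter is my addition) would
make the docstring accurate:

```python
def _defer(self, context: Context) -> None:
    """Check for keys in S3 and, if none are found yet, defer using the triggerer."""
    # hypothetical: move the poke into the helper so it matches the docstring
    if not self.poke(context=context):
        self.defer(
            timeout=timedelta(seconds=self.timeout),
            trigger=S3KeyTrigger(
                bucket_name=cast(str, self.bucket_name),
                bucket_key=self.bucket_key,
                wildcard_match=self.wildcard_match,
                aws_conn_id=self.aws_conn_id,
                verify=self.verify,
                poke_interval=self.poke_interval,
            ),
            method_name="execute_complete",
        )
```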
##########
airflow/providers/amazon/aws/sensors/s3.py:
##########
@@ -93,6 +96,8 @@ def __init__(
self.check_fn = check_fn
self.aws_conn_id = aws_conn_id
self.verify = verify
+ self.deferrable = deferrable
+ self.should_check_fn = True if check_fn else False
Review Comment:
I don't see the benefit of pre-computing this. I'd inline this code where
it's used (in the `defer` call)
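For example, inside the `defer(...)` call (sketch, keeping the other kwargs
as they are):

```python
trigger=S3KeyTrigger(
    bucket_name=cast(str, self.bucket_name),
    bucket_key=self.bucket_key,
    wildcard_match=self.wildcard_match,
    aws_conn_id=self.aws_conn_id,
    verify=self.verify,
    poke_interval=self.poke_interval,
    # inlined instead of the pre-computed self.should_check_fn
    should_check_fn=bool(self.check_fn),
),
```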
##########
airflow/providers/amazon/aws/sensors/s3.py:
##########
@@ -131,6 +136,50 @@ def poke(self, context: Context):
else:
return all(self._check_key(key) for key in self.bucket_key)
+ def execute(self, context: Context) -> None:
+ """
+ Defers to Trigger class to poll for state of the job run until
Review Comment:
It defers to the trigger only if run in deferrable mode.
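e.g. a wording sketch that matches the actual behaviour:

```python
def execute(self, context: Context) -> None:
    """Poll for the key in the worker or, when ``deferrable=True``, defer
    polling to the triggerer via S3KeyTrigger.
    """
```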
##########
airflow/providers/amazon/aws/triggers/s3.py:
##########
@@ -0,0 +1,99 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+from __future__ import annotations
+
+import asyncio
+from functools import cached_property
+from typing import Any, AsyncIterator
+
+from airflow.providers.amazon.aws.hooks.s3 import S3Hook
+from airflow.triggers.base import BaseTrigger, TriggerEvent
+
+
+class S3KeyTrigger(BaseTrigger):
+ """
+    S3KeyTrigger is fired as a deferred class with params to run the task in
+    the trigger worker.
+
+ :param bucket_name: Name of the S3 bucket. Only needed when ``bucket_key``
+ is not provided as a full s3:// url.
+ :param bucket_key: The key being waited on. Supports full s3:// style url
+ or relative path from root level. When it's specified as a full s3://
+ url, please leave bucket_name as `None`.
+ :param wildcard_match: whether the bucket_key should be interpreted as a
+ Unix wildcard pattern
+ :param aws_conn_id: reference to the s3 connection
+    :param hook_params: params for the hook; optional
+ """
+
+ def __init__(
+ self,
+ bucket_name: str,
+ bucket_key: str | list[str],
+ wildcard_match: bool = False,
+ aws_conn_id: str = "aws_default",
+ poke_interval: float = 5.0,
+ should_check_fn: bool = False,
+ **hook_params: Any,
+ ):
+ super().__init__()
+ self.bucket_name = bucket_name
+ self.bucket_key = bucket_key
+ self.wildcard_match = wildcard_match
+ self.aws_conn_id = aws_conn_id
+ self.hook_params = hook_params
+ self.poke_interval = poke_interval
+ self.should_check_fn = should_check_fn
+
+ def serialize(self) -> tuple[str, dict[str, Any]]:
+ """Serialize S3KeyTrigger arguments and classpath."""
+ return (
+ "airflow.providers.amazon.aws.triggers.s3.S3KeyTrigger",
+ {
+ "bucket_name": self.bucket_name,
+ "bucket_key": self.bucket_key,
+ "wildcard_match": self.wildcard_match,
+ "aws_conn_id": self.aws_conn_id,
+ "hook_params": self.hook_params,
+ "poke_interval": self.poke_interval,
+ "should_check_fn": self.should_check_fn,
+ },
+ )
+
+    @cached_property
+    def hook(self) -> S3Hook:
+        return S3Hook(aws_conn_id=self.aws_conn_id, verify=self.hook_params.get("verify"))
+
+    async def run(self) -> AsyncIterator[TriggerEvent]:
+        """Make an asynchronous connection using S3HookAsync."""
+        try:
+            async with self.hook.async_conn as client:
+                while True:
+                    if await self.hook.check_key_async(
+                        client, self.bucket_name, self.bucket_key, self.wildcard_match
+                    ):
+                        if self.should_check_fn:
+                            s3_objects = await self.hook.get_files_async(
+                                client, self.bucket_name, self.bucket_key, self.wildcard_match
+                            )
+                            await asyncio.sleep(self.poke_interval)
+                            yield TriggerEvent({"status": "running", "files": s3_objects})
Review Comment:
I don't think this is a good idea. It means that every `poke_interval`
you're going to go back to the scheduler to find a worker, only to run a
method and send the job back to the triggerer to sleep one more interval...
Maybe I'm too worried, but it's going to put a lot of pressure on the
scheduler, and it's also a lot of back and forth between workers and the
triggerer, which would tilt the balance towards not using deferrable mode in
that case.
Or we need to find a way to pass whatever we want to do in that `check_fn`
to the triggerer. Maybe a [JMESPath](https://jmespath.org/) expression?
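For illustration, a rough sketch of that idea, assuming a hypothetical
`check_expr` JMESPath string is serialized to the trigger in place of
`should_check_fn` (`jmespath` is already a dependency of botocore, so it is
importable in the triggerer):

```python
import jmespath  # already pulled in by botocore


class S3KeyTrigger(BaseTrigger):
    # ... __init__/serialize as above, with check_expr instead of should_check_fn ...

    async def run(self) -> AsyncIterator[TriggerEvent]:
        """Poll S3 in the triggerer and evaluate the predicate there too."""
        async with self.hook.async_conn as client:
            while True:
                if await self.hook.check_key_async(
                    client, self.bucket_name, self.bucket_key, self.wildcard_match
                ):
                    if self.check_expr:  # hypothetical JMESPath expression
                        s3_objects = await self.hook.get_files_async(
                            client, self.bucket_name, self.bucket_key, self.wildcard_match
                        )
                        if not jmespath.search(self.check_expr, s3_objects):
                            # predicate not met: stay in the triggerer instead
                            # of yielding back to a worker every interval
                            await asyncio.sleep(self.poke_interval)
                            continue
                    yield TriggerEvent({"status": "success"})
                    return
                await asyncio.sleep(self.poke_interval)
```

That way the task only comes back to a worker once, when the result is final.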
--
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
To unsubscribe, e-mail: [email protected]
For queries about this service, please contact Infrastructure at:
[email protected]