chirodip98 commented on code in PR #60083:
URL: https://github.com/apache/airflow/pull/60083#discussion_r2661136146


##########
providers/google/src/airflow/providers/google/cloud/operators/dataproc.py:
##########
@@ -995,35 +995,35 @@ def __init__(
 
     def execute(self, context: Context) -> None:
         hook = DataprocHook(gcp_conn_id=self.gcp_conn_id, 
impersonation_chain=self.impersonation_chain)
-        operation = self._delete_cluster(hook)
-        if not self.deferrable:
-            hook.wait_for_operation(timeout=self.timeout, 
result_retry=self.retry, operation=operation)
-            self.log.info("Cluster deleted.")
-        else:
-            try:
-                hook.get_cluster(
-                    project_id=self.project_id, region=self.region, 
cluster_name=self.cluster_name
-                )
-            except NotFound:
+        try:
+            op: operation.Operation = self._delete_cluster(hook)
+            if not self.deferrable:
+                hook.wait_for_operation(timeout=self.timeout, 
result_retry=self.retry, operation=op)
                 self.log.info("Cluster deleted.")
-                return
-            except Exception as e:
-                raise AirflowException(str(e))
-
-            end_time: float = time.time() + self.timeout
-            self.defer(
-                trigger=DataprocDeleteClusterTrigger(
-                    gcp_conn_id=self.gcp_conn_id,
-                    project_id=self.project_id,
-                    region=self.region,
-                    cluster_name=self.cluster_name,
-                    end_time=end_time,
-                    metadata=self.metadata,
-                    impersonation_chain=self.impersonation_chain,
-                    polling_interval_seconds=self.polling_interval_seconds,
-                ),
-                method_name="execute_complete",
+            else:
+                end_time: float = time.time() + self.timeout
+                self.defer(
+                    trigger=DataprocDeleteClusterTrigger(
+                        gcp_conn_id=self.gcp_conn_id,
+                        project_id=self.project_id,
+                        region=self.region,
+                        cluster_name=self.cluster_name,
+                        end_time=end_time,
+                        metadata=self.metadata,
+                        impersonation_chain=self.impersonation_chain,
+                        polling_interval_seconds=self.polling_interval_seconds,
+                    ),
+                    method_name="execute_complete",
+                )
+        except NotFound:
+            self.log.info(
+                "Cluster %s not found in region %s. Skipping deletion.", 
self.cluster_name, self.region
+            )
+            raise AirflowSkipException(

Review Comment:
   The idea here is to exit safely in case the cluster has already been 
deleted when deletion is attempted — for example, due to an idle timeout. I 
was thinking of adding a flag to enable this behaviour, such as 
raise_exception_when_notfound (or something better!), which would give users 
the opportunity to handle the scenario without failing the task altogether.
   
   Kindly suggest!



-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: [email protected]

For queries about this service, please contact Infrastructure at:
[email protected]

Reply via email to