Abacn commented on code in PR #32428:
URL: https://github.com/apache/beam/pull/32428#discussion_r1761668868
##########
sdks/python/apache_beam/io/gcp/gcsio.py:
##########
@@ -262,33 +269,18 @@ def delete_batch(self, paths):
succeeded or the relevant exception if the operation failed.
"""
final_results = []
- s = 0
- if not isinstance(paths, list): paths = list(iter(paths))
- while s < len(paths):
- if (s + MAX_BATCH_OPERATION_SIZE) < len(paths):
- current_paths = paths[s:s + MAX_BATCH_OPERATION_SIZE]
- else:
- current_paths = paths[s:]
- current_batch = self.client.batch(raise_exception=False)
- with current_batch:
- for path in current_paths:
- bucket_name, blob_name = parse_gcs_path(path)
- bucket = self.client.bucket(bucket_name)
- bucket.delete_blob(blob_name)
-
- for i, path in enumerate(current_paths):
- error_code = None
- resp = current_batch._responses[i]
- if resp.status_code >= 400 and resp.status_code != 404:
- error_code = resp.status_code
- final_results.append((path, error_code))
-
- s += MAX_BATCH_OPERATION_SIZE
-
+ for path in paths:
+ error_code = None
+ try:
+ self.delete(path)
Review Comment:
I'm hesitant about whether it's worth undoing the batch operation, which may
cause an efficiency regression. In a FileIO write, batch copy and delete happen
at the end of the pipeline (not the most resource-constrained part), so I
suggest we leave batch copy and delete as is.
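
For context, a minimal sketch of the batched shape this comment argues for,
reconstructed from the removed code above (each MAX_BATCH_OPERATION_SIZE chunk
becomes one HTTP round trip; parse_gcs_path and the private batch._responses
are taken from the old implementation, so treat this as illustrative rather
than authoritative):

```python
from apache_beam.io.gcp.gcsio import MAX_BATCH_OPERATION_SIZE
from apache_beam.io.gcp.gcsio import parse_gcs_path


def delete_batch_sketch(client, paths):
  """Delete blobs in chunks, one batched request per chunk."""
  paths = list(paths)
  results = []
  for start in range(0, len(paths), MAX_BATCH_OPERATION_SIZE):
    chunk = paths[start:start + MAX_BATCH_OPERATION_SIZE]
    # raise_exception=False makes the batch record per-request responses
    # instead of raising on the first failure.
    batch = client.batch(raise_exception=False)
    with batch:
      for path in chunk:
        bucket_name, blob_name = parse_gcs_path(path)
        client.bucket(bucket_name).delete_blob(blob_name)
    for path, resp in zip(chunk, batch._responses):
      # 404 (already deleted) is treated as success, matching delete_batch.
      error = (
          resp.status_code
          if resp.status_code >= 400 and resp.status_code != 404 else None)
      results.append((path, error))
  return results
```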
##########
sdks/python/apache_beam/io/gcp/gcsio_retry.py:
##########
@@ -0,0 +1,71 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+"""
+Throttling Handler for GCSIO
+"""
+
+import inspect
+import logging
+import math
+
+from google.api_core import exceptions as api_exceptions
+from google.api_core import retry
+from google.cloud.storage.retry import DEFAULT_RETRY
+from google.cloud.storage.retry import _should_retry  # pylint: disable=protected-access
+
+from apache_beam.metrics.metric import Metrics
+from apache_beam.options.pipeline_options import GoogleCloudOptions
+
+_LOGGER = logging.getLogger(__name__)
+
+__all__ = ['DEFAULT_RETRY_WITH_THROTTLING_COUNTERS']
+
+
+class ThrottlingHandler(object):
+ _THROTTLED_SECS = Metrics.counter('gcsio', "cumulativeThrottlingSeconds")
+
+ def __call__(self, exc):
+ if isinstance(exc, api_exceptions.TooManyRequests):
+ _LOGGER.debug('Caught GCS quota error (%s), retrying.', exc.reason)
+ # TODO: revisit the logic here when the GCS client library supports error
+ # callbacks
+ frame = inspect.currentframe()
+ if frame is None:
+ _LOGGER.warning('cannot inspect the current stack frame')
+ return
+
+ prev_frame = frame.f_back
+ if prev_frame is None:
+ _LOGGER.warning('cannot inspect the caller stack frame')
+ return
+
+ # next_sleep is one of the arguments in the caller
+ # i.e. _retry_error_helper() in google/api_core/retry/retry_base.py
+ sleep_seconds = prev_frame.f_locals.get("next_sleep", 0)
+ ThrottlingHandler._THROTTLED_SECS.inc(math.ceil(sleep_seconds))
Review Comment:
In Java, incrementing a metric from a gRPC callback thread does not work
because the callback thread has no metrics container. This is most likely not a
concern in Python, but it would be good if someone could confirm that this
works.
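
One quick way to probe this in Python, assuming the delegating-counter behavior
in apache_beam.metrics (the update is routed through the thread's current
metrics container and is silently dropped when none is set, rather than
raising):

```python
from apache_beam.metrics.execution import MetricsEnvironment
from apache_beam.metrics.metric import Metrics

counter = Metrics.counter('gcsio', 'cumulativeThrottlingSeconds')

# The synchronous google-api-core Retry sleeps with time.sleep in the
# calling thread rather than handing errors to a callback thread, so the
# worker's metrics container should still be current when inc() runs.
if MetricsEnvironment.current_container() is None:
  print('no metrics container on this thread; inc() below is a no-op')
counter.inc(1)
```

If that assumption about the container lookup holds, the failure mode here
would be a silently lost count rather than an error, unlike Java.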
--
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
To unsubscribe, e-mail: [email protected]
For queries about this service, please contact Infrastructure at:
[email protected]