chamikaramj commented on a change in pull request #15185:
URL: https://github.com/apache/beam/pull/15185#discussion_r693150664
##########
File path: sdks/python/apache_beam/io/gcp/bigquery.py
##########
@@ -883,6 +893,275 @@ def _export_files(self, bq):
return table.schema, metadata_list
+class _CustomBigQueryStorageSourceBase(BoundedSource):
+ """A base class for BoundedSource implementations which read from BigQuery
+ using the BigQuery Storage API.
+
+ Args:
+ table (str, TableReference): The ID of the table. The ID must contain only
+      letters ``a-z``, ``A-Z``, numbers ``0-9``, or underscores ``_``. If
+ **dataset** argument is :data:`None` then the table argument must
+ contain the entire table reference specified as:
+ ``'PROJECT:DATASET.TABLE'`` or must specify a TableReference.
+ dataset (str): Optional ID of the dataset containing this table or
+ :data:`None` if the table argument specifies a TableReference.
+ project (str): Optional ID of the project containing this table or
+ :data:`None` if the table argument specifies a TableReference.
+ selected_fields (List[str]): Optional List of names of the fields in the
+ table that should be read. If empty, all fields will be read. If the
+      specified field is a nested field, all the sub-fields in the field
+      will be selected. The output field order is unrelated to the order of
+      fields in selected_fields.
+ row_restriction (str): Optional SQL text filtering statement, similar to a
+ WHERE clause in a query. Aggregates are not supported. Restricted to a
+      maximum length of 1 MB.
+ """
+
+ # The maximum number of streams which will be requested when creating a read
+ # session, regardless of the desired bundle size.
+ MAX_SPLIT_COUNT = 10000
+ # The minimum number of streams which will be requested when creating a read
+  # session, regardless of the desired bundle size. Note that the server may
+  # still choose to return fewer streams than requested based on the layout
+  # of the table.
+ MIN_SPLIT_COUNT = 10
+
+ def __init__(
+ self,
+ table: Union[str, TableReference],
+ dataset: Optional[str] = None,
+ project: Optional[str] = None,
+ selected_fields: Optional[List[str]] = None,
+ row_restriction: Optional[str] = None,
+ use_fastavro_for_direct_read: Optional[bool] = None,
+ pipeline_options: Optional[GoogleCloudOptions] = None):
+
+ self.table_reference = bigquery_tools.parse_table_reference(
+ table, dataset, project)
+ self.table = self.table_reference.tableId
+ self.dataset = self.table_reference.datasetId
+ self.project = self.table_reference.projectId
+ self.selected_fields = selected_fields
+ self.row_restriction = row_restriction
+    self.use_fastavro = (
+        True if use_fastavro_for_direct_read is None else
+        use_fastavro_for_direct_read)
+ self.pipeline_options = pipeline_options
+ self.split_result = None
+
+ def _get_parent_project(self):
+ """Returns the project that will be billed."""
+ project = self.pipeline_options.view_as(GoogleCloudOptions).project
+ if isinstance(project, vp.ValueProvider):
+ project = project.get()
+ if not project:
+ project = self.project
+ return project
+
+ def _get_table_size(self, table, dataset, project):
+ if project is None:
+ project = self._get_parent_project()
+
+ bq = bigquery_tools.BigQueryWrapper()
+ table = bq.get_table(project, dataset, table)
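+    # Note: numBytes is the total size of the table; any row_restriction or
+    # selected_fields filtering is not reflected here (see estimate_size).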
+ return table.numBytes
+
+ def display_data(self):
+ return {
+ 'project': str(self.project),
+ 'dataset': str(self.dataset),
+ 'table': str(self.table),
+ 'selected_fields': str(self.selected_fields),
+ 'row_restriction': str(self.row_restriction),
+ 'use_fastavro': str(self.use_fastavro)
+ }
+
+ def estimate_size(self):
+ # Returns the pre-filtering size of the table being read.
+ return self._get_table_size(self.table, self.dataset, self.project)
+
+  def split(
+      self, desired_bundle_size, start_position=None, stop_position=None):
+ requested_session = bq_storage.types.ReadSession()
+ requested_session.table = 'projects/{}/datasets/{}/tables/{}'.format(
+ self.project, self.dataset, self.table)
+ requested_session.data_format = bq_storage.types.DataFormat.AVRO
+ if self.selected_fields is not None:
+ requested_session.read_options.selected_fields = self.selected_fields
+ if self.row_restriction is not None:
+ requested_session.read_options.row_restriction = self.row_restriction
+
+ storage_client = bq_storage.BigQueryReadClient()
+ stream_count = 0
+ if desired_bundle_size > 0:
+ table_size = self._get_table_size(self.table, self.dataset, self.project)
+ stream_count = min(
+ int(table_size / desired_bundle_size),
+ _CustomBigQueryStorageSourceBase.MAX_SPLIT_COUNT)
+ stream_count = max(
+ stream_count, _CustomBigQueryStorageSourceBase.MIN_SPLIT_COUNT)
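+    # As a hypothetical example, a 1 GiB table with a 64 MiB desired bundle
+    # size yields int(1073741824 / 67108864) = 16 streams, which already lies
+    # within [MIN_SPLIT_COUNT, MAX_SPLIT_COUNT] and is requested unchanged.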
+
+ parent = 'projects/{}'.format(self.project)
+ read_session = storage_client.create_read_session(
+ parent=parent,
+ read_session=requested_session,
+ max_stream_count=stream_count)
+ _LOGGER.info(
+ 'Sent BigQuery Storage API CreateReadSession request: \n %s \n'
+ 'Received response \n %s.',
+ requested_session,
+ read_session)
+
+ self.split_result = [
+ _CustomBigQueryStorageStreamSource(stream.name, self.use_fastavro)
+ for stream in read_session.streams
+ ]
+
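+    # Each stream becomes one bundle with equal weight; positions stay None
+    # because a stream is consumed in its entirety at read time.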
+ for source in self.split_result:
+ yield SourceBundle(
+ weight=1.0, source=source, start_position=None, stop_position=None)
+
+ def get_range_tracker(self, start_position, stop_position):
+ class NonePositionRangeTracker(RangeTracker):
+ """A RangeTracker that always returns positions as None. Prevents the
+ BigQuery Storage source from being read() before being split()."""
+ def start_position(self):
+ return None
+
+ def stop_position(self):
+ return None
+
+ return NonePositionRangeTracker()
+
+ def read(self, range_tracker):
+ raise NotImplementedError(
+ 'BigQuery storage source must be split before being read')
+
+
+class _CustomBigQueryStorageStreamSource(BoundedSource):
+ """A source representing a single stream in a read session."""
+ def __init__(self, read_stream_name: str, use_fastavro: bool):
+ self.read_stream_name = read_stream_name
+ self.use_fastavro = use_fastavro
+
+ def display_data(self):
+ return {
+ 'read_stream': str(self.read_stream_name),
+ }
+
+ def estimate_size(self):
+    # The size of a stream source cannot be estimated due to server-side
+    # liquid sharding.
+ # TODO: Implement progress reporting.
+ return None
+
+  def split(
+      self, desired_bundle_size, start_position=None, stop_position=None):
+    # A stream source can't be split without reading from it due to
+    # server-side liquid sharding. A split will simply return the current
+    # source for now.
+ return SourceBundle(
+ weight=1.0,
+        source=_CustomBigQueryStorageStreamSource(
+            self.read_stream_name, self.use_fastavro),
Review comment:
Seems like this was addressed.
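For reference, here is a minimal sketch of how this source might be exercised
end to end (illustrative only: `my-project`, the dataset, and the table are
placeholders, the class is internal as the leading underscore suggests, and
users would presumably go through a higher-level read transform rather than
constructing it directly):

```python
import apache_beam as beam
from apache_beam.options.pipeline_options import (
    GoogleCloudOptions, PipelineOptions)

options = PipelineOptions(['--project=my-project'])  # hypothetical project

# Construct the source directly, mirroring the __init__ signature above.
source = _CustomBigQueryStorageSourceBase(
    table='my-project:my_dataset.my_table',  # hypothetical table reference
    selected_fields=['field_a', 'field_b'],
    row_restriction='field_a > 0',
    pipeline_options=options.view_as(GoogleCloudOptions))

with beam.Pipeline(options=options) as p:
  rows = p | beam.io.Read(source)
```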
--
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
To unsubscribe, e-mail: [email protected]
For queries about this service, please contact Infrastructure at:
[email protected]