kmjung commented on a change in pull request #15185:
URL: https://github.com/apache/beam/pull/15185#discussion_r679318445
##########
File path: sdks/python/apache_beam/io/gcp/bigquery.py
##########
@@ -1056,23 +1054,43 @@ def deserialize_rows(self, read_rows_response, reader):
def read(self, range_tracker):
storage_client = bq_storage.BigQueryReadClient()
- read_rows_iterator = iter(storage_client.read_rows(self.read_stream_name))
- read_rows_response = next(read_rows_iterator)
- avro_schema = avro.schema.Parse(read_rows_response.avro_schema.schema)
- reader = avroio.DatumReader(avro_schema)
- while True:
- # self.deserialize_rows(read_rows_response, reader)
- decoder = avroio.BinaryDecoder(
- io.BytesIO(read_rows_response.avro_rows.serialized_binary_rows))
- current_row = 0
- while current_row < read_rows_response.row_count:
- self.deserialized_rows.append(reader.read(decoder))
- current_row += 1
- read_rows_response = next(read_rows_iterator, None)
- if read_rows_response is None:
- break
-
- return iter(self.deserialized_rows)
+ row_reader = _ReadRowsResponseReader(
+ storage_client.read_rows(self.read_stream_name))
+ return iter(row_reader)
+
+
+class _ReadRowsResponseReader():
+ def __init__(self, read_rows_responses):
+ self.read_rows_iterator = iter(read_rows_responses)
+ self.read_rows_response = next(self.read_rows_iterator)
+ self.avro_schema = avro.schema.Parse(
+ self.read_rows_response.avro_schema.schema)
+ self.reader = avroio.DatumReader(self.avro_schema)
+ self.decoder = avroio.BinaryDecoder(
+ io.BytesIO(self.read_rows_response.avro_rows.serialized_binary_rows))
+ self.current_row = 0
+
+ def __iter__(self):
+ return self
+
+ def get_deserialized_row(self):
+ deserialized_row = self.reader.read(self.decoder)
+ self.current_row += 1
+ return deserialized_row
+
+ def __next__(self):
+ if self.current_row < self.read_rows_response.row_count:
+ return self.get_deserialized_row()
+ else:
Review comment:
Nit: you don't need an "else" clause for an early return like this.
##########
File path: sdks/python/apache_beam/io/gcp/bigquery.py
##########
@@ -1056,23 +1054,43 @@ def deserialize_rows(self, read_rows_response, reader):
def read(self, range_tracker):
storage_client = bq_storage.BigQueryReadClient()
- read_rows_iterator = iter(storage_client.read_rows(self.read_stream_name))
- read_rows_response = next(read_rows_iterator)
- avro_schema = avro.schema.Parse(read_rows_response.avro_schema.schema)
- reader = avroio.DatumReader(avro_schema)
- while True:
- # self.deserialize_rows(read_rows_response, reader)
- decoder = avroio.BinaryDecoder(
- io.BytesIO(read_rows_response.avro_rows.serialized_binary_rows))
- current_row = 0
- while current_row < read_rows_response.row_count:
- self.deserialized_rows.append(reader.read(decoder))
- current_row += 1
- read_rows_response = next(read_rows_iterator, None)
- if read_rows_response is None:
- break
-
- return iter(self.deserialized_rows)
+ row_reader = _ReadRowsResponseReader(
+ storage_client.read_rows(self.read_stream_name))
+ return iter(row_reader)
+
+
+class _ReadRowsResponseReader():
+ def __init__(self, read_rows_responses):
+ self.read_rows_iterator = iter(read_rows_responses)
+ self.read_rows_response = next(self.read_rows_iterator)
Review comment:
Nit: it's possible, in the case of very selective filters, that you might
get no responses when reading from a stream. You might consider deferring some
of this initialization to the first `__next__` call.
##########
File path: sdks/python/apache_beam/io/gcp/bigquery.py
##########
@@ -1056,23 +1054,43 @@ def deserialize_rows(self, read_rows_response, reader):
def read(self, range_tracker):
storage_client = bq_storage.BigQueryReadClient()
- read_rows_iterator = iter(storage_client.read_rows(self.read_stream_name))
- read_rows_response = next(read_rows_iterator)
- avro_schema = avro.schema.Parse(read_rows_response.avro_schema.schema)
- reader = avroio.DatumReader(avro_schema)
- while True:
- # self.deserialize_rows(read_rows_response, reader)
- decoder = avroio.BinaryDecoder(
- io.BytesIO(read_rows_response.avro_rows.serialized_binary_rows))
- current_row = 0
- while current_row < read_rows_response.row_count:
- self.deserialized_rows.append(reader.read(decoder))
- current_row += 1
- read_rows_response = next(read_rows_iterator, None)
- if read_rows_response is None:
- break
-
- return iter(self.deserialized_rows)
+ row_reader = _ReadRowsResponseReader(
+ storage_client.read_rows(self.read_stream_name))
+ return iter(row_reader)
+
+
+class _ReadRowsResponseReader():
+ def __init__(self, read_rows_responses):
+ self.read_rows_iterator = iter(read_rows_responses)
+ self.read_rows_response = next(self.read_rows_iterator)
+ self.avro_schema = avro.schema.Parse(
+ self.read_rows_response.avro_schema.schema)
+ self.reader = avroio.DatumReader(self.avro_schema)
+ self.decoder = avroio.BinaryDecoder(
+ io.BytesIO(self.read_rows_response.avro_rows.serialized_binary_rows))
+ self.current_row = 0
Review comment:
Nit: this looks like the offset of the next row, in practice.
--
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
To unsubscribe, e-mail: [email protected]
For queries about this service, please contact Infrastructure at:
[email protected]