[ 
https://issues.apache.org/jira/browse/BEAM-10917?focusedWorklogId=631221&page=com.atlassian.jira.plugin.system.issuetabpanels:worklog-tabpanel#worklog-631221
 ]

ASF GitHub Bot logged work on BEAM-10917:
-----------------------------------------

                Author: ASF GitHub Bot
            Created on: 29/Jul/21 16:48
            Start Date: 29/Jul/21 16:48
    Worklog Time Spent: 10m 
      Work Description: kmjung commented on a change in pull request #15185:
URL: https://github.com/apache/beam/pull/15185#discussion_r679318445



##########
File path: sdks/python/apache_beam/io/gcp/bigquery.py
##########
@@ -1056,23 +1054,43 @@ def deserialize_rows(self, read_rows_response, reader):
 
   def read(self, range_tracker):
     storage_client = bq_storage.BigQueryReadClient()
-    read_rows_iterator = iter(storage_client.read_rows(self.read_stream_name))
-    read_rows_response = next(read_rows_iterator)
-    avro_schema = avro.schema.Parse(read_rows_response.avro_schema.schema)
-    reader = avroio.DatumReader(avro_schema)
-    while True:
-      # self.deserialize_rows(read_rows_response, reader)
-      decoder = avroio.BinaryDecoder(
-          io.BytesIO(read_rows_response.avro_rows.serialized_binary_rows))
-      current_row = 0
-      while current_row < read_rows_response.row_count:
-        self.deserialized_rows.append(reader.read(decoder))
-        current_row += 1
-      read_rows_response = next(read_rows_iterator, None)
-      if read_rows_response is None:
-        break
-
-    return iter(self.deserialized_rows)
+    row_reader = _ReadRowsResponseReader(
+        storage_client.read_rows(self.read_stream_name))
+    return iter(row_reader)
+
+
+class _ReadRowsResponseReader():
+  def __init__(self, read_rows_responses):
+    self.read_rows_iterator = iter(read_rows_responses)
+    self.read_rows_response = next(self.read_rows_iterator)
+    self.avro_schema = avro.schema.Parse(
+        self.read_rows_response.avro_schema.schema)
+    self.reader = avroio.DatumReader(self.avro_schema)
+    self.decoder = avroio.BinaryDecoder(
+        io.BytesIO(self.read_rows_response.avro_rows.serialized_binary_rows))
+    self.current_row = 0
+
+  def __iter__(self):
+    return self
+
+  def get_deserialized_row(self):
+    deserialized_row = self.reader.read(self.decoder)
+    self.current_row += 1
+    return deserialized_row
+
+  def __next__(self):
+    if self.current_row < self.read_rows_response.row_count:
+      return self.get_deserialized_row()
+    else:

Review comment:
       Nit: you don't need an "else" clause for an early return like this.
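
In other words, a minimal sketch (the continuation after the early return is a guess at the truncated diff, not the PR's actual code):

```python
  def __next__(self):
    if self.current_row < self.read_rows_response.row_count:
      return self.get_deserialized_row()
    # No "else" needed: the early return above means control only gets
    # here once the current response is exhausted. If the stream is
    # done, next() raises StopIteration, which correctly ends iteration.
    self.read_rows_response = next(self.read_rows_iterator)
    self.decoder = avroio.BinaryDecoder(
        io.BytesIO(self.read_rows_response.avro_rows.serialized_binary_rows))
    self.current_row = 0
    return self.get_deserialized_row()
```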

##########
File path: sdks/python/apache_beam/io/gcp/bigquery.py
##########
@@ -1056,23 +1054,43 @@ def deserialize_rows(self, read_rows_response, reader):
 
   def read(self, range_tracker):
     storage_client = bq_storage.BigQueryReadClient()
-    read_rows_iterator = iter(storage_client.read_rows(self.read_stream_name))
-    read_rows_response = next(read_rows_iterator)
-    avro_schema = avro.schema.Parse(read_rows_response.avro_schema.schema)
-    reader = avroio.DatumReader(avro_schema)
-    while True:
-      # self.deserialize_rows(read_rows_response, reader)
-      decoder = avroio.BinaryDecoder(
-          io.BytesIO(read_rows_response.avro_rows.serialized_binary_rows))
-      current_row = 0
-      while current_row < read_rows_response.row_count:
-        self.deserialized_rows.append(reader.read(decoder))
-        current_row += 1
-      read_rows_response = next(read_rows_iterator, None)
-      if read_rows_response is None:
-        break
-
-    return iter(self.deserialized_rows)
+    row_reader = _ReadRowsResponseReader(
+        storage_client.read_rows(self.read_stream_name))
+    return iter(row_reader)
+
+
+class _ReadRowsResponseReader():
+  def __init__(self, read_rows_responses):
+    self.read_rows_iterator = iter(read_rows_responses)
+    self.read_rows_response = next(self.read_rows_iterator)

Review comment:
       Nit: it's possible in the case of very selective filters that you might 
get no responses when reading from a stream. You might consider deferring some 
of this initialization to the first `__next__` call.
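
A sketch of what that could look like (names taken from the diff above; only the lazy-init structure is new, this is not the PR's code):

```python
class _ReadRowsResponseReader():
  def __init__(self, read_rows_responses):
    self.read_rows_iterator = iter(read_rows_responses)
    self.read_rows_response = None  # deferred until the first __next__

  def __iter__(self):
    return self

  def __next__(self):
    if self.read_rows_response is None:
      # With a very selective row filter the stream can be empty; then
      # next() raises StopIteration here and iteration simply ends,
      # instead of __init__ failing before iteration even starts.
      self.read_rows_response = next(self.read_rows_iterator)
      self.reader = avroio.DatumReader(
          avro.schema.Parse(self.read_rows_response.avro_schema.schema))
      self.decoder = avroio.BinaryDecoder(
          io.BytesIO(self.read_rows_response.avro_rows.serialized_binary_rows))
      self.current_row = 0
    # ... per-row logic from the diff continues unchanged here.
```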

##########
File path: sdks/python/apache_beam/io/gcp/bigquery.py
##########
@@ -1056,23 +1054,43 @@ def deserialize_rows(self, read_rows_response, reader):
 
   def read(self, range_tracker):
     storage_client = bq_storage.BigQueryReadClient()
-    read_rows_iterator = iter(storage_client.read_rows(self.read_stream_name))
-    read_rows_response = next(read_rows_iterator)
-    avro_schema = avro.schema.Parse(read_rows_response.avro_schema.schema)
-    reader = avroio.DatumReader(avro_schema)
-    while True:
-      # self.deserialize_rows(read_rows_response, reader)
-      decoder = avroio.BinaryDecoder(
-          io.BytesIO(read_rows_response.avro_rows.serialized_binary_rows))
-      current_row = 0
-      while current_row < read_rows_response.row_count:
-        self.deserialized_rows.append(reader.read(decoder))
-        current_row += 1
-      read_rows_response = next(read_rows_iterator, None)
-      if read_rows_response is None:
-        break
-
-    return iter(self.deserialized_rows)
+    row_reader = _ReadRowsResponseReader(
+        storage_client.read_rows(self.read_stream_name))
+    return iter(row_reader)
+
+
+class _ReadRowsResponseReader():
+  def __init__(self, read_rows_responses):
+    self.read_rows_iterator = iter(read_rows_responses)
+    self.read_rows_response = next(self.read_rows_iterator)
+    self.avro_schema = avro.schema.Parse(
+        self.read_rows_response.avro_schema.schema)
+    self.reader = avroio.DatumReader(self.avro_schema)
+    self.decoder = avroio.BinaryDecoder(
+        io.BytesIO(self.read_rows_response.avro_rows.serialized_binary_rows))
+    self.current_row = 0

Review comment:
       Nit: in practice this holds the offset of the *next* row, not the 
current one, so the name is slightly misleading.
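
i.e. a purely hypothetical rename for clarity:

```python
    # Zero-based offset of the next row to deserialize (equivalently,
    # the number of rows already consumed from this response).
    self.next_row_offset = 0
```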




-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: [email protected]

For queries about this service, please contact Infrastructure at:
[email protected]


Issue Time Tracking
-------------------

    Worklog Id:     (was: 631221)
    Time Spent: 1h  (was: 50m)

> Implement a BigQuery bounded source using the BigQuery storage API
> ------------------------------------------------------------------
>
>                 Key: BEAM-10917
>                 URL: https://issues.apache.org/jira/browse/BEAM-10917
>             Project: Beam
>          Issue Type: New Feature
>          Components: io-py-gcp
>            Reporter: Kenneth Jung
>            Priority: P3
>          Time Spent: 1h
>  Remaining Estimate: 0h
>
> The Java SDK contains a bounded source implementation which uses the BigQuery 
> storage API to read from BigQuery. We should implement the same for Python.



--
This message was sent by Atlassian Jira
(v8.3.4#803005)
