Kontinuation commented on code in PR #36:
URL: https://github.com/apache/sedona-db/pull/36#discussion_r2329326935


##########
rust/sedona/src/record_batch_reader_provider.rs:
##########
@@ -88,26 +88,78 @@ impl TableProvider for RecordBatchReaderProvider {
         _filters: &[Expr],
         limit: Option<usize>,
     ) -> Result<Arc<dyn ExecutionPlan>> {
-        let mut writable_reader = self.reader.try_write().map_err(|_| {
-            DataFusionError::Internal("Failed to acquire lock on RecordBatchReader".to_string())
-        })?;
-        if let Some(reader) = writable_reader.take() {
+        let mut reader_guard = self.reader.lock();
+        if let Some(reader) = reader_guard.take() {
             Ok(Arc::new(RecordBatchReaderExec::new(reader, limit)))
         } else {
             sedona_internal_err!("Can't scan RecordBatchReader provider more than once")
         }
     }
 }
 
+/// An iterator that limits the number of rows from a RecordBatchReader
+struct RowLimitedIterator {
+    reader: Option<Box<dyn RecordBatchReader + Send>>,
+    limit: usize,
+    rows_consumed: usize,
+}
+
+impl RowLimitedIterator {
+    fn new(reader: Box<dyn RecordBatchReader + Send>, limit: usize) -> Self {
+        Self {
+            reader: Some(reader),
+            limit,
+            rows_consumed: 0,
+        }
+    }
+}
+
+impl Iterator for RowLimitedIterator {
+    type Item = Result<arrow_array::RecordBatch>;
+
+    fn next(&mut self) -> Option<Self::Item> {
+        // Check if we have already consumed enough rows
+        if self.rows_consumed >= self.limit {
+            self.reader = None;

Review Comment:
   I'd like to keep the invariant that once the iterator is exhausted, `reader` will be set to `None`.
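   
   For illustration, here is a minimal sketch (not this PR's exact code) of a `next()` that keeps that invariant on all three exit paths: the limit being hit, the underlying reader draining, and an error being surfaced. The imports and the `DataFusionError::from` conversion are assumptions about the surrounding crate:
   
   ```rust
   use arrow_array::{RecordBatch, RecordBatchReader};
   use datafusion_common::{DataFusionError, Result};
   
   struct RowLimitedIterator {
       reader: Option<Box<dyn RecordBatchReader + Send>>,
       limit: usize,
       rows_consumed: usize,
   }
   
   impl Iterator for RowLimitedIterator {
       type Item = Result<RecordBatch>;
   
       fn next(&mut self) -> Option<Self::Item> {
           // Invariant: `reader` is `None` whenever the iterator is exhausted.
           if self.rows_consumed >= self.limit {
               self.reader = None;
               return None;
           }
           let reader = self.reader.as_mut()?;
           match reader.next() {
               Some(Ok(batch)) => {
                   // Truncate the batch if it would overshoot the limit
                   let remaining = self.limit - self.rows_consumed;
                   let batch = if batch.num_rows() > remaining {
                       batch.slice(0, remaining)
                   } else {
                       batch
                   };
                   self.rows_consumed += batch.num_rows();
                   if self.rows_consumed >= self.limit {
                       self.reader = None; // exhausted by limit
                   }
                   Some(Ok(batch))
               }
               Some(Err(e)) => {
                   self.reader = None; // exhausted by error
                   Some(Err(DataFusionError::from(e)))
               }
               None => {
                   self.reader = None; // underlying reader drained
                   None
               }
           }
       }
   }
   ```
   
   Dropping the reader on every exit path also releases whatever resources it holds as soon as the scan is done, rather than keeping them alive for the lifetime of the iterator.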



-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: [email protected]

For queries about this service, please contact Infrastructure at:
[email protected]
