CurtHagenlocher commented on code in PR #3657:
URL: https://github.com/apache/arrow-adbc/pull/3657#discussion_r2487185552


##########
csharp/src/Drivers/Databricks/Reader/CloudFetch/CloudFetchDownloader.cs:
##########
@@ -483,56 +483,24 @@ await this.TraceActivityAsync(async activity =>
                 }
 
                 // Process the downloaded file data
-                MemoryStream dataStream;
-                long actualSize = fileData.Length;
+                Stream dataStream;
 
-                // If the data is LZ4 compressed, decompress it
+                // If the data is LZ4 compressed, wrap it in an LZ4Stream for on-demand decompression
+                // This avoids pre-decompressing the entire file into memory
                 if (_isLz4Compressed)
                 {
-                    try
-                    {
-                        var decompressStopwatch = Stopwatch.StartNew();
-
-                        // Use shared Lz4Utilities for decompression (consolidates logic with non-CloudFetch path)
-                        var (buffer, length) = await Lz4Utilities.DecompressLz4Async(
-                            fileData,
-                            cancellationToken).ConfigureAwait(false);
-
-                        // Create the dataStream from the decompressed buffer
-                        dataStream = new MemoryStream(buffer, 0, length, writable: false, publiclyVisible: true);
-                        dataStream.Position = 0;
-                        decompressStopwatch.Stop();
-
-                        // Calculate throughput metrics
-                        double compressionRatio = (double)dataStream.Length / actualSize;
-
-                        activity?.AddEvent("cloudfetch.decompression_complete", [
-                            new("offset", downloadResult.Link.StartRowOffset),
-                            new("sanitized_url", sanitizedUrl),
-                            new("decompression_time_ms", decompressStopwatch.ElapsedMilliseconds),
-                            new("compressed_size_bytes", actualSize),
-                            new("compressed_size_kb", actualSize / 1024.0),
-                            new("decompressed_size_bytes", dataStream.Length),
-                            new("decompressed_size_kb", dataStream.Length / 1024.0),
-                            new("compression_ratio", compressionRatio)
-                        ]);
+                    // Create a MemoryStream for the compressed data
+                    var compressedStream = new MemoryStream(fileData, writable: false);
 
-                        actualSize = dataStream.Length;
-                    }
-                    catch (Exception ex)
-                    {
-                        stopwatch.Stop();
-                        activity?.AddException(ex, [
-                            new("error.context", "cloudfetch.decompression"),
-                            new("offset", downloadResult.Link.StartRowOffset),
-                            new("sanitized_url", sanitizedUrl),
-                            new("elapsed_time_ms", stopwatch.ElapsedMilliseconds)
-                        ]);
+                    // Wrap it in an LZ4Stream that will decompress chunks on-demand as ArrowStreamReader reads
+                    dataStream = LZ4Stream.Decode(compressedStream);

Review Comment:
   When you use `Table.RowCount` like this, it is likely that the query is running on the back end and not locally. If you want to ensure that the count happens locally, you can use `Table.RowCount(Table.StopFolding(catalog_sales_Table))`. If those two variations don't produce the same value, then rows are getting lost in the download.
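   
   As a sketch in M, assuming the `catalog_sales_Table` query referenced above (the `Folded`/`Local` result field names are illustrative):
   
   ```m
   let
       // May be folded to the back end as part of the source query
       foldedCount = Table.RowCount(catalog_sales_Table),
       // Table.StopFolding forces the count to run locally over the rows actually downloaded
       localCount = Table.RowCount(Table.StopFolding(catalog_sales_Table))
   in
       [Folded = foldedCount, Local = localCount]
   ```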
   
   I seem to recall that the progress reporting in Power BI Desktop can lose records at the end of the load. I tried to debug it once with the Desktop team, but there are three processes involved and the root cause is a race condition of some kind, so we didn't make much progress :(.


