jadewang-db commented on code in PR #3657:
URL: https://github.com/apache/arrow-adbc/pull/3657#discussion_r2482675936


##########
csharp/src/Drivers/Databricks/Reader/CloudFetch/CloudFetchDownloader.cs:
##########
@@ -483,56 +483,24 @@ await this.TraceActivityAsync(async activity =>
                 }
 
                 // Process the downloaded file data
-                MemoryStream dataStream;
-                long actualSize = fileData.Length;
+                Stream dataStream;
 
-                // If the data is LZ4 compressed, decompress it
+                // If the data is LZ4 compressed, wrap it in an LZ4Stream for on-demand decompression
+                // This avoids pre-decompressing the entire file into memory
                 if (_isLz4Compressed)
                 {
-                    try
-                    {
-                        var decompressStopwatch = Stopwatch.StartNew();
-
-                        // Use shared Lz4Utilities for decompression (consolidates logic with non-CloudFetch path)
-                        var (buffer, length) = await Lz4Utilities.DecompressLz4Async(
-                            fileData,
-                            cancellationToken).ConfigureAwait(false);
-
-                        // Create the dataStream from the decompressed buffer
-                        dataStream = new MemoryStream(buffer, 0, length, writable: false, publiclyVisible: true);
-                        dataStream.Position = 0;
-                        decompressStopwatch.Stop();
-
-                        // Calculate throughput metrics
-                        double compressionRatio = (double)dataStream.Length / actualSize;
-
-                        activity?.AddEvent("cloudfetch.decompression_complete", [
-                            new("offset", downloadResult.Link.StartRowOffset),
-                            new("sanitized_url", sanitizedUrl),
-                            new("decompression_time_ms", decompressStopwatch.ElapsedMilliseconds),
-                            new("compressed_size_bytes", actualSize),
-                            new("compressed_size_kb", actualSize / 1024.0),
-                            new("decompressed_size_bytes", dataStream.Length),
-                            new("decompressed_size_kb", dataStream.Length / 1024.0),
-                            new("compression_ratio", compressionRatio)
-                        ]);
+                    // Create a MemoryStream for the compressed data
+                    var compressedStream = new MemoryStream(fileData, writable: false);
 
-                        actualSize = dataStream.Length;
-                    }
-                    catch (Exception ex)
-                    {
-                        stopwatch.Stop();
-                        activity?.AddException(ex, [
-                            new("error.context", "cloudfetch.decompression"),
-                            new("offset", downloadResult.Link.StartRowOffset),
-                            new("sanitized_url", sanitizedUrl),
-                            new("elapsed_time_ms", stopwatch.ElapsedMilliseconds)
-                        ]);
+                    // Wrap it in an LZ4Stream that will decompress chunks on-demand as ArrowStreamReader reads
+                    dataStream = LZ4Stream.Decode(compressedStream);

Review Comment:
   Are we moving decompression into read time?
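   
   (A hedged aside, not part of this PR: `LZ4Stream.Decode` defers the decompression work until the reader pulls data, so a stopwatch around the decode call no longer measures it. If the `decompression_time_ms` metric still matters, one option is a pass-through `Stream` that accumulates time spent in `Read`/`ReadAsync`. A minimal sketch, assuming the K4os `LZ4Stream` API used above; the `TimedReadStream` name and usage are hypothetical:)
   
   ```csharp
   using System;
   using System.Diagnostics;
   using System.IO;
   using System.Threading;
   using System.Threading.Tasks;
   
   // Pass-through stream that accumulates the time spent inside Read calls,
   // so the cost of on-demand LZ4 decompression can still be reported per file.
   internal sealed class TimedReadStream : Stream
   {
       private readonly Stream _inner;
       private readonly Stopwatch _readTime = new Stopwatch();
   
       public TimedReadStream(Stream inner) => _inner = inner;
   
       // Total time spent in reads so far (decompression happens inside these calls).
       public TimeSpan ElapsedReadTime => _readTime.Elapsed;
   
       public override int Read(byte[] buffer, int offset, int count)
       {
           _readTime.Start();
           try { return _inner.Read(buffer, offset, count); }
           finally { _readTime.Stop(); }
       }
   
       public override async Task<int> ReadAsync(byte[] buffer, int offset, int count, CancellationToken cancellationToken)
       {
           _readTime.Start();
           try { return await _inner.ReadAsync(buffer, offset, count, cancellationToken).ConfigureAwait(false); }
           finally { _readTime.Stop(); }
       }
   
       public override bool CanRead => _inner.CanRead;
       public override bool CanSeek => false;
       public override bool CanWrite => false;
       public override long Length => throw new NotSupportedException();
       public override long Position
       {
           get => throw new NotSupportedException();
           set => throw new NotSupportedException();
       }
       public override void Flush() { }
       public override long Seek(long offset, SeekOrigin origin) => throw new NotSupportedException();
       public override void SetLength(long value) => throw new NotSupportedException();
       public override void Write(byte[] buffer, int offset, int count) => throw new NotSupportedException();
   
       protected override void Dispose(bool disposing)
       {
           if (disposing) _inner.Dispose();
           base.Dispose(disposing);
       }
   }
   
   // Hypothetical use at this call site:
   //   dataStream = new TimedReadStream(LZ4Stream.Decode(compressedStream));
   // then emit ElapsedReadTime once the reader finishes with the stream.
   ```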



-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: [email protected]

For queries about this service, please contact Infrastructure at:
[email protected]
