mbutrovich commented on code in PR #3703:
URL: https://github.com/apache/datafusion-comet/pull/3703#discussion_r2942897839


##########
common/src/main/scala/org/apache/spark/sql/comet/util/Utils.scala:
##########
@@ -252,6 +255,101 @@ object Utils extends CometTypeShim {
     new ArrowReaderIterator(Channels.newChannel(ins), source)
   }
 
+  /**
+   * Coalesces many small ChunkedByteBuffers (one per source partition) into a single
+   * ChunkedByteBuffer. Without coalescing, each consumer task in a broadcast hash join
+   * deserializes N separate Arrow IPC streams (one per source partition), which dominates
+   * build-side time when partition counts are high (e.g. 200+ partitions in TPC-H Q18).
+   *
+   * We decode and append all source batches into one VectorSchemaRoot on the driver, then
+   * re-serialize once via ArrowStreamWriter. This is done on the driver (not per-task) so the
+   * cost is paid once rather than once per consumer partition.
+   */
+  def coalesceBroadcastBatches(
+      input: Iterator[ChunkedByteBuffer]): (Array[ChunkedByteBuffer], Long, Long) = {
+    val buffers = input.filterNot(_.size == 0).toArray
+    if (buffers.isEmpty) {
+      return (Array.empty, 0L, 0L)
+    }
+
+    val allocator = org.apache.comet.CometArrowAllocator
+      .newChildAllocator("broadcast-coalesce", 0, Long.MaxValue)
+    try {
+      var targetRoot: VectorSchemaRoot = null
+      var totalRows = 0L
+      var batchCount = 0
+
+      try {
+        for (bytes <- buffers) {
+          val codec = CompressionCodec.createCodec(SparkEnv.get.conf)
+          val cbbis = bytes.toInputStream()
+          val ins = new DataInputStream(codec.compressedInputStream(cbbis))
+          val channel = Channels.newChannel(ins)
+          val reader = new ArrowStreamReader(channel, allocator)
+          try {
+            // Comet decodes dictionaries during execution, so this shouldn't happen.
+            // If it does, fall back to the original uncoalesced buffers because each
+            // partition can have a different dictionary, and appending index vectors
+            // would silently mix indices from incompatible dictionaries.
+            if (!reader.getDictionaryVectors.isEmpty) {
+              logWarning(
+                "Unexpected dictionary-encoded column during BroadcastExchange coalescing; " +
+                  "skipping coalesce")
+              reader.close()
+              if (targetRoot != null) {
+                targetRoot.close()
+                targetRoot = null
+              }
+              return (buffers, 0L, 0L)
+            }
+            while (reader.loadNextBatch()) {
+              val sourceRoot = reader.getVectorSchemaRoot
+              if (targetRoot == null) {
+                targetRoot = VectorSchemaRoot.create(sourceRoot.getSchema, allocator)
+                targetRoot.allocateNew()
+              }
+              VectorSchemaRootAppender.append(targetRoot, sourceRoot)
+              totalRows += sourceRoot.getRowCount
+              batchCount += 1
+            }
+          } finally {
+            reader.close()
+          }
+        }
+
+        if (targetRoot == null) {
+          return (Array.empty, 0L, 0L)
+        }
+
+        assert(
+          targetRoot.getRowCount.toLong == totalRows,
+          s"Row count mismatch after coalesce: ${targetRoot.getRowCount} != 
$totalRows")
+
+        logInfo(s"Coalesced $batchCount broadcast batches into 1 ($totalRows 
rows)")
+
+        val outCodec = CompressionCodec.createCodec(SparkEnv.get.conf)
+        val cbbos = new ChunkedByteBufferOutputStream(1024 * 1024, ByteBuffer.allocate)

Review Comment:
   Renamed stuff a bit. I copied from the existing code above, but you're right, that's not very readable. Thanks!
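
For context, the "re-serialize once via ArrowStreamWriter" step that the docstring describes happens below the point where this hunk is truncated. The following is a rough, REPL-style sketch of that driver-side step, not the code from this PR: it assumes `targetRoot` is the coalesced VectorSchemaRoot built in the loop above, and it reuses the codec and ChunkedByteBufferOutputStream pattern visible at the end of the hunk.

```scala
// Sketch only (not from this PR): serialize the coalesced root exactly once.
import java.nio.ByteBuffer

import org.apache.arrow.vector.VectorSchemaRoot
import org.apache.arrow.vector.dictionary.DictionaryProvider
import org.apache.arrow.vector.ipc.ArrowStreamWriter
import org.apache.spark.SparkEnv
import org.apache.spark.io.CompressionCodec
import org.apache.spark.util.io.{ChunkedByteBuffer, ChunkedByteBufferOutputStream}

def serializeOnce(targetRoot: VectorSchemaRoot): ChunkedByteBuffer = {
  val codec = CompressionCodec.createCodec(SparkEnv.get.conf)
  val cbbos = new ChunkedByteBufferOutputStream(1024 * 1024, ByteBuffer.allocate)
  val out = codec.compressedOutputStream(cbbos)
  // No dictionary-encoded columns are expected here (the loop above bails out
  // if it sees any), so an empty provider is sufficient.
  val writer =
    new ArrowStreamWriter(targetRoot, new DictionaryProvider.MapDictionaryProvider(), out)
  try {
    writer.start() // schema message
    writer.writeBatch() // the single coalesced record batch
    writer.end() // end-of-stream marker
  } finally {
    // Closing the writer closes the compressed stream and, through it, cbbos.
    writer.close()
  }
  // One buffer for every consumer task to deserialize, instead of one per partition.
  cbbos.toChunkedByteBuffer
}
```

Either way, the compression and Arrow IPC framing cost is paid once on the driver, and each consumer task reads a single stream.
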



##########
common/src/main/scala/org/apache/spark/sql/comet/util/Utils.scala:
##########
@@ -252,6 +255,101 @@ object Utils extends CometTypeShim {
     new ArrowReaderIterator(Channels.newChannel(ins), source)
   }
 
+  /**
+   * Coalesces many small ChunkedByteBuffers (one per source partition) into a single
+   * ChunkedByteBuffer. Without coalescing, each consumer task in a broadcast hash join
+   * deserializes N separate Arrow IPC streams (one per source partition), which dominates
+   * build-side time when partition counts are high (e.g. 200+ partitions in TPC-H Q18).
+   *
+   * We decode and append all source batches into one VectorSchemaRoot on the driver, then
+   * re-serialize once via ArrowStreamWriter. This is done on the driver (not per-task) so the
+   * cost is paid once rather than once per consumer partition.
+   */
+  def coalesceBroadcastBatches(
+      input: Iterator[ChunkedByteBuffer]): (Array[ChunkedByteBuffer], Long, Long) = {
+    val buffers = input.filterNot(_.size == 0).toArray
+    if (buffers.isEmpty) {
+      return (Array.empty, 0L, 0L)
+    }
+
+    val allocator = org.apache.comet.CometArrowAllocator
+      .newChildAllocator("broadcast-coalesce", 0, Long.MaxValue)
+    try {
+      var targetRoot: VectorSchemaRoot = null
+      var totalRows = 0L
+      var batchCount = 0
+
+      try {
+        for (bytes <- buffers) {
+          val codec = CompressionCodec.createCodec(SparkEnv.get.conf)
+          val cbbis = bytes.toInputStream()
+          val ins = new DataInputStream(codec.compressedInputStream(cbbis))

Review Comment:
   Renamed stuff a bit. I copied from the existing code above, but you're right, that's not very readable. Thanks!
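
On the mechanism itself: `VectorSchemaRootAppender.append` copies rows from source roots into a target root with a matching schema, which is exactly why the hunk bails out on dictionary-encoded columns (appended index vectors would point into different per-partition dictionaries). A tiny REPL-style illustration, independent of this PR:

```scala
import org.apache.arrow.memory.RootAllocator
import org.apache.arrow.vector.{IntVector, VectorSchemaRoot}
import org.apache.arrow.vector.util.VectorSchemaRootAppender

val allocator = new RootAllocator(Long.MaxValue)

// Build a one-column Arrow batch holding the given values.
def makeBatch(values: Seq[Int]): VectorSchemaRoot = {
  val vec = new IntVector("v", allocator)
  vec.allocateNew(values.length)
  values.zipWithIndex.foreach { case (v, i) => vec.setSafe(i, v) }
  vec.setValueCount(values.length)
  VectorSchemaRoot.of(vec)
}

val a = makeBatch(Seq(1, 2, 3))
val b = makeBatch(Seq(4, 5))

// Same pattern as the diff: create an empty target with the shared schema,
// then append each source batch into it.
val target = VectorSchemaRoot.create(a.getSchema, allocator)
target.allocateNew()
VectorSchemaRootAppender.append(target, a, b)
assert(target.getRowCount == 5)

Seq(a, b, target).foreach(_.close())
allocator.close()
```
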



-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: [email protected]

For queries about this service, please contact Infrastructure at:
[email protected]

