the-other-tim-brown commented on code in PR #18098:
URL: https://github.com/apache/hudi/pull/18098#discussion_r2957372453


##########
hudi-spark-datasource/hudi-spark/src/test/scala/org/apache/hudi/blob/TestReadBlobSQL.scala:
##########
@@ -0,0 +1,358 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.hudi.blob
+
+import org.apache.hudi.blob.BlobTestHelpers._
+import org.apache.hudi.testutils.HoodieClientTestBase
+
+import org.apache.spark.sql.functions._
+import org.junit.jupiter.api.Assertions._
+import org.junit.jupiter.api.Test
+
+/**
+ * Tests for the read_blob() SQL function.
+ *
+ * This test suite verifies:
+ * <ul>
+ *   <li>Basic SQL integration with read_blob()</li>
+ *   <li>Integration with WHERE clauses, JOINs</li>
+ *   <li>Configuration parameter handling</li>
+ *   <li>Error handling for invalid inputs</li>
+ * </ul>
+ */
+class TestReadBlobSQL extends HoodieClientTestBase {
+
+  @Test
+  def testBasicReadBlobSQL(): Unit = {
+    val binPath = createTestFile(tempDir, "basic.bin", 10000)
+
+    // Build a three-row table whose blob column points into the test file.
+    val rowData = Seq(
+      (1, "record1", binPath, 0L, 100L),
+      (2, "record2", binPath, 100L, 100L),
+      (3, "record3", binPath, 200L, 100L)
+    )
+    val frame = sparkSession.createDataFrame(rowData)
+      .toDF("id", "name", "external_path", "offset", "length")
+      .withColumn("file_info", blobStructCol("file_info", col("external_path"), col("offset"), col("length")))
+      .select("id", "name", "file_info")
+
+    frame.createOrReplaceTempView("test_table")
+
+    // Read the blobs back through SQL, filtering down to the first two rows.
+    val queryResult = sparkSession.sql("""
+      SELECT id, name, read_blob(file_info) as data
+      FROM test_table
+      WHERE id <= 2
+    """)
+
+    val collected = queryResult.collect()
+    assertEquals(2, collected.length)
+
+    // Each returned blob must be binary, 100 bytes long, and match the
+    // generator pattern at its own offset into the file.
+    val firstBytes = collected(0).getAs[Array[Byte]]("data")
+    assertEquals(100, firstBytes.length)
+    assertBytesContent(firstBytes)
+
+    val secondBytes = collected(1).getAs[Array[Byte]]("data")
+    assertEquals(100, secondBytes.length)
+    assertBytesContent(secondBytes, expectedOffset = 100)
+  }
+
+  @Test
+  def testReadBlobWithJoin(): Unit = {
+    val filePath = createTestFile(tempDir, "join.bin", 10000)
+
+    // Blob table: two rows pointing at adjacent 100-byte ranges of the file.
+    val blobDF = sparkSession.createDataFrame(Seq(
+      (1, filePath, 0L, 100L),
+      (2, filePath, 100L, 100L)
+    )).toDF("id", "external_path", "offset", "length")
+      .withColumn("file_info", blobStructCol("file_info", col("external_path"), col("offset"), col("length")))
+      .select("id", "file_info")
+
+    blobDF.createOrReplaceTempView("blob_table")
+
+    // Metadata table joined to the blob table by id.
+    val metaDF = sparkSession.createDataFrame(Seq(
+      (1, "Alice"),
+      (2, "Bob")
+    )).toDF("id", "name")
+
+    metaDF.createOrReplaceTempView("meta_table")
+
+    // read_blob() must work on a column coming from the joined side.
+    val result = sparkSession.sql("""
+      SELECT m.id, m.name, read_blob(b.file_info) as data
+      FROM meta_table m
+      JOIN blob_table b ON m.id = b.id
+      ORDER BY m.id
+    """)
+
+    val rows = result.collect()
+    assertEquals(2, rows.length)
+    assertEquals("Alice", rows(0).getAs[String]("name"))
+    assertEquals(100, rows(0).getAs[Array[Byte]]("data").length)
+    assertEquals("Bob", rows(1).getAs[String]("name"))
+    assertEquals(100, rows(1).getAs[Array[Byte]]("data").length)
+
+    // Verify the content of BOTH blobs, each against its own file offset.
+    // (Previously only the first row's bytes were checked, so a read that
+    // returned the wrong range for the joined row would have passed.)
+    assertBytesContent(rows(0).getAs[Array[Byte]]("data"))
+    assertBytesContent(rows(1).getAs[Array[Byte]]("data"), expectedOffset = 100)
+  }
+
+  @Test
+  def testReadBlobWithOrderBy(): Unit = {
+    val filePath = createTestFile(tempDir, "order.bin", 10000)
+
+    // Rows are inserted out of order on purpose; the query must sort them.
+    val df = sparkSession.createDataFrame(Seq(
+      (3, filePath, 200L, 50L),
+      (1, filePath, 0L, 50L),
+      (2, filePath, 100L, 50L)
+    )).toDF("id", "external_path", "offset", "length")
+      .withColumn("file_info", blobStructCol("file_info", col("external_path"), col("offset"), col("length")))
+      .select("id", "file_info")
+
+    df.createOrReplaceTempView("order_table")
+
+    // SQL with ORDER BY
+    val result = sparkSession.sql("""
+      SELECT id, read_blob(file_info) as data
+      FROM order_table
+      ORDER BY id
+    """)
+
+    val rows = result.collect()
+    assertEquals(3, rows.length)
+    // Verify ordering, declared blob length (50 bytes), and content for
+    // EVERY row. (Previously only the ids and the first row's content were
+    // checked; the 50-byte length was never asserted.)
+    rows.zipWithIndex.foreach { case (row, idx) =>
+      assertEquals(idx + 1, row.getAs[Int]("id"))
+      val data = row.getAs[Array[Byte]]("data")
+      assertEquals(50, data.length)
+      assertBytesContent(data, expectedOffset = idx * 100)
+    }
+  }
+
+  @Test
+  def testReadBlobInSubquery(): Unit = {
+    val path = createTestFile(tempDir, "subquery.bin", 10000)
+
+    // Three rows split across two categories; only category 'A' should
+    // survive the outer filter.
+    val source = Seq(
+      (1, "A", path, 0L, 100L),
+      (2, "A", path, 100L, 100L),
+      (3, "B", path, 200L, 100L)
+    )
+    val frame = sparkSession.createDataFrame(source)
+      .toDF("id", "category", "external_path", "offset", "length")
+      .withColumn("file_info", blobStructCol("file_info", col("external_path"), col("offset"), col("length")))
+      .select("id", "category", "file_info")
+
+    frame.createOrReplaceTempView("subquery_table")
+
+    // read_blob() runs inside the subquery; the WHERE clause filters outside it.
+    val filtered = sparkSession.sql("""
+      SELECT * FROM (
+        SELECT id, category, read_blob(file_info) as data
+        FROM subquery_table
+      ) WHERE category = 'A'
+    """)
+
+    val collected = filtered.collect()
+    assertEquals(2, collected.length)
+    collected.foreach { row =>
+      assertEquals("A", row.getAs[String]("category"))
+      assertEquals(100, row.getAs[Array[Byte]]("data").length)
+    }
+  }
+
+  @Test
+  def testConfigurationParameters(): Unit = {
+    val path = createTestFile(tempDir, "config.bin", 50000)
+
+    // Blob ranges deliberately leave a 4.9KB gap between reads so the
+    // batching configuration below actually comes into play.
+    val frame = sparkSession.createDataFrame(Seq(
+      (1, path, 0L, 100L),
+      (2, path, 5000L, 100L),
+      (3, path, 10000L, 100L)
+    )).toDF("id", "external_path", "offset", "length")
+      .withColumn("file_info", blobStructCol("file_info", col("external_path"), col("offset"), col("length")))
+      .select("id", "file_info")
+
+    frame.createOrReplaceTempView("config_table")
+
+    // withSparkConfig scopes the batching settings to this block and restores
+    // the previous configuration afterwards.
+    withSparkConfig(sparkSession, Map(
+      "hoodie.blob.batching.max.gap.bytes" -> "10000",
+      "hoodie.blob.batching.lookahead.size" -> "100"
+    )) {
+      val collected = sparkSession.sql("""
+        SELECT id, read_blob(file_info) as data
+        FROM config_table
+      """).collect()
+
+      assertEquals(3, collected.length)
+
+      // Every blob should come back complete despite the batching limits.
+      collected.foreach { row =>
+        assertEquals(100, row.getAs[Array[Byte]]("data").length)
+      }
+    }
+  }
+
+  @Test
+  def testMultipleReadBlobInSameQuery(): Unit = {

Review Comment:
   Good catch, updating the PR with the fix



-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: [email protected]

For queries about this service, please contact Infrastructure at:
[email protected]

Reply via email to