sadikovi commented on code in PR #37485:
URL: https://github.com/apache/spark/pull/37485#discussion_r944776977
##########
sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/parquet/ParquetDeltaEncodingSuite.scala:
##########
@@ -231,19 +230,33 @@ abstract class ParquetDeltaEncodingSuite[T] extends ParquetCompatibilityTest
 }
private def shouldReadAndWrite(data: Array[T], length: Int): Unit = {
- writeData(data, length)
- reader = new VectorizedDeltaBinaryPackedReader
- val page = writer.getBytes.toByteArray
+    // SPARK-40052: Check that we can handle direct and non-direct byte buffers depending on the
+    // implementation of ByteBufferInputStream.
+ for (useDirect <- Seq(true, false)) {
+ writeData(data, length)
+ reader = new VectorizedDeltaBinaryPackedReader
+ val page = writer.getBytes.toByteArray
+
+ assert(estimatedSize(length) >= page.length)
+      writableColumnVector = new OnHeapColumnVector(data.length, getSparkSqlType)
+
+ val buf = if (useDirect) {
+ ByteBuffer.allocateDirect(page.length)
+ } else {
+ ByteBuffer.allocate(page.length)
+ }
+ buf.put(page)
+ buf.rewind()
Review Comment:
   In this case, it is probably equivalent as the capacity is set to the
page length, but yeah, I can update.
--
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
To unsubscribe, e-mail: [email protected]
For queries about this service, please contact Infrastructure at:
[email protected]
---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]