Github user icexelloss commented on a diff in the pull request:
https://github.com/apache/spark/pull/18732#discussion_r142438142
--- Diff: sql/core/src/main/scala/org/apache/spark/sql/execution/python/ArrowEvalPythonExec.scala ---
@@ -44,14 +66,24 @@ case class ArrowEvalPythonExec(udfs: Seq[PythonUDF], output: Seq[Attribute], chi
     val schemaOut = StructType.fromAttributes(output.drop(child.output.length).zipWithIndex
       .map { case (attr, i) => attr.withName(s"_$i") })
+ val batchSize = conf.arrowMaxRecordsPerBatch
+
+ val batchIter = if (batchSize > 0) {
+ new BatchIterator(iter, batchSize)
+ } else if (batchSize == 0) {
+ Iterator(iter)
+ } else {
+      throw new IllegalArgumentException(s"MaxRecordsPerBatch must be >= 0, but is $batchSize")
--- End diff ---
Fixed.
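
For readers following the thread: a minimal sketch of the batching behavior introduced in the diff above. This is an illustration only, not the actual Spark BatchIterator; it assumes a wrapper that turns one input iterator into an iterator of inner iterators, each capped at batchSize elements, matching how the code above falls back to a single batch when batchSize is 0.

    // Illustrative sketch, not the Spark implementation.
    class BatchIterator[T](iter: Iterator[T], batchSize: Int)
      extends Iterator[Iterator[T]] {

      override def hasNext: Boolean = iter.hasNext

      // Each batch is an inner iterator that yields at most batchSize
      // elements. The caller must drain a batch before requesting the
      // next one, since both views share the same underlying iterator.
      override def next(): Iterator[T] = new Iterator[T] {
        private var count = 0
        override def hasNext: Boolean = count < batchSize && iter.hasNext
        override def next(): T = {
          count += 1
          iter.next()
        }
      }
    }

    // Usage: new BatchIterator(Iterator(1, 2, 3, 4, 5), 2) yields the
    // batches (1, 2), (3, 4), (5) when each batch is fully consumed.
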
---