HyukjinKwon commented on code in PR #38300:
URL: https://github.com/apache/spark/pull/38300#discussion_r1001301371
##########
connector/connect/src/main/scala/org/apache/spark/sql/connect/service/SparkConnectStreamHandler.scala:
##########
@@ -57,21 +60,67 @@ class SparkConnectStreamHandler(responseObserver: StreamObserver[Response]) exte
     processRows(request.getClientId, rows)
   }
-  private def processRows(clientId: String, rows: DataFrame) = {
+  private[connect] def processRows(clientId: String, rows: DataFrame): Unit = {
     val timeZoneId = SQLConf.get.sessionLocalTimeZone
-    val schema =
-      ByteString.copyFrom(ArrowUtils.toArrowSchema(rows.schema, timeZoneId).toByteArray)
-
-    val textSchema = rows.schema.fields.map(f => f.name).mkString("|")
-    val data = rows.collect().map(x => x.toSeq.mkString("|")).mkString("\n")
-    val bbb = proto.Response.CSVBatch.newBuilder
-      .setRowCount(-1)
-      .setData(textSchema ++ "\n" ++ data)
-      .build()
-    val response = proto.Response.newBuilder().setClientId(clientId).setCsvBatch(bbb).build()
-    // Send all the data
-    responseObserver.onNext(response)
+    // Only process up to 10MB of data.
+    val sb = new StringBuilder
+    var rowCount = 0
+    rows.toJSON
+      .toLocalIterator()
Review Comment:
Can we directly implement Arrow batches instead of switching to JSON? We have
enough time to implement this before the Spark 3.4 release.
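
For reference, a minimal sketch of what an Arrow-based path could look like,
using Spark's internal ArrowConverters.toBatchIterator helper. The method name
processRowsAsArrowBatches and the proto.Response.ArrowBatch message with its
data field are assumptions for illustration only, not the actual protocol
definition:

    // A sketch only, not the real implementation. Extra imports assumed:
    //   org.apache.spark.TaskContext
    //   org.apache.spark.sql.execution.arrow.ArrowConverters
    private[connect] def processRowsAsArrowBatches(clientId: String, rows: DataFrame): Unit = {
      val timeZoneId = SQLConf.get.sessionLocalTimeZone
      val maxRecordsPerBatch = SQLConf.get.arrowMaxRecordsPerBatch

      // Lazily pull InternalRows to the driver and encode them as Arrow IPC
      // record-batch payloads, one Array[Byte] per batch. TaskContext.get()
      // is null on the driver; toBatchIterator tolerates a null context.
      val batches = ArrowConverters.toBatchIterator(
        rows.queryExecution.executedPlan.executeToIterator(),
        rows.schema,
        maxRecordsPerBatch,
        timeZoneId,
        TaskContext.get())

      // Stream one response per Arrow batch instead of one giant string.
      batches.foreach { batch =>
        val arrowBatch = proto.Response.ArrowBatch.newBuilder // hypothetical message
          .setData(ByteString.copyFrom(batch)) // hypothetical field
          .build()
        responseObserver.onNext(
          proto.Response.newBuilder().setClientId(clientId).setArrowBatch(arrowBatch).build())
      }
    }

Streaming a response per batch would also avoid materializing the whole result
as a single string, which is why the JSON path above has to cap the payload at
10MB.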
--
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
To unsubscribe, e-mail: [email protected]
For queries about this service, please contact Infrastructure at:
[email protected]