grundprinzip commented on code in PR #38300:
URL: https://github.com/apache/spark/pull/38300#discussion_r1000995972
##########
connector/connect/src/main/scala/org/apache/spark/sql/connect/service/SparkConnectStreamHandler.scala:
##########
@@ -59,19 +61,48 @@ class SparkConnectStreamHandler(responseObserver: StreamObserver[Response]) exte
private def processRows(clientId: String, rows: DataFrame) = {
val timeZoneId = SQLConf.get.sessionLocalTimeZone
- val schema =
- ByteString.copyFrom(ArrowUtils.toArrowSchema(rows.schema, timeZoneId).toByteArray)
-
- val textSchema = rows.schema.fields.map(f => f.name).mkString("|")
- val data = rows.collect().map(x => x.toSeq.mkString("|")).mkString("\n")
- val bbb = proto.Response.CSVBatch.newBuilder
- .setRowCount(-1)
- .setData(textSchema ++ "\n" ++ data)
- .build()
- val response = proto.Response.newBuilder().setClientId(clientId).setCsvBatch(bbb).build()
- // Send all the data
- responseObserver.onNext(response)
+ // Only process up to 10MB of data.
+ val sb = new StringBuilder
+ var rowCount = 0
+ rows.toJSON
+ .collect()
+ .foreach(row => {
+ if (sb.size + row.size > MAX_BATCH_SIZE) {
+ val response = proto.Response.newBuilder().setClientId(clientId)
+ val batch = proto.Response.JSONBatch
+ .newBuilder()
+ .setData(ByteString.copyFromUtf8(sb.toString()))
+ .setRowCount(rowCount)
+ .build()
+ response.setJsonBatch(batch)
+ responseObserver.onNext(response.build())
+ // When the data is sent, we have to clear the batch data and reset the row count for
+ // this batch.
+ sb.clear()
Review Comment:
yes! Thank you
--
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
To unsubscribe, e-mail: [email protected]
For queries about this service, please contact Infrastructure at:
[email protected]
---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]