zhengruifeng commented on code in PR #38300:
URL: https://github.com/apache/spark/pull/38300#discussion_r1000317203


##########
connector/connect/src/main/scala/org/apache/spark/sql/connect/service/SparkConnectStreamHandler.scala:
##########
@@ -59,19 +61,48 @@ class SparkConnectStreamHandler(responseObserver: StreamObserver[Response]) exte
 
   private def processRows(clientId: String, rows: DataFrame) = {
     val timeZoneId = SQLConf.get.sessionLocalTimeZone
-    val schema =
-      ByteString.copyFrom(ArrowUtils.toArrowSchema(rows.schema, timeZoneId).toByteArray)
-
-    val textSchema = rows.schema.fields.map(f => f.name).mkString("|")
-    val data = rows.collect().map(x => x.toSeq.mkString("|")).mkString("\n")
-    val bbb = proto.Response.CSVBatch.newBuilder
-      .setRowCount(-1)
-      .setData(textSchema ++ "\n" ++ data)
-      .build()
-    val response = proto.Response.newBuilder().setClientId(clientId).setCsvBatch(bbb).build()
 
-    // Send all the data
-    responseObserver.onNext(response)
+    // Only process up to 10MB of data.
+    val sb = new StringBuilder
+    var rowCount = 0
+    rows.toJSON
+      .collect()

Review Comment:
   I think we should use `toLocalIterator` instead of `collect` here to support large datasets.
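   For illustration, a rough sketch of that suggestion, assuming the size-bounded batching inside the loop stays as in this PR (`asScala` is needed because `toLocalIterator` returns a Java iterator):
   ```scala
   import scala.collection.JavaConverters._

   // Stream rows to the driver partition by partition instead of
   // materializing the whole result with collect().
   rows.toJSON
     .toLocalIterator()
     .asScala
     .foreach { row =>
       // ... same size-bounded batching as in the current diff ...
     }
   ```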



##########
connector/connect/src/main/scala/org/apache/spark/sql/connect/service/SparkConnectStreamHandler.scala:
##########
@@ -59,19 +61,48 @@ class SparkConnectStreamHandler(responseObserver: StreamObserver[Response]) exte
 
   private def processRows(clientId: String, rows: DataFrame) = {
     val timeZoneId = SQLConf.get.sessionLocalTimeZone
-    val schema =
-      ByteString.copyFrom(ArrowUtils.toArrowSchema(rows.schema, timeZoneId).toByteArray)
-
-    val textSchema = rows.schema.fields.map(f => f.name).mkString("|")
-    val data = rows.collect().map(x => x.toSeq.mkString("|")).mkString("\n")
-    val bbb = proto.Response.CSVBatch.newBuilder
-      .setRowCount(-1)
-      .setData(textSchema ++ "\n" ++ data)
-      .build()
-    val response = proto.Response.newBuilder().setClientId(clientId).setCsvBatch(bbb).build()
 
-    // Send all the data
-    responseObserver.onNext(response)
+    // Only process up to 10MB of data.
+    val sb = new StringBuilder
+    var rowCount = 0
+    rows.toJSON
+      .collect()
+      .foreach(row => {
+        if (sb.size + row.size > MAX_BATCH_SIZE) {
+          val response = proto.Response.newBuilder().setClientId(clientId)
+          val batch = proto.Response.JSONBatch
+            .newBuilder()
+            .setData(ByteString.copyFromUtf8(sb.toString()))
+            .setRowCount(rowCount)
+            .build()
+          response.setJsonBatch(batch)
+          responseObserver.onNext(response.build())
+          // When the data is sent, we have to clear the batch data and reset the row count for
+          // this batch.
+          sb.clear()

Review Comment:
   missing `sb.append(row)`?
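   A minimal sketch of what the fixed branch might look like, assuming the row that triggered the flush should open the next batch (names taken from the surrounding diff):
   ```scala
   if (sb.size + row.size > MAX_BATCH_SIZE) {
     // ... build the JSONBatch from sb and send it via responseObserver ...
     sb.clear()
     rowCount = 0
     // Start the next batch with the row that did not fit into the one just sent.
     sb.append(row)
     rowCount += 1
   }
   ```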



##########
connector/connect/src/main/scala/org/apache/spark/sql/connect/service/SparkConnectStreamHandler.scala:
##########
@@ -59,19 +61,48 @@ class SparkConnectStreamHandler(responseObserver: StreamObserver[Response]) exte
 
   private def processRows(clientId: String, rows: DataFrame) = {
     val timeZoneId = SQLConf.get.sessionLocalTimeZone
-    val schema =
-      ByteString.copyFrom(ArrowUtils.toArrowSchema(rows.schema, timeZoneId).toByteArray)
-
-    val textSchema = rows.schema.fields.map(f => f.name).mkString("|")
-    val data = rows.collect().map(x => x.toSeq.mkString("|")).mkString("\n")
-    val bbb = proto.Response.CSVBatch.newBuilder
-      .setRowCount(-1)
-      .setData(textSchema ++ "\n" ++ data)
-      .build()
-    val response = proto.Response.newBuilder().setClientId(clientId).setCsvBatch(bbb).build()
 
-    // Send all the data
-    responseObserver.onNext(response)
+    // Only process up to 10MB of data.
+    val sb = new StringBuilder
+    var rowCount = 0
+    rows.toJSON
+      .collect()
+      .foreach(row => {
+        if (sb.size + row.size > MAX_BATCH_SIZE) {
+          val response = proto.Response.newBuilder().setClientId(clientId)
+          val batch = proto.Response.JSONBatch
+            .newBuilder()
+            .setData(ByteString.copyFromUtf8(sb.toString()))
+            .setRowCount(rowCount)
+            .build()
+          response.setJsonBatch(batch)
+          responseObserver.onNext(response.build())
+          // When the data is sent, we have to clear the batch data and reset the row count for
+          // this batch.
+          sb.clear()
+          rowCount = 0
+        } else {
+          // Make sure to put the newline delimiters only between items and not at the end.
+          if (rowCount > 0) {
+            sb.append("\n")

Review Comment:
   is it safe to use `\n` as the delimiter? `row` may also contain `\n`.
   what about using `repeated string` to carry the rows?
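   To illustrate the `repeated string` idea, a hedged sketch (the `rows` field and its generated `addRows` adder are hypothetical, not part of the proto in this PR): if `JSONBatch` declared `repeated string rows = 1;`, each JSON document would travel as its own element and no delimiter would be needed.
   ```scala
   // Hypothetical helper, assuming jsonRows holds one JSON document per row.
   def sendBatch(jsonRows: Seq[String]): Unit = {
     val batch = proto.Response.JSONBatch.newBuilder()
     jsonRows.foreach(batch.addRows) // generated adder for the hypothetical repeated field
     batch.setRowCount(jsonRows.size)
     responseObserver.onNext(
       proto.Response.newBuilder()
         .setClientId(clientId)
         .setJsonBatch(batch.build())
         .build())
   }
   ```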


