hvanhovell commented on code in PR #52271: URL: https://github.com/apache/spark/pull/52271#discussion_r2330855138 ########## sql/connect/server/src/test/scala/org/apache/spark/sql/connect/planner/SparkConnectServiceSuite.scala: ########## @@ -374,6 +374,132 @@ class SparkConnectServiceSuite } } + test("Arrow batch chunking") { + withEvents { verifyEvents => + val overriddenMaxChunkSize = 100 + withSparkConf( + Connect.CONNECT_SESSION_RESULT_CHUNKING_MAX_CHUNK_SIZE.key -> + overriddenMaxChunkSize.toString) { + val instance = new SparkConnectService(false) + val connect = new MockRemoteSession() + val context = proto.UserContext + .newBuilder() + .setUserId("c1") + .build() + val plan = proto.Plan + .newBuilder() + .setRoot(connect.sql("select id, exp(id) as eid from range(0, 100, 1, 4)")) + .build() + val request = proto.ExecutePlanRequest + .newBuilder() + .setPlan(plan) + .addRequestOptions( + proto.ExecutePlanRequest.RequestOption + .newBuilder() + .setResultChunkingOptions(proto.ResultChunkingOptions + .newBuilder() + .setAllowArrowBatchChunking(true) + .build()) + .build()) + .setUserContext(context) + .setSessionId(UUID.randomUUID.toString()) + .build() + + // Execute plan. + @volatile var done = false + val responses = mutable.Buffer.empty[proto.ExecutePlanResponse] Review Comment: If `done` needs to be volatile, then this buffer should be synchronized... Unless you are relying on some happens-before cleverness with the volatile variable (if so, then you need to document this)... -- This is an automated message from the Apache Git Service. To respond to the message, please log on to GitHub and use the URL above to go to the specific comment. To unsubscribe, e-mail: reviews-unsubscr...@spark.apache.org For queries about this service, please contact Infrastructure at: us...@infra.apache.org --------------------------------------------------------------------- To unsubscribe, e-mail: reviews-unsubscr...@spark.apache.org For additional commands, e-mail: reviews-h...@spark.apache.org
########## sql/connect/server/src/test/scala/org/apache/spark/sql/connect/planner/SparkConnectServiceSuite.scala: ########## @@ -374,6 +374,132 @@ class SparkConnectServiceSuite } } + test("Arrow batch chunking") { + withEvents { verifyEvents => + val overriddenMaxChunkSize = 100 + withSparkConf( + Connect.CONNECT_SESSION_RESULT_CHUNKING_MAX_CHUNK_SIZE.key -> + overriddenMaxChunkSize.toString) { + val instance = new SparkConnectService(false) + val connect = new MockRemoteSession() + val context = proto.UserContext + .newBuilder() + .setUserId("c1") + .build() + val plan = proto.Plan + .newBuilder() + .setRoot(connect.sql("select id, exp(id) as eid from range(0, 100, 1, 4)")) + .build() + val request = proto.ExecutePlanRequest + .newBuilder() + .setPlan(plan) + .addRequestOptions( + proto.ExecutePlanRequest.RequestOption + .newBuilder() + .setResultChunkingOptions(proto.ResultChunkingOptions + .newBuilder() + .setAllowArrowBatchChunking(true) + .build()) + .build()) + .setUserContext(context) + .setSessionId(UUID.randomUUID.toString()) + .build() + + // Execute plan. + @volatile var done = false + val responses = mutable.Buffer.empty[proto.ExecutePlanResponse] Review Comment: If done need volatile, then this should be synchronized... Unless you are relying on some happens-before cleverness with the volatile variable (if so, then you need to document this)... -- This is an automated message from the Apache Git Service. To respond to the message, please log on to GitHub and use the URL above to go to the specific comment. To unsubscribe, e-mail: reviews-unsubscr...@spark.apache.org For queries about this service, please contact Infrastructure at: us...@infra.apache.org --------------------------------------------------------------------- To unsubscribe, e-mail: reviews-unsubscr...@spark.apache.org For additional commands, e-mail: reviews-h...@spark.apache.org