juliuszsompolski commented on code in PR #41315: URL: https://github.com/apache/spark/pull/41315#discussion_r1260922955
########## connector/connect/server/src/main/scala/org/apache/spark/sql/connect/execution/ExecutePlanResponseObserver.scala: ########## @@ -0,0 +1,111 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.spark.sql.connect.execution + +import scala.collection.mutable.ListBuffer + +import io.grpc.stub.StreamObserver + +import org.apache.spark.connect.proto.ExecutePlanResponse + +/** + * Container for ExecutePlanResponses responses. + * + * This StreamObserver is running on the execution thread and saves the responses, + * it notifies the ExecutePlanResponseSender about available responses. + * + * @param responseObserver + */ +private[connect] class ExecutePlanResponseObserver() extends StreamObserver[ExecutePlanResponse] { + + // Cached stream state. + private val responses = new ListBuffer[CachedExecutePlanResponse]() + private var error: Option[Throwable] = None + private var completed: Boolean = false + private var lastIndex: Option[Long] = None // index of last response before completed. + private var index: Long = 0 // first response will have index 1 + + // sender to notify of available responses. 
+ private var responseSender: Option[ExecutePlanResponseSender] = None + + def onNext(r: ExecutePlanResponse): Unit = synchronized { + if (lastIndex.nonEmpty) { + throw new IllegalStateException("Stream onNext can't be called after stream completed") + } + index += 1 + responses += CachedExecutePlanResponse(r, index) + notifySender() + } + + def onError(t: Throwable): Unit = synchronized { + if (lastIndex.nonEmpty) { + throw new IllegalStateException("Stream onError can't be called after stream completed") + } + error = Some(t) Review Comment: Adding to what we were talking about: * This picks up errors of ExecutePlan, but it wouldn't be hard to extend this execution mechanism to other RPCs. * However, this will pick up errors on the execution thread only. If errors happen on the GRPC thread (these would be more internal errors, like being unable to start the execution thread), those would need a different mechanism. TBH, if I were to tabula-rasa design it, I would not use GRPC errors for application errors (like errors for Spark execution), but have these returned as an explicit message type, and reserve GRPC onError for server errors. That would free us from GRPC onError size limitations that we now need such workarounds for. That would make it easier to distinguish network/framework errors from user errors, which would make it easier to establish retry policies, or to keep stats on user errors vs. system errors... But such a change couldn't be made backwards compatible at this point. Maybe for Spark 4.0? -- This is an automated message from the Apache Git Service. To respond to the message, please log on to GitHub and use the URL above to go to the specific comment. To unsubscribe, e-mail: [email protected] For queries about this service, please contact Infrastructure at: [email protected] --------------------------------------------------------------------- To unsubscribe, e-mail: [email protected] For additional commands, e-mail: [email protected]
