apoorvmittal10 commented on code in PR #19637:
URL: https://github.com/apache/kafka/pull/19637#discussion_r2074232073


##########
core/src/test/scala/unit/kafka/server/ShareFetchAcknowledgeRequestTest.scala:
##########
@@ -2049,10 +2049,11 @@ class ShareFetchAcknowledgeRequestTest(cluster: ClusterInstance) extends GroupCo
     // share session with the ShareSessionCache would throw SHARE_SESSION_LIMIT_REACHED
     TestUtils.waitUntilTrue(() => {
       val metadata = new ShareRequestMetadata(memberId3, ShareRequestMetadata.INITIAL_EPOCH)
-      val shareFetchRequest = createShareFetchRequest(groupId, metadata, send, Seq.empty, Map.empty)
+      val shareFetchRequest = createShareFetchRequest(groupId, metadata, send, Seq.empty, Map.empty, maxWaitMs=1000)
       val shareFetchResponse = connectAndReceiveWithoutClosingSocket[ShareFetchResponse](shareFetchRequest)
       val shareFetchResponseData = shareFetchResponse.data()
-      shareFetchResponseData.errorCode == Errors.SHARE_SESSION_NOT_FOUND.code
+      println("error code received: " + shareFetchResponseData.errorCode)

Review Comment:
   Is this println needed?



##########
core/src/main/scala/kafka/server/KafkaApis.scala:
##########
@@ -3049,6 +3049,13 @@ class KafkaApis(val requestChannel: RequestChannel,
      // Creating the shareFetchContext for Share Session Handling. if context creation fails, the request is failed directly here.
      shareFetchContext = sharePartitionManager.newContext(groupId, shareFetchData, forgottenTopics, newReqMetadata, isAcknowledgeDataPresent, request.context.connectionId)
    } catch {
+      case e: ShareSessionLimitReachedException =>
+        sharePartitionManager.createDelayedErrorFuture(shareFetchRequest.maxWait, e).whenComplete((_, exception) => {
+          if (exception != null) {
+            requestHelper.sendMaybeThrottle(request, shareFetchRequest.getErrorResponse(AbstractResponse.DEFAULT_THROTTLE_TIME, exception))
+          }
+        })
+        return

Review Comment:
   Can't we use the existing throttling mechanism to ask the client not to send the next request until max wait has elapsed? I don't think we should go the route of another scheduler thread to solve this issue. I understand we can't block the DataPlane thread either, hence the extra thread. So my suggestion is to use throttling, where the client itself holds off on sending the next fetch request. Can you check that?
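   A minimal sketch of that throttle-based alternative (hypothetical wiring, not the PR's code; it assumes maxWait fits the response's throttle-time field and reuses the getErrorResponse overload already called in the diff above):

       case e: ShareSessionLimitReachedException =>
         // Respond immediately with the error, but advertise the request's max wait
         // as the throttle time so a well-behaved client backs off before retrying,
         // instead of the broker parking the response on a separate scheduler thread.
         requestHelper.sendMaybeThrottle(request,
           shareFetchRequest.getErrorResponse(shareFetchRequest.maxWait, e))
         return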



##########
core/src/main/scala/kafka/server/KafkaApis.scala:
##########
@@ -3049,6 +3049,13 @@ class KafkaApis(val requestChannel: RequestChannel,
      // Creating the shareFetchContext for Share Session Handling. if context creation fails, the request is failed directly here.
      shareFetchContext = sharePartitionManager.newContext(groupId, shareFetchData, forgottenTopics, newReqMetadata, isAcknowledgeDataPresent, request.context.connectionId)
    } catch {
+      case e: ShareSessionLimitReachedException =>
+        sharePartitionManager.createDelayedErrorFuture(shareFetchRequest.maxWait, e).whenComplete((_, exception) => {
+          if (exception != null) {
+            requestHelper.sendMaybeThrottle(request, shareFetchRequest.getErrorResponse(AbstractResponse.DEFAULT_THROTTLE_TIME, exception))
+          }
+        })
+        return

Review Comment:
   If we do decide that the broker should strictly wait, then we should perhaps look for other solutions.
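   One such option (a hypothetical sketch, not something proposed in this PR; it assumes the purgatory DelayedOperation API with its tryComplete/onComplete/onExpiration hooks) would be a delayed operation that never completes early and only sends the error once the request's max wait expires, reusing the existing purgatory timer rather than a dedicated scheduler thread:

       // Hypothetical helper; the class name and sendError callback are illustrative only.
       class DelayedShareSessionError(
         delayMs: Long,
         sendError: () => Unit // callback that sends the SHARE_SESSION_LIMIT_REACHED response
       ) extends DelayedOperation(delayMs) {
         // Nothing can satisfy this operation early; it waits in purgatory until the timer fires.
         override def tryComplete(): Boolean = false
         override def onExpiration(): Unit = {}
         // Runs once when the delay elapses: deliver the delayed error response to the client.
         override def onComplete(): Unit = sendError()
       }

   Registered in a purgatory, such an operation could even be completed early if a session slot frees up before the deadline.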


