This is an automated email from the ASF dual-hosted git repository.

jan pushed a commit to branch 3.x
in repository https://gitbox.apache.org/repos/asf/couchdb.git


The following commit(s) were added to refs/heads/3.x by this push:
     new b9a3223  feat: make the timeout for receiving requests from attachment writers configurable
b9a3223 is described below

commit b9a322344aa6e86cf81af61300d697619f545311
Author: James Coglan <[email protected]>
AuthorDate: Mon Mar 7 14:52:37 2022 +0000

    feat: make the timeout for receiving requests from attachment writers configurable
    
    The code that forwards attachment data to cluster nodes via fabric has a
    hard-coded timeout of five minutes for nodes to request the data. Making
    this configurable lets us mitigate the impact of issue #3939 [1], which
    causes requests to block if one of the nodes already has the given
    attachment and doesn't end up requesting the data for it.
    
    [1]: https://github.com/apache/couchdb/issues/3939
---
 rel/overlay/etc/default.ini             | 4 ++++
 src/couch/src/couch_httpd_multipart.erl | 7 +++++--
 2 files changed, 9 insertions(+), 2 deletions(-)

diff --git a/rel/overlay/etc/default.ini b/rel/overlay/etc/default.ini
index 3c15ae9..6b64c6d 100644
--- a/rel/overlay/etc/default.ini
+++ b/rel/overlay/etc/default.ini
@@ -80,6 +80,10 @@ view_index_dir = {{view_index_dir}}
 ; Allow edits on the _security object in the user db. By default, it's disabled.
 ;users_db_security_editable = false
 
+; Sets the maximum time that the coordinator node will wait for cluster members
+; to request attachment data before returning a response to the client.
+;attachment_writer_timeout = 300000
+
 [purge]
 ; Allowed maximum number of documents in one purge request
 ;max_document_id_number = 100
diff --git a/src/couch/src/couch_httpd_multipart.erl b/src/couch/src/couch_httpd_multipart.erl
index ecdf105..95a2c9e 100644
--- a/src/couch/src/couch_httpd_multipart.erl
+++ b/src/couch/src/couch_httpd_multipart.erl
@@ -122,7 +122,7 @@ mp_parse_atts(eof, {Ref, Chunks, Offset, Counters, Waiting}) ->
                             NewAcc = {Ref, Chunks, Offset, C2, Waiting -- [WriterPid]},
                             mp_parse_atts(eof, NewAcc)
                     end
-            after 300000 ->
+            after att_writer_timeout() ->
                 ok
             end
     end.
@@ -198,7 +198,7 @@ maybe_send_data({Ref, Chunks, Offset, Counters, Waiting}) ->
                     {get_bytes, Ref, X} ->
                         C2 = update_writer(X, Counters),
                        maybe_send_data({Ref, NewChunks, NewOffset, C2, [X | NewWaiting]})
-                after 300000 ->
+                after att_writer_timeout() ->
                     abort_parsing
                 end
         end
@@ -243,6 +243,9 @@ num_mp_writers() ->
         Count -> Count
     end.
 
+att_writer_timeout() ->
+    config:get_integer("couchdb", "attachment_writer_timeout", 300000).
+
 encode_multipart_stream(_Boundary, JsonBytes, [], WriteFun, _AttFun) ->
     WriteFun(JsonBytes);
 encode_multipart_stream(Boundary, JsonBytes, Atts, WriteFun, AttFun) ->

Reply via email to