This is an automated email from the ASF dual-hosted git repository.

vatamane pushed a commit to branch erlang-21-support
in repository https://gitbox.apache.org/repos/asf/couchdb.git

commit eedda07e05f53e1838dc03fda8ed46fac1468f17
Author: Nick Vatamaniuc <vatam...@apache.org>
AuthorDate: Fri Jun 22 11:12:40 2018 -0400

    A better fix for handling node local 413 responses
    
    During 21.0 testing, the hack to read extra data from the socket to give the
    client a better chance of detecting a 413 response ended up failing. Either
    process scheduling or IO scheduling was different enough that the test
    started failing fairly consistently.
    
    Apply a better fix that is more in line with the intent: after responding to
    the client with a 413, read a limited amount of data off the socket (1MB),
    and limit the maximum time spent doing so to less than 1 second.
    
    Issue #1396
---
 src/couch/src/couch_httpd.erl | 7 ++++---
 1 file changed, 4 insertions(+), 3 deletions(-)

diff --git a/src/couch/src/couch_httpd.erl b/src/couch/src/couch_httpd.erl
index ec397c2..e66a78e 100644
--- a/src/couch/src/couch_httpd.erl
+++ b/src/couch/src/couch_httpd.erl
@@ -40,6 +40,8 @@
 
 
 -define(HANDLER_NAME_IN_MODULE_POS, 6).
+-define(MAX_DRAIN_BYTES, 1048576).
+-define(MAX_DRAIN_TIME_MSEC, 1000).
 
 start_link() ->
     start_link(http).
@@ -1181,10 +1183,9 @@ respond_(#httpd{mochi_req = MochiReq}, 413, Headers, Args, Type) ->
     % just increases the chances of 413 being detected correctly by the client
     % (rather than getting a brutal TCP reset).
     erlang:put(mochiweb_request_force_close, true),
-    Socket = MochiReq:get(socket),
-    mochiweb_socket:recv(Socket, 0, 0),
     Result = MochiReq:Type({413, Headers, Args}),
-    mochiweb_socket:recv(Socket, 0, 0),
+    Socket = MochiReq:get(socket),
+    mochiweb_socket:recv(Socket, ?MAX_DRAIN_BYTES, ?MAX_DRAIN_TIME_MSEC),
     Result;
 respond_(#httpd{mochi_req = MochiReq}, Code, Headers, Args, Type) ->
     MochiReq:Type({Code, Headers, Args}).
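
For context, a minimal standalone sketch of the bounded drain, assuming
mochiweb_socket:recv/3 takes (Socket, Length, Timeout) and returns {ok, Data}
or {error, Reason}; the helper name drain_socket/1 is hypothetical and not
part of this patch, which simply calls recv/3 inline after sending the 413:

    -define(MAX_DRAIN_BYTES, 1048576).
    -define(MAX_DRAIN_TIME_MSEC, 1000).

    %% Hypothetical helper: after the 413 has been sent, try to pull a
    %% limited amount of pending request data off the socket so the client
    %% has a chance to read the response rather than seeing a TCP reset.
    %% Reads at most ?MAX_DRAIN_BYTES and gives up after ?MAX_DRAIN_TIME_MSEC.
    drain_socket(Socket) ->
        case mochiweb_socket:recv(Socket, ?MAX_DRAIN_BYTES, ?MAX_DRAIN_TIME_MSEC) of
            {ok, _Data} -> ok;
            {error, _Reason} -> ok
        end.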
