This is an automated email from the ASF dual-hosted git repository.

lserris pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/trafficserver.git


The following commit(s) were added to refs/heads/master by this push:
     new 03189dd9db Allow configurable number of accepts per event loop (#10098)
03189dd9db is described below

commit 03189dd9db15858654673baafbfd9270af8d50f1
Author: Nathan Wang <wang.natha...@gmail.com>
AuthorDate: Fri Sep 22 13:43:32 2023 -0700

    Allow configurable number of accepts per event loop (#10098)
    
    * removal of accept_till_done, introduce reloadable variable 
additional_accepts to control how many connections are accepted per accept 
event loop
    
    * whitespace removal, some other formatting changes
    
    * missed another whitespace
    
    * int32_max overflow issue fixed
    
    * more descriptive config info
    
    * addressing comments
    
    * missing count increment
    
    ---------
    
    Co-authored-by: Nathan Wang <nathan_c_w...@apple.com>
---
 doc/admin-guide/files/records.yaml.en.rst         | 12 +++++++
 doc/appendices/command-line/traffic_server.en.rst |  2 --
 iocore/net/NetHandler.cc                          | 13 +++++++
 iocore/net/NetHandler.h                           |  2 ++
 iocore/net/P_NetAccept.h                          |  2 --
 iocore/net/UnixNetAccept.cc                       | 42 +++++++++++++++++------
 src/records/RecordsConfig.cc                      |  3 ++
 src/traffic_server/traffic_server.cc              |  1 -
 8 files changed, 61 insertions(+), 16 deletions(-)

diff --git a/doc/admin-guide/files/records.yaml.en.rst 
b/doc/admin-guide/files/records.yaml.en.rst
index c4c12c87b8..633eb2ef8f 100644
--- a/doc/admin-guide/files/records.yaml.en.rst
+++ b/doc/admin-guide/files/records.yaml.en.rst
@@ -476,6 +476,18 @@ Thread Variables
 Network
 =======
 
+.. ts:cv:: CONFIG proxy.config.net.additional_accepts INT -1
+   :reloadable:
+
+   This config addresses an issue that can sometimes happen if threads are 
caught in
a net accept while loop, become busy exclusively accepting connections, and 
are prevented
+   from doing other work. This can cause an increase in latency and average 
event
+   loop time. When set to 0, a thread accepts only 1 connection per event loop.
+   When set to any other positive integer x, a thread will accept up to x+1 
connections
+   per event loop. When set to -1 (default), a thread will accept connections 
as long
+   as there are connections waiting in its listening queue. Setting to -1 is 
equivalent to "accept all",
+   and setting to 0 is equivalent to "accept one".
+
 .. ts:cv:: CONFIG proxy.config.net.connections_throttle INT 30000
 
    The total number of client and origin server connections that the server
diff --git a/doc/appendices/command-line/traffic_server.en.rst 
b/doc/appendices/command-line/traffic_server.en.rst
index af88c6d33a..e3f89fcb68 100644
--- a/doc/appendices/command-line/traffic_server.en.rst
+++ b/doc/appendices/command-line/traffic_server.en.rst
@@ -32,8 +32,6 @@ Options
 
 .. option:: -a, --accepts_thread
 
-.. option:: -b, --accept_till_done
-
 .. option:: -B TAGS, --action_tags TAGS
 
 .. option:: --bind_stdout FILE
diff --git a/iocore/net/NetHandler.cc b/iocore/net/NetHandler.cc
index 3cafbcc0aa..4e49eeb850 100644
--- a/iocore/net/NetHandler.cc
+++ b/iocore/net/NetHandler.cc
@@ -131,6 +131,9 @@ NetHandler::update_nethandler_config(const char *str, 
RecDataT, RecData data, vo
   } else if (name == "proxy.config.net.default_inactivity_timeout"sv) {
     updated_member = &NetHandler::global_config.default_inactivity_timeout;
     Debug("net_queue", "proxy.config.net.default_inactivity_timeout updated to 
%" PRId64, data.rec_int);
+  } else if (name == "proxy.config.net.additional_accepts"sv) {
+    updated_member = &NetHandler::global_config.additional_accepts;
+    Debug("net_queue", "proxy.config.net.additional_accepts updated to %" 
PRId64, data.rec_int);
   }
 
   if (updated_member) {
@@ -166,6 +169,7 @@ NetHandler::init_for_process()
   REC_ReadConfigInt32(global_config.transaction_no_activity_timeout_in, 
"proxy.config.net.transaction_no_activity_timeout_in");
   REC_ReadConfigInt32(global_config.keep_alive_no_activity_timeout_in, 
"proxy.config.net.keep_alive_no_activity_timeout_in");
   REC_ReadConfigInt32(global_config.default_inactivity_timeout, 
"proxy.config.net.default_inactivity_timeout");
+  REC_ReadConfigInt32(global_config.additional_accepts, 
"proxy.config.net.additional_accepts");
 
   RecRegisterConfigUpdateCb("proxy.config.net.max_connections_in", 
update_nethandler_config, nullptr);
   RecRegisterConfigUpdateCb("proxy.config.net.max_requests_in", 
update_nethandler_config, nullptr);
@@ -173,6 +177,7 @@ NetHandler::init_for_process()
   
RecRegisterConfigUpdateCb("proxy.config.net.transaction_no_activity_timeout_in",
 update_nethandler_config, nullptr);
   
RecRegisterConfigUpdateCb("proxy.config.net.keep_alive_no_activity_timeout_in", 
update_nethandler_config, nullptr);
   RecRegisterConfigUpdateCb("proxy.config.net.default_inactivity_timeout", 
update_nethandler_config, nullptr);
+  RecRegisterConfigUpdateCb("proxy.config.net.additional_accepts", 
update_nethandler_config, nullptr);
 
   Debug("net_queue", "proxy.config.net.max_connections_in updated to %d", 
global_config.max_connections_in);
   Debug("net_queue", "proxy.config.net.max_requests_in updated to %d", 
global_config.max_requests_in);
@@ -182,6 +187,7 @@ NetHandler::init_for_process()
   Debug("net_queue", "proxy.config.net.keep_alive_no_activity_timeout_in 
updated to %d",
         global_config.keep_alive_no_activity_timeout_in);
   Debug("net_queue", "proxy.config.net.default_inactivity_timeout updated to 
%d", global_config.default_inactivity_timeout);
+  Debug("net_queue", "proxy.config.net.additional_accepts updated to %d", 
global_config.additional_accepts);
 }
 
 //
@@ -570,3 +576,10 @@ NetHandler::remove_from_active_queue(NetEvent *ne)
     --active_queue_size;
   }
 }
+
+int
+NetHandler::get_additional_accepts()
+{
+  int config_value = config.additional_accepts + 1;
+  return (config_value > 0 ? config_value : INT32_MAX - 1);
+}
diff --git a/iocore/net/NetHandler.h b/iocore/net/NetHandler.h
index d218d8ccc6..6e55099370 100644
--- a/iocore/net/NetHandler.h
+++ b/iocore/net/NetHandler.h
@@ -115,6 +115,7 @@ public:
     uint32_t transaction_no_activity_timeout_in = 0;
     uint32_t keep_alive_no_activity_timeout_in  = 0;
     uint32_t default_inactivity_timeout         = 0;
+    uint32_t additional_accepts                 = 0;
 
     /** Return the address of the first value in this struct.
 
@@ -164,6 +165,7 @@ public:
   void remove_from_keep_alive_queue(NetEvent *ne);
   bool add_to_active_queue(NetEvent *ne);
   void remove_from_active_queue(NetEvent *ne);
+  int get_additional_accepts();
 
   /// Per process initialization logic.
   static void init_for_process();
diff --git a/iocore/net/P_NetAccept.h b/iocore/net/P_NetAccept.h
index f44c70b517..6684b08324 100644
--- a/iocore/net/P_NetAccept.h
+++ b/iocore/net/P_NetAccept.h
@@ -120,8 +120,6 @@ struct NetAccept : public Continuation {
 
   explicit NetAccept(const NetProcessor::AcceptOptions &);
   ~NetAccept() override { action_ = nullptr; }
-
-  static int accept_till_done;
 };
 
 extern Ptr<ProxyMutex> naVecMutex;
diff --git a/iocore/net/UnixNetAccept.cc b/iocore/net/UnixNetAccept.cc
index 2ac59090c2..42fa0164f2 100644
--- a/iocore/net/UnixNetAccept.cc
+++ b/iocore/net/UnixNetAccept.cc
@@ -28,8 +28,6 @@
 
 using NetAcceptHandler = int (NetAccept::*)(int, void *);
 
-int NetAccept::accept_till_done = 1;
-
 namespace
 {
 
@@ -53,10 +51,12 @@ net_accept(NetAccept *na, void *ep, bool blockable)
   Event *e               = static_cast<Event *>(ep);
   int res                = 0;
   int count              = 0;
-  const int loop         = NetAccept::accept_till_done;
   UnixNetVConnection *vc = nullptr;
   Connection con;
 
+  EThread *t             = e->ethread;
+  int additional_accepts = get_NetHandler(t)->get_additional_accepts();
+
   if (!blockable) {
     if (!MUTEX_TAKE_TRY_LOCK(na->action_->mutex, e->ethread)) {
       return 0;
@@ -88,7 +88,7 @@ net_accept(NetAccept *na, void *ep, bool blockable)
       goto Ldone; // note: @a con will clean up the socket when it goes out of 
scope.
     }
 
-    ++count;
+    count++;
     Metrics::increment(net_rsb.connections_currently_open);
     vc->id = net_next_connection_number();
     vc->con.move(con);
@@ -129,12 +129,20 @@ net_accept(NetAccept *na, void *ep, bool blockable)
       vc->mutex = h->mutex;
       t->schedule_imm(vc);
     }
-  } while (loop);
+  } while (count < additional_accepts);
 
 Ldone:
   if (!blockable) {
     MUTEX_UNTAKE_LOCK(na->action_->mutex, e->ethread);
   }
+
+  // if we stop looping as a result of hitting the accept limit,
+  // reschedule accepting to the end of the thread event queue
+  // for the goal of fairness between accepting and other work
+  Debug("iocore_net_accepts", "exited accept loop - count: %d, limit: %d", 
count, additional_accepts);
+  if (count >= additional_accepts) {
+    this_ethread()->schedule_imm_local(na);
+  }
   return count;
 }
 
@@ -290,11 +298,13 @@ int
 NetAccept::do_blocking_accept(EThread *t)
 {
   int res                = 0;
-  const int loop         = NetAccept::accept_till_done;
   UnixNetVConnection *vc = nullptr;
   Connection con;
   con.sock_type = SOCK_STREAM;
 
+  int count              = 0;
+  int additional_accepts = get_NetHandler(t)->get_additional_accepts();
+
   // do-while for accepting all the connections
   // added by YTS Team, yamsat
   do {
@@ -345,6 +355,7 @@ NetAccept::do_blocking_accept(EThread *t)
       return -1;
     }
 
+    count++;
     Metrics::increment(net_rsb.connections_currently_open);
     vc->id = net_next_connection_number();
     vc->con.move(con);
@@ -377,7 +388,7 @@ NetAccept::do_blocking_accept(EThread *t)
     // Assign NetHandler->mutex to NetVC
     vc->mutex = h->mutex;
     localt->schedule_imm(vc);
-  } while (loop);
+  } while (count < additional_accepts);
 
   return 1;
 }
@@ -433,7 +444,10 @@ NetAccept::acceptFastEvent(int event, void *ep)
   con.sock_type = SOCK_STREAM;
 
   UnixNetVConnection *vc = nullptr;
-  const int loop         = NetAccept::accept_till_done;
+  int count              = 0;
+  EThread *t             = e->ethread;
+  NetHandler *h          = get_NetHandler(t);
+  int additional_accepts = h->get_additional_accepts();
 
   do {
     socklen_t sz = sizeof(con.addr);
@@ -498,6 +512,7 @@ NetAccept::acceptFastEvent(int event, void *ep)
     vc = (UnixNetVConnection 
*)this->getNetProcessor()->allocate_vc(e->ethread);
     ink_release_assert(vc);
 
+    count++;
     Metrics::increment(net_rsb.connections_currently_open);
     vc->id = net_next_connection_number();
     vc->con.move(con);
@@ -525,17 +540,22 @@ NetAccept::acceptFastEvent(int event, void *ep)
 #endif
     SET_CONTINUATION_HANDLER(vc, &UnixNetVConnection::acceptEvent);
 
-    EThread *t    = e->ethread;
-    NetHandler *h = get_NetHandler(t);
     // Assign NetHandler->mutex to NetVC
     vc->mutex = h->mutex;
     // We must be holding the lock already to do later do_io_read's
     SCOPED_MUTEX_LOCK(lock, vc->mutex, e->ethread);
     vc->handleEvent(EVENT_NONE, nullptr);
     vc = nullptr;
-  } while (loop);
+  } while (count < additional_accepts);
 
 Ldone:
+  // if we stop looping as a result of hitting the accept limit,
+  // reschedule accepting to the end of the thread event queue
+  // for the goal of fairness between accepting and other work
+  Debug("iocore_net_accepts", "exited accept loop - count: %d, limit: %d", 
count, additional_accepts);
+  if (count >= additional_accepts) {
+    this_ethread()->schedule_imm_local(this);
+  }
   return EVENT_CONT;
 
 Lerror:
diff --git a/src/records/RecordsConfig.cc b/src/records/RecordsConfig.cc
index 89e6e94e55..edbf33083b 100644
--- a/src/records/RecordsConfig.cc
+++ b/src/records/RecordsConfig.cc
@@ -714,6 +714,9 @@ static const RecordElement RecordsConfig[] =
   //# Net Subsystem
   //#
   
//##############################################################################
+
+  {RECT_CONFIG, "proxy.config.net.additional_accepts", RECD_INT, "-1", 
RECU_DYNAMIC, RR_NULL, RECC_INT, "^-1|[0-9]+$", RECA_NULL}
+  ,
   {RECT_CONFIG, "proxy.config.net.connections_throttle", RECD_INT, "30000", 
RECU_RESTART_TS, RR_REQUIRED, RECC_STR, "^[0-9]+$", RECA_NULL}
   ,
   {RECT_CONFIG, "proxy.config.net.listen_backlog", RECD_INT, "-1", RECU_NULL, 
RR_NULL, RECC_NULL, nullptr, RECA_NULL}
diff --git a/src/traffic_server/traffic_server.cc 
b/src/traffic_server/traffic_server.cc
index 04e2f65c42..b8981e424c 100644
--- a/src/traffic_server/traffic_server.cc
+++ b/src/traffic_server/traffic_server.cc
@@ -200,7 +200,6 @@ static ArgumentDescription argument_descriptions[] = {
   {"net_threads",       'n', "Number of Net Threads",                          
                                                     "I",     
&num_of_net_threads,             "PROXY_NET_THREADS",       nullptr},
   {"udp_threads",       'U', "Number of UDP Threads",                          
                                                     "I",     
&num_of_udp_threads,             "PROXY_UDP_THREADS",       nullptr},
   {"accept_thread",     'a', "Use an Accept Thread",                           
                                                     "T",     
&num_accept_threads,             "PROXY_ACCEPT_THREAD",     nullptr},
-  {"accept_till_done",  'b', "Accept Till Done",                               
                                                     "T",     
&NetAccept::accept_till_done,    "PROXY_ACCEPT_TILL_DONE",  nullptr},
   {"httpport",          'p', "Port descriptor for HTTP Accept",                
                                                     "S*",    
&http_accept_port_descriptor,    "PROXY_HTTP_ACCEPT_PORT",  nullptr},
   {"disable_freelist",  'f', "Disable the freelist memory allocator",          
                                                     "T",     
&cmd_disable_freelist,           "PROXY_DPRINTF_LEVEL",     nullptr},
   {"disable_pfreelist", 'F', "Disable the freelist memory allocator in 
ProxyAllocator",                                             "T",     
&cmd_disable_pfreelist,

Reply via email to