This is an automated email from the ASF dual-hosted git repository.

bcall pushed a commit to branch 9.2.x
in repository https://gitbox.apache.org/repos/asf/trafficserver.git


The following commit(s) were added to refs/heads/9.2.x by this push:
     new e18d359ee7 applying additional accepts PR to 9.2.x (#10590)
e18d359ee7 is described below

commit e18d359ee7d891eb654bb7f5724166ffa6d4c7de
Author: Nathan Wang <wang.natha...@gmail.com>
AuthorDate: Mon Nov 6 13:09:56 2023 -0800

    applying additional accepts PR to 9.2.x (#10590)
    
    Co-authored-by: Nathan Wang <nathan_c_w...@apple.com>
---
 doc/admin-guide/files/records.config.en.rst       | 12 +++++++
 doc/appendices/command-line/traffic_server.en.rst |  2 --
 iocore/net/P_UnixNet.h                            |  2 ++
 iocore/net/UnixNet.cc                             | 13 ++++++++
 iocore/net/UnixNetAccept.cc                       | 39 ++++++++++++++++++-----
 mgmt/RecordsConfig.cc                             |  2 ++
 proxy/Main.h                                      |  2 --
 src/traffic_server/traffic_server.cc              |  1 -
 8 files changed, 60 insertions(+), 13 deletions(-)

diff --git a/doc/admin-guide/files/records.config.en.rst 
b/doc/admin-guide/files/records.config.en.rst
index e6865b8c80..65f454ac00 100644
--- a/doc/admin-guide/files/records.config.en.rst
+++ b/doc/admin-guide/files/records.config.en.rst
@@ -430,6 +430,18 @@ Thread Variables
 Network
 =======
 
+.. ts:cv:: CONFIG proxy.config.net.additional_accepts INT -1
+   :reloadable:
+
+   This config addresses an issue that can sometimes happen if threads are 
caught in
a net accept while loop, become busy exclusively accepting connections, and 
are prevented
+   from doing other work. This can cause an increase in latency and average 
event
+   loop time. When set to 0, a thread accepts only 1 connection per event loop.
+   When set to any other positive integer x, a thread will accept up to x+1 
connections
+   per event loop. When set to -1 (default), a thread will accept connections 
as long
+   as there are connections waiting in its listening queue. Setting to -1 is 
equivalent to "accept all",
+   and setting to 0 is equivalent to "accept one".
+
 .. ts:cv:: CONFIG proxy.config.net.connections_throttle INT 30000
 
    The total number of client and origin server connections that the server
diff --git a/doc/appendices/command-line/traffic_server.en.rst 
b/doc/appendices/command-line/traffic_server.en.rst
index edcd51d57c..7818e78c46 100644
--- a/doc/appendices/command-line/traffic_server.en.rst
+++ b/doc/appendices/command-line/traffic_server.en.rst
@@ -32,8 +32,6 @@ Options
 
 .. option:: -a, --accepts_thread
 
-.. option:: -b, --accept_till_done
-
 .. option:: -B TAGS, --action_tags TAGS
 
 .. option:: --bind_stdout FILE
diff --git a/iocore/net/P_UnixNet.h b/iocore/net/P_UnixNet.h
index fa58aa4ffc..d5b68bf787 100644
--- a/iocore/net/P_UnixNet.h
+++ b/iocore/net/P_UnixNet.h
@@ -281,6 +281,7 @@ public:
     uint32_t transaction_no_activity_timeout_in = 0;
     uint32_t keep_alive_no_activity_timeout_in  = 0;
     uint32_t default_inactivity_timeout         = 0;
+    uint32_t additional_accepts                 = 0;
 
     /** Return the address of the first value in this struct.
 
@@ -327,6 +328,7 @@ public:
   void remove_from_keep_alive_queue(NetEvent *ne);
   bool add_to_active_queue(NetEvent *ne);
   void remove_from_active_queue(NetEvent *ne);
+  int get_additional_accepts();
 
   /// Per process initialization logic.
   static void init_for_process();
diff --git a/iocore/net/UnixNet.cc b/iocore/net/UnixNet.cc
index c0f1052dbb..e1dca3ce39 100644
--- a/iocore/net/UnixNet.cc
+++ b/iocore/net/UnixNet.cc
@@ -294,6 +294,9 @@ NetHandler::update_nethandler_config(const char *str, 
RecDataT, RecData data, vo
   } else if (name == "proxy.config.net.default_inactivity_timeout"sv) {
     updated_member = &NetHandler::global_config.default_inactivity_timeout;
     Debug("net_queue", "proxy.config.net.default_inactivity_timeout updated to 
%" PRId64, data.rec_int);
+  } else if (name == "proxy.config.net.additional_accepts"sv) {
+    updated_member = &NetHandler::global_config.additional_accepts;
+    Debug("net_queue", "proxy.config.net.additional_accepts updated to %" 
PRId64, data.rec_int);
   }
 
   if (updated_member) {
@@ -329,6 +332,7 @@ NetHandler::init_for_process()
   REC_ReadConfigInt32(global_config.transaction_no_activity_timeout_in, 
"proxy.config.net.transaction_no_activity_timeout_in");
   REC_ReadConfigInt32(global_config.keep_alive_no_activity_timeout_in, 
"proxy.config.net.keep_alive_no_activity_timeout_in");
   REC_ReadConfigInt32(global_config.default_inactivity_timeout, 
"proxy.config.net.default_inactivity_timeout");
+  REC_ReadConfigInt32(global_config.additional_accepts, 
"proxy.config.net.additional_accepts");
 
   RecRegisterConfigUpdateCb("proxy.config.net.max_connections_in", 
update_nethandler_config, nullptr);
   RecRegisterConfigUpdateCb("proxy.config.net.max_requests_in", 
update_nethandler_config, nullptr);
@@ -336,6 +340,7 @@ NetHandler::init_for_process()
   
RecRegisterConfigUpdateCb("proxy.config.net.transaction_no_activity_timeout_in",
 update_nethandler_config, nullptr);
   
RecRegisterConfigUpdateCb("proxy.config.net.keep_alive_no_activity_timeout_in", 
update_nethandler_config, nullptr);
   RecRegisterConfigUpdateCb("proxy.config.net.default_inactivity_timeout", 
update_nethandler_config, nullptr);
+  RecRegisterConfigUpdateCb("proxy.config.net.additional_accepts", 
update_nethandler_config, nullptr);
 
   Debug("net_queue", "proxy.config.net.max_connections_in updated to %d", 
global_config.max_connections_in);
   Debug("net_queue", "proxy.config.net.max_requests_in updated to %d", 
global_config.max_requests_in);
@@ -345,6 +350,7 @@ NetHandler::init_for_process()
   Debug("net_queue", "proxy.config.net.keep_alive_no_activity_timeout_in 
updated to %d",
         global_config.keep_alive_no_activity_timeout_in);
   Debug("net_queue", "proxy.config.net.default_inactivity_timeout updated to 
%d", global_config.default_inactivity_timeout);
+  Debug("net_queue", "proxy.config.net.additional_accepts updated to %d", 
global_config.additional_accepts);
 }
 
 //
@@ -782,3 +788,10 @@ NetHandler::remove_from_active_queue(NetEvent *ne)
     --active_queue_size;
   }
 }
+
+int
+NetHandler::get_additional_accepts()
+{
+  int config_value = config.additional_accepts + 1;
+  return (config_value > 0 ? config_value : INT32_MAX - 1);
+}
\ No newline at end of file
diff --git a/iocore/net/UnixNetAccept.cc b/iocore/net/UnixNetAccept.cc
index bbf6aac13f..5545ee35e9 100644
--- a/iocore/net/UnixNetAccept.cc
+++ b/iocore/net/UnixNetAccept.cc
@@ -27,7 +27,6 @@
 #include "P_Net.h"
 
 using NetAcceptHandler = int (NetAccept::*)(int, void *);
-int accept_till_done   = 1;
 
 // we need to protect naVec since it might be accessed
 // in different threads at the same time
@@ -48,10 +47,12 @@ net_accept(NetAccept *na, void *ep, bool blockable)
   Event *e               = static_cast<Event *>(ep);
   int res                = 0;
   int count              = 0;
-  int loop               = accept_till_done;
   UnixNetVConnection *vc = nullptr;
   Connection con;
 
+  EThread *t             = e->ethread;
+  int additional_accepts = get_NetHandler(t)->get_additional_accepts();
+
   if (!blockable) {
     if (!MUTEX_TAKE_TRY_LOCK(na->action_->mutex, e->ethread)) {
       return 0;
@@ -83,7 +84,7 @@ net_accept(NetAccept *na, void *ep, bool blockable)
       goto Ldone; // note: @a con will clean up the socket when it goes out of 
scope.
     }
 
-    ++count;
+    count++;
     NET_SUM_GLOBAL_DYN_STAT(net_connections_currently_open_stat, 1);
     vc->id = net_next_connection_number();
     vc->con.move(con);
@@ -124,12 +125,20 @@ net_accept(NetAccept *na, void *ep, bool blockable)
       vc->mutex = h->mutex;
       t->schedule_imm(vc);
     }
-  } while (loop);
+  } while (count < additional_accepts);
 
 Ldone:
   if (!blockable) {
     MUTEX_UNTAKE_LOCK(na->action_->mutex, e->ethread);
   }
+
+  // if we stop looping as a result of hitting the accept limit,
+  // reschedule accepting to the end of the thread event queue
+  // for the goal of fairness between accepting and other work
+  Debug("iocore_net_accepts", "exited accept loop - count: %d, limit: %d", 
count, additional_accepts);
+  if (count >= additional_accepts) {
+    this_ethread()->schedule_imm_local(na);
+  }
   return count;
 }
 
@@ -285,11 +294,13 @@ int
 NetAccept::do_blocking_accept(EThread *t)
 {
   int res                = 0;
-  int loop               = accept_till_done;
   UnixNetVConnection *vc = nullptr;
   Connection con;
   con.sock_type = SOCK_STREAM;
 
+  int count              = 0;
+  int additional_accepts = get_NetHandler(t)->get_additional_accepts();
+
   // do-while for accepting all the connections
   // added by YTS Team, yamsat
   do {
@@ -340,6 +351,7 @@ NetAccept::do_blocking_accept(EThread *t)
       return -1;
     }
 
+    count++;
     NET_SUM_GLOBAL_DYN_STAT(net_connections_currently_open_stat, 1);
     vc->id = net_next_connection_number();
     vc->con.move(con);
@@ -372,7 +384,7 @@ NetAccept::do_blocking_accept(EThread *t)
     // Assign NetHandler->mutex to NetVC
     vc->mutex = h->mutex;
     localt->schedule_imm(vc);
-  } while (loop);
+  } while (count < additional_accepts);
 
   return 1;
 }
@@ -428,7 +440,10 @@ NetAccept::acceptFastEvent(int event, void *ep)
   con.sock_type = SOCK_STREAM;
 
   UnixNetVConnection *vc = nullptr;
-  int loop               = accept_till_done;
+  int count              = 0;
+  EThread *t             = e->ethread;
+  NetHandler *h          = get_NetHandler(t);
+  int additional_accepts = h->get_additional_accepts();
 
   do {
     socklen_t sz = sizeof(con.addr);
@@ -493,6 +508,7 @@ NetAccept::acceptFastEvent(int event, void *ep)
     vc = (UnixNetVConnection 
*)this->getNetProcessor()->allocate_vc(e->ethread);
     ink_release_assert(vc);
 
+    count++;
     NET_SUM_GLOBAL_DYN_STAT(net_connections_currently_open_stat, 1);
     vc->id = net_next_connection_number();
     vc->con.move(con);
@@ -528,9 +544,16 @@ NetAccept::acceptFastEvent(int event, void *ep)
     SCOPED_MUTEX_LOCK(lock, vc->mutex, e->ethread);
     vc->handleEvent(EVENT_NONE, nullptr);
     vc = nullptr;
-  } while (loop);
+  } while (count < additional_accepts);
 
 Ldone:
+  // if we stop looping as a result of hitting the accept limit,
+  // reschedule accepting to the end of the thread event queue
+  // for the goal of fairness between accepting and other work
+  Debug("iocore_net_accepts", "exited accept loop - count: %d, limit: %d", 
count, additional_accepts);
+  if (count >= additional_accepts) {
+    this_ethread()->schedule_imm_local(this);
+  }
   return EVENT_CONT;
 
 Lerror:
diff --git a/mgmt/RecordsConfig.cc b/mgmt/RecordsConfig.cc
index 433ff381b8..b63e0523c2 100644
--- a/mgmt/RecordsConfig.cc
+++ b/mgmt/RecordsConfig.cc
@@ -759,6 +759,8 @@ static const RecordElement RecordsConfig[] =
   //# Net Subsystem
   //#
   
//##############################################################################
+  {RECT_CONFIG, "proxy.config.net.additional_accepts", RECD_INT, "-1", 
RECU_DYNAMIC, RR_NULL, RECC_INT, "^-1|[0-9]+$", RECA_NULL}
+   ,
   {RECT_CONFIG, "proxy.config.net.connections_throttle", RECD_INT, "30000", 
RECU_RESTART_TS, RR_REQUIRED, RECC_STR, "^[0-9]+$", RECA_NULL}
   ,
   {RECT_CONFIG, "proxy.config.net.listen_backlog", RECD_INT, "-1", RECU_NULL, 
RR_NULL, RECC_NULL, nullptr, RECA_NULL}
diff --git a/proxy/Main.h b/proxy/Main.h
index 51f1cec117..9f383ec3e9 100644
--- a/proxy/Main.h
+++ b/proxy/Main.h
@@ -34,8 +34,6 @@
 // Global Data
 //
 // Global Configuration
-
-extern int accept_till_done;
 extern int auto_clear_hostdb_flag;
 
 //
diff --git a/src/traffic_server/traffic_server.cc 
b/src/traffic_server/traffic_server.cc
index 8a88ab8e94..87808da1d3 100644
--- a/src/traffic_server/traffic_server.cc
+++ b/src/traffic_server/traffic_server.cc
@@ -197,7 +197,6 @@ static ArgumentDescription argument_descriptions[] = {
   {"net_threads", 'n', "Number of Net Threads", "I", &num_of_net_threads, 
"PROXY_NET_THREADS", nullptr},
   {"udp_threads", 'U', "Number of UDP Threads", "I", &num_of_udp_threads, 
"PROXY_UDP_THREADS", nullptr},
   {"accept_thread", 'a', "Use an Accept Thread", "T", &num_accept_threads, 
"PROXY_ACCEPT_THREAD", nullptr},
-  {"accept_till_done", 'b', "Accept Till Done", "T", &accept_till_done, 
"PROXY_ACCEPT_TILL_DONE", nullptr},
   {"httpport", 'p', "Port descriptor for HTTP Accept", "S*", 
&http_accept_port_descriptor, "PROXY_HTTP_ACCEPT_PORT", nullptr},
   {"disable_freelist", 'f', "Disable the freelist memory allocator", "T", 
&cmd_disable_freelist, "PROXY_DPRINTF_LEVEL", nullptr},
   {"disable_pfreelist", 'F', "Disable the freelist memory allocator in 
ProxyAllocator", "T", &cmd_disable_pfreelist,

Reply via email to