This is an automated email from the ASF dual-hosted git repository.
oknet pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/trafficserver.git
The following commit(s) were added to refs/heads/master by this push:
new 3c5f2fe Optimize: make NetAccept::init_accept_loop has the logic similar to NetAccept::init_accept_per_thread
3c5f2fe is described below
commit 3c5f2fe4b87ef8796b0acac7ac89b48da26073c7
Author: Oknet Xu <[email protected]>
AuthorDate: Wed Aug 1 20:25:38 2018 +0800
Optimize: make NetAccept::init_accept_loop has the logic similar to NetAccept::init_accept_per_thread
---
iocore/net/P_NetAccept.h | 4 ++--
iocore/net/UnixNetAccept.cc | 34 +++++++++++++++++++++-------------
iocore/net/UnixNetProcessor.cc | 35 +++++++++++------------------------
3 files changed, 34 insertions(+), 39 deletions(-)
diff --git a/iocore/net/P_NetAccept.h b/iocore/net/P_NetAccept.h
index 151753b..d3fe6c2 100644
--- a/iocore/net/P_NetAccept.h
+++ b/iocore/net/P_NetAccept.h
@@ -95,9 +95,9 @@ struct NetAccept : public Continuation {
virtual NetProcessor *getNetProcessor() const;
- void init_accept_loop(const char *);
virtual void init_accept(EThread *t = nullptr);
- virtual void init_accept_per_thread();
+ void init_accept_loop();
+ void init_accept_per_thread();
virtual void stop_accept();
virtual NetAccept *clone() const;
diff --git a/iocore/net/UnixNetAccept.cc b/iocore/net/UnixNetAccept.cc
index fb451d7..f4ee913 100644
--- a/iocore/net/UnixNetAccept.cc
+++ b/iocore/net/UnixNetAccept.cc
@@ -151,13 +151,28 @@ getNetAccept(int ID)
// This should be done for low latency, high connection rate sockets.
//
void
-NetAccept::init_accept_loop(const char *thr_name)
+NetAccept::init_accept_loop()
{
+ int i, n;
+ char thr_name[MAX_THREAD_NAME_LENGTH];
size_t stacksize;
-
+ if (do_listen(BLOCKING))
+ return;
REC_ReadConfigInteger(stacksize, "proxy.config.thread.default.stacksize");
SET_CONTINUATION_HANDLER(this, &NetAccept::acceptLoopEvent);
- eventProcessor.spawn_thread(this, thr_name, stacksize);
+
+ n = opt.accept_threads;
+ // Fill in accept thread from configuration if necessary.
+ if (n < 0) {
+ REC_ReadConfigInteger(n, "proxy.config.accept_threads");
+ }
+
+ for (i = 0; i < n; i++) {
+ NetAccept *a = (i < n - 1) ? clone() : this;
snprintf(thr_name, MAX_THREAD_NAME_LENGTH, "[ACCEPT %d:%d]", i, ats_ip_port_host_order(&server.accept_addr));
+ eventProcessor.spawn_thread(a, thr_name, stacksize);
Debug("iocore_net_accept_start", "Created accept thread #%d for port %d", i + 1, ats_ip_port_host_order(&server.accept_addr));
+ }
}
//
@@ -185,7 +200,7 @@ NetAccept::init_accept(EThread *t)
SET_HANDLER((NetAcceptHandler)&NetAccept::acceptEvent);
period = -HRTIME_MSECONDS(net_accept_period);
- t->schedule_every(this, period, opt.etype);
+ t->schedule_every(this, period);
}
void
@@ -209,14 +224,7 @@ NetAccept::init_accept_per_thread()
n = eventProcessor.thread_group[opt.etype]._count;
for (i = 0; i < n; i++) {
- NetAccept *a;
-
- if (i < n - 1) {
- a = clone();
- } else {
- a = this;
- }
-
+ NetAccept *a = (i < n - 1) ? clone() : this;
EThread *t = eventProcessor.thread_group[opt.etype]._thread[i];
PollDescriptor *pd = get_PollDescriptor(t);
@@ -225,7 +233,7 @@ NetAccept::init_accept_per_thread()
}
a->mutex = get_NetHandler(t)->mutex;
- t->schedule_every(a, period, opt.etype);
+ t->schedule_every(a, period);
}
}
diff --git a/iocore/net/UnixNetProcessor.cc b/iocore/net/UnixNetProcessor.cc
index c61e112..a1e69ec 100644
--- a/iocore/net/UnixNetProcessor.cc
+++ b/iocore/net/UnixNetProcessor.cc
@@ -90,7 +90,6 @@ UnixNetProcessor::accept_internal(Continuation *cont, int fd, AcceptOptions cons
ProxyMutex *mutex = this_ethread()->mutex.get();
int accept_threads = opt.accept_threads; // might be changed.
IpEndpoint accept_ip; // local binding address.
- char thr_name[MAX_THREAD_NAME_LENGTH];
NetAccept *na = createNetAccept(opt);
na->id = ink_atomic_increment(&net_accept_number, 1);
@@ -144,32 +143,20 @@ UnixNetProcessor::accept_internal(Continuation *cont, int fd, AcceptOptions cons
if (opt.frequent_accept) { // true
if (accept_threads > 0) {
- if (0 == na->do_listen(BLOCKING)) {
- for (int i = 1; i < accept_threads; ++i) {
- NetAccept *a = na->clone();
- snprintf(thr_name, MAX_THREAD_NAME_LENGTH, "[ACCEPT %d:%d]", i - 1, ats_ip_port_host_order(&accept_ip));
- a->init_accept_loop(thr_name);
- Debug("iocore_net_accept_start", "Created accept thread #%d for port %d", i, ats_ip_port_host_order(&accept_ip));
- }
-
- // Start the "template" accept thread last.
- Debug("iocore_net_accept_start", "Created accept thread #%d for port %d", accept_threads,
- ats_ip_port_host_order(&accept_ip));
- snprintf(thr_name, MAX_THREAD_NAME_LENGTH, "[ACCEPT %d:%d]", accept_threads - 1, ats_ip_port_host_order(&accept_ip));
- na->init_accept_loop(thr_name);
-#if !TS_USE_POSIX_CAP
- } else if (fd == ts::NO_FD && opt.local_port < 1024 && 0 != geteuid()) {
- // TS-2054 - we can fail to bind a privileged port if we waited for cache and we tried
- // to open the socket in do_listen and we're not using libcap (POSIX_CAP) and so have reduced
- // privilege. Mention this to the admin.
- Warning("Failed to open reserved port %d due to lack of process privilege. Use POSIX capabilities if possible or disable "
- "wait_for_cache.",
- opt.local_port);
-#endif // TS_USE_POSIX_CAP
- }
+ na->init_accept_loop();
} else {
na->init_accept_per_thread();
}
+#if !TS_USE_POSIX_CAP
+ if (fd == ts::NO_FD && opt.local_port < 1024 && 0 != geteuid()) {
+ // TS-2054 - we can fail to bind a privileged port if we waited for cache and we tried
+ // to open the socket in do_listen and we're not using libcap (POSIX_CAP) and so have reduced
+ // privilege. Mention this to the admin.
+ Warning("Failed to open reserved port %d due to lack of process privilege. Use POSIX capabilities if possible or disable "
+ "wait_for_cache.",
+ opt.local_port);
+ }
+#endif // TS_USE_POSIX_CAP
} else {
na->init_accept(nullptr);
}