This is an automated email from the ASF dual-hosted git repository.
amc pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/trafficserver.git
The following commit(s) were added to refs/heads/master by this push:
new 2bbcfe6 Thread class cleanup. For #1997.
2bbcfe6 is described below
commit 2bbcfe6d6b70b8ff32361cfd1dc875a5c8be8894
Author: Alan M. Carroll <[email protected]>
AuthorDate: Mon Jun 5 21:21:09 2017 -0500
Thread class cleanup.
For #1997.
---
iocore/eventsystem/I_Thread.h | 32 +++++++++++++++-------------
iocore/eventsystem/Thread.cc | 36 +++++++++++++-------------------
iocore/eventsystem/UnixEventProcessor.cc | 6 +++---
3 files changed, 35 insertions(+), 39 deletions(-)
diff --git a/iocore/eventsystem/I_Thread.h b/iocore/eventsystem/I_Thread.h
index 9e61cd3..f053dec 100644
--- a/iocore/eventsystem/I_Thread.h
+++ b/iocore/eventsystem/I_Thread.h
@@ -63,15 +63,18 @@
#error "include I_EventSystem.h or P_EventSystem.h"
#endif
+#include <functional>
+
#include "ts/ink_platform.h"
#include "ts/ink_thread.h"
#include "I_ProxyAllocator.h"
-class Thread;
class ProxyMutex;
-typedef void *(*ThreadFunction)(void *arg);
-static const int MAX_THREAD_NAME_LENGTH = 16;
+constexpr int MAX_THREAD_NAME_LENGTH = 16;
+
+/// The signature of a function to be called by a thread.
+using ThreadFunction = std::function<void()>;
/**
Base class for the threads in the Event System. Thread is the base
@@ -112,10 +115,13 @@ public:
Ptr<ProxyMutex> mutex;
// PRIVATE
- void set_specific();
Thread();
+ Thread(const Thread &) = delete;
+ Thread &operator=(const Thread &) = delete;
virtual ~Thread();
+ void set_specific();
+
static ink_hrtime cur_time;
inkcoreapi static ink_thread_key thread_data_key;
@@ -138,18 +144,16 @@ public:
ProxyAllocator ioAllocator;
ProxyAllocator ioBlockAllocator;
-private:
- // prevent unauthorized copies (Not implemented)
- Thread(const Thread &);
- Thread &operator=(const Thread &);
-
public:
-  ink_thread start(const char *name, size_t stacksize, ThreadFunction f, void *a, void *stack);
+  /** Start the underlying thread.
+
+      The thread name is set to @a name. The stack for the thread is either @a stack or, if that is
+      @c nullptr, a stack of size @a stacksize is allocated and used. If @a f is present and valid it
+      is called in the thread context. Otherwise the method @c execute is invoked.
+  */
+  ink_thread start(const char *name, void *stack, size_t stacksize, ThreadFunction const &f = ThreadFunction());
- virtual void
- execute()
- {
- }
+ virtual void execute() = 0;
/** Get the current ATS high resolution time.
This gets a cached copy of the time so it is very fast and reasonably
accurate.
diff --git a/iocore/eventsystem/Thread.cc b/iocore/eventsystem/Thread.cc
index 8be6140..98f0fd7 100644
--- a/iocore/eventsystem/Thread.cc
+++ b/iocore/eventsystem/Thread.cc
@@ -37,7 +37,7 @@
static ink_thread_key init_thread_key();
-ink_hrtime Thread::cur_time = 0;
+ink_hrtime Thread::cur_time = ink_get_hrtime_internal();
inkcoreapi ink_thread_key Thread::thread_data_key = init_thread_key();
Thread::Thread()
@@ -54,16 +54,10 @@ Thread::~Thread()
MUTEX_UNTAKE_LOCK(mutex, (EThread *)this);
}
-static void
-key_destructor(void *value)
-{
- (void)value;
-}
-
ink_thread_key
init_thread_key()
{
- ink_thread_key_create(&Thread::thread_data_key, key_destructor);
+ ink_thread_key_create(&Thread::thread_data_key, nullptr);
return Thread::thread_data_key;
}
@@ -72,42 +66,40 @@ init_thread_key()
///////////////////////////////////////////////
struct thread_data_internal {
- ThreadFunction f;
- void *a;
- Thread *me;
- char name[MAX_THREAD_NAME_LENGTH];
+  ThreadFunction f;                  ///< Function to execute in the thread.
+ Thread *me; ///< The class instance.
+ char name[MAX_THREAD_NAME_LENGTH]; ///< Name for the thread.
};
static void *
spawn_thread_internal(void *a)
{
- thread_data_internal *p = (thread_data_internal *)a;
+ auto *p = static_cast<thread_data_internal *>(a);
p->me->set_specific();
ink_set_thread_name(p->name);
+
if (p->f) {
- p->f(p->a);
+ p->f();
} else {
p->me->execute();
}
- ats_free(a);
+
+ delete p;
return nullptr;
}
ink_thread
-Thread::start(const char *name, size_t stacksize, ThreadFunction f, void *a, void *stack)
+Thread::start(const char *name, void *stack, size_t stacksize, ThreadFunction const &f)
{
-  thread_data_internal *p = (thread_data_internal *)ats_malloc(sizeof(thread_data_internal));
+ auto *p = new thread_data_internal{f, this, ""};
- p->f = f;
- p->a = a;
- p->me = this;
- memset(p->name, 0, MAX_THREAD_NAME_LENGTH);
+ ink_zero(p->name);
ink_strlcpy(p->name, name, MAX_THREAD_NAME_LENGTH);
if (stacksize == 0) {
stacksize = DEFAULT_STACKSIZE;
}
-  tid = ink_thread_create(spawn_thread_internal, (void *)p, 0, stacksize, stack);
+ tid = ink_thread_create(spawn_thread_internal, p, 0, stacksize, stack);
return tid;
}
diff --git a/iocore/eventsystem/UnixEventProcessor.cc b/iocore/eventsystem/UnixEventProcessor.cc
index 48f2074..3e25bc1 100644
--- a/iocore/eventsystem/UnixEventProcessor.cc
+++ b/iocore/eventsystem/UnixEventProcessor.cc
@@ -55,7 +55,7 @@ EventProcessor::spawn_event_threads(int n_threads, const char *et_name, size_t s
n_threads_for_type[new_thread_group_id] = n_threads;
for (i = 0; i < n_threads; i++) {
snprintf(thr_name, MAX_THREAD_NAME_LENGTH, "[%s %d]", et_name, i);
-    eventthread[new_thread_group_id][i]->start(thr_name, stacksize, nullptr, nullptr, nullptr);
+ eventthread[new_thread_group_id][i]->start(thr_name, nullptr, stacksize);
}
n_thread_groups++;
@@ -237,7 +237,7 @@ EventProcessor::start(int n_event_threads, size_t stacksize)
#endif // TS_USE_HWLOC
// Start our new thread with our new stack.
-  tid = all_ethreads[i]->start(thr_name, stacksize, nullptr, nullptr, stack);
+ tid = all_ethreads[i]->start(thr_name, stack, stacksize);
stack = nullptr;
#if TS_USE_HWLOC
@@ -294,7 +294,7 @@ EventProcessor::spawn_thread(Continuation *cont, const char *thr_name, size_t st
++n_dthreads; // Be very sure this is after the array element update.
}
- e->ethread->start(thr_name, stacksize, nullptr, nullptr, nullptr);
+ e->ethread->start(thr_name, nullptr, stacksize);
return e;
}
--
To stop receiving notification emails like this one, please contact
['"[email protected]" <[email protected]>'].