This is an automated email from the ASF dual-hosted git repository.
shinrich pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/trafficserver.git
The following commit(s) were added to refs/heads/master by this push:
new 72ed044 Replace TRY_MUTEX_LOCK_FOR with equivalent TRY_MUTEX_LOCK
72ed044 is described below
commit 72ed0445f2b3d9f6dbf8db087f32c8d2c1893418
Author: Susan Hinrichs <[email protected]>
AuthorDate: Tue Aug 14 17:47:55 2018 +0000
Replace TRY_MUTEX_LOCK_FOR with equivalent TRY_MUTEX_LOCK
---
iocore/eventsystem/I_Lock.h | 38 --------------------------------------
iocore/eventsystem/UnixEThread.cc | 2 +-
iocore/hostdb/HostDB.cc | 14 +++++++-------
iocore/net/SSLNetVConnection.cc | 2 +-
iocore/net/UnixNetVConnection.cc | 4 ++--
iocore/net/UnixUDPNet.cc | 2 +-
6 files changed, 12 insertions(+), 50 deletions(-)
diff --git a/iocore/eventsystem/I_Lock.h b/iocore/eventsystem/I_Lock.h
index e97c7f8..b56ec79 100644
--- a/iocore/eventsystem/I_Lock.h
+++ b/iocore/eventsystem/I_Lock.h
@@ -70,42 +70,8 @@
*/
#define MUTEX_TRY_LOCK(_l, _m, _t) MutexTryLock _l(MakeSourceLocation(), (char *)nullptr, _m, _t)
-/**
- Attempts to acquire the lock to the ProxyMutex.
-
- This macro performs up to the specified number of attempts to
- acquire the lock on the ProxyMutex object. It does so by running
- a busy loop (busy wait) '_sc' times. You should use it with care
- since it blocks the thread during that time and wastes CPU time.
-
- @param _l Arbitrary name for the lock to use in this call (lock variable)
- @param _m A pointer to (or address of) a ProxyMutex object
- @param _t The current EThread executing your code.
- @param _sc The number of attempts or spin count. It must be a positive value.
-
-*/
-#define MUTEX_TRY_LOCK_SPIN(_l, _m, _t, _sc) MutexTryLock _l(MakeSourceLocation(), (char *)nullptr, _m, _t, _sc)
-
-/**
- Attempts to acquire the lock to the ProxyMutex.
-
- This macro attempts to acquire the lock to the specified ProxyMutex
- object in a non-blocking manner. After using the macro you can
- see if it was successful by comparing the lock variable with true
- or false (the variable name passed in the _l parameter).
-
- @param _l Arbitrary name for the lock to use in this call (lock variable)
- @param _m A pointer to (or address of) a ProxyMutex object
- @param _t The current EThread executing your code.
- @param _c Continuation whose mutex will be attempted to lock.
-
-*/
-
-#define MUTEX_TRY_LOCK_FOR(_l, _m, _t, _c) MutexTryLock _l(MakeSourceLocation(), nullptr, _m, _t)
#else // DEBUG
#define MUTEX_TRY_LOCK(_l, _m, _t) MutexTryLock _l(_m, _t)
-#define MUTEX_TRY_LOCK_SPIN(_l, _m, _t, _sc) MutexTryLock _l(_m, _t, _sc)
-#define MUTEX_TRY_LOCK_FOR(_l, _m, _t, _c) MutexTryLock _l(_m, _t)
#endif // DEBUG
/**
@@ -126,12 +92,8 @@
// DEPRECATED DEPRECATED DEPRECATED
#ifdef DEBUG
#define MUTEX_TAKE_TRY_LOCK(_m, _t) Mutex_trylock(MakeSourceLocation(), (char *)nullptr, _m, _t)
-#define MUTEX_TAKE_TRY_LOCK_FOR(_m, _t, _c) Mutex_trylock(MakeSourceLocation(), (char *)nullptr, _m, _t)
-#define MUTEX_TAKE_TRY_LOCK_FOR_SPIN(_m, _t, _c, _sc) Mutex_trylock_spin(MakeSourceLocation(), nullptr, _m, _t, _sc)
#else
#define MUTEX_TAKE_TRY_LOCK(_m, _t) Mutex_trylock(_m, _t)
-#define MUTEX_TAKE_TRY_LOCK_FOR(_m, _t, _c) Mutex_trylock(_m, _t)
-#define MUTEX_TAKE_TRY_LOCK_FOR_SPIN(_m, _t, _c, _sc) Mutex_trylock_spin(_m, _t, _sc)
#endif
#ifdef DEBUG
diff --git a/iocore/eventsystem/UnixEThread.cc b/iocore/eventsystem/UnixEThread.cc
index b66b734..a5c72c5 100644
--- a/iocore/eventsystem/UnixEThread.cc
+++ b/iocore/eventsystem/UnixEThread.cc
@@ -118,7 +118,7 @@ void
EThread::process_event(Event *e, int calling_code)
{
ink_assert((!e->in_the_prot_queue && !e->in_the_priority_queue));
- MUTEX_TRY_LOCK_FOR(lock, e->mutex, this, e->continuation);
+ MUTEX_TRY_LOCK(lock, e->mutex, this);
if (!lock.is_locked()) {
e->timeout_at = cur_time + DELAY_FOR_RETRY;
EventQueueExternal.enqueue_local(e);
diff --git a/iocore/hostdb/HostDB.cc b/iocore/hostdb/HostDB.cc
index f62cf24..231c23e 100644
--- a/iocore/hostdb/HostDB.cc
+++ b/iocore/hostdb/HostDB.cc
@@ -1204,7 +1204,7 @@ HostDBContinuation::dnsPendingEvent(int event, Event *e)
}
if (event == EVENT_INTERVAL) {
// we timed out, return a failure to the user
- MUTEX_TRY_LOCK_FOR(lock, action.mutex, ((Event *)e)->ethread, action.continuation);
+ MUTEX_TRY_LOCK(lock, action.mutex, ((Event *)e)->ethread);
if (!lock.is_locked()) {
timeout = eventProcessor.schedule_in(this, HOST_DB_RETRY_PERIOD);
return EVENT_CONT;
@@ -1259,7 +1259,7 @@ HostDBContinuation::dnsEvent(int event, HostEnt *e)
hostdb_cont_free(this);
return EVENT_DONE;
}
- MUTEX_TRY_LOCK_FOR(lock, action.mutex, thread, action.continuation);
+ MUTEX_TRY_LOCK(lock, action.mutex, thread);
if (!lock.is_locked()) {
timeout = thread->schedule_in(this, HOST_DB_RETRY_PERIOD);
return EVENT_CONT;
@@ -1494,12 +1494,12 @@ HostDBContinuation::dnsEvent(int event, HostEnt *e)
// Since reply_to_cont will call the hanlder on the action.continuation, it is important that we hold
// that mutex.
bool need_to_reschedule = true;
- MUTEX_TRY_LOCK_FOR(lock, action.mutex, thread, action.continuation);
+ MUTEX_TRY_LOCK(lock, action.mutex, thread);
if (lock.is_locked()) {
need_to_reschedule = !action.cancelled;
if (!action.cancelled) {
if (action.continuation->mutex) {
- MUTEX_TRY_LOCK_FOR(lock2, action.continuation->mutex, thread, action.continuation);
+ MUTEX_TRY_LOCK(lock2, action.continuation->mutex, thread);
if (lock2.is_locked()) {
reply_to_cont(action.continuation, r, is_srv());
need_to_reschedule = false;
@@ -1534,7 +1534,7 @@ HostDBContinuation::iterateEvent(int event, Event *e)
ink_assert(!link.prev && !link.next);
EThread *t = e ? e->ethread : this_ethread();
- MUTEX_TRY_LOCK_FOR(lock, action.mutex, t, action.continuation);
+ MUTEX_TRY_LOCK(lock, action.mutex, t);
if (!lock.is_locked()) {
Debug("hostdb", "iterateEvent event=%d eventp=%p: reschedule due to not getting action mutex", event, e);
mutex->thread_holding->schedule_in(this, HOST_DB_RETRY_PERIOD);
@@ -1550,7 +1550,7 @@ HostDBContinuation::iterateEvent(int event, Event *e)
if (current_iterate_pos < hostDB.refcountcache->partition_count()) {
// TODO: configurable number at a time?
ProxyMutex *bucket_mutex = hostDB.refcountcache->get_partition(current_iterate_pos).lock.get();
- MUTEX_TRY_LOCK_FOR(lock_bucket, bucket_mutex, t, this);
+ MUTEX_TRY_LOCK(lock_bucket, bucket_mutex, t);
if (!lock_bucket.is_locked()) {
// we couldn't get the bucket lock, let's just reschedule and try later.
Debug("hostdb", "iterateEvent event=%d eventp=%p: reschedule due to not getting bucket mutex", event, e);
@@ -1591,7 +1591,7 @@ HostDBContinuation::probeEvent(int /* event ATS_UNUSED */, Event *e)
ink_assert(!link.prev && !link.next);
EThread *t = e ? e->ethread : this_ethread();
- MUTEX_TRY_LOCK_FOR(lock, action.mutex, t, action.continuation);
+ MUTEX_TRY_LOCK(lock, action.mutex, t);
if (!lock.is_locked()) {
mutex->thread_holding->schedule_in(this, HOST_DB_RETRY_PERIOD);
return EVENT_CONT;
diff --git a/iocore/net/SSLNetVConnection.cc b/iocore/net/SSLNetVConnection.cc
index eb78db2..b418c43 100644
--- a/iocore/net/SSLNetVConnection.cc
+++ b/iocore/net/SSLNetVConnection.cc
@@ -458,7 +458,7 @@ SSLNetVConnection::net_read_io(NetHandler *nh, EThread *lthread)
return;
}
- MUTEX_TRY_LOCK_FOR(lock, s->vio.mutex, lthread, s->vio.cont);
+ MUTEX_TRY_LOCK(lock, s->vio.mutex, lthread);
if (!lock.is_locked()) {
readReschedule(nh);
return;
diff --git a/iocore/net/UnixNetVConnection.cc b/iocore/net/UnixNetVConnection.cc
index d2834a3..c6f591d 100644
--- a/iocore/net/UnixNetVConnection.cc
+++ b/iocore/net/UnixNetVConnection.cc
@@ -188,7 +188,7 @@ read_from_net(NetHandler *nh, UnixNetVConnection *vc, EThread *thread)
ProxyMutex *mutex = thread->mutex.get();
int64_t r = 0;
- MUTEX_TRY_LOCK_FOR(lock, s->vio.mutex, thread, s->vio.cont);
+ MUTEX_TRY_LOCK(lock, s->vio.mutex, thread);
if (!lock.is_locked()) {
read_reschedule(nh, vc);
@@ -367,7 +367,7 @@ write_to_net_io(NetHandler *nh, UnixNetVConnection *vc, EThread *thread)
NetState *s = &vc->write;
ProxyMutex *mutex = thread->mutex.get();
- MUTEX_TRY_LOCK_FOR(lock, s->vio.mutex, thread, s->vio.cont);
+ MUTEX_TRY_LOCK(lock, s->vio.mutex, thread);
if (!lock.is_locked() || lock.get_mutex() != s->vio.mutex.get()) {
write_reschedule(nh, vc);
diff --git a/iocore/net/UnixUDPNet.cc b/iocore/net/UnixUDPNet.cc
index 71b45bb..3f73b1b 100644
--- a/iocore/net/UnixUDPNet.cc
+++ b/iocore/net/UnixUDPNet.cc
@@ -217,7 +217,7 @@ UDPNetProcessorInternal::udp_callback(UDPNetHandler *nh, UDPConnection *xuc, ETh
UnixUDPConnection *uc = (UnixUDPConnection *)xuc;
if (uc->continuation && uc->mutex) {
- MUTEX_TRY_LOCK_FOR(lock, uc->mutex, thread, uc->continuation);
+ MUTEX_TRY_LOCK(lock, uc->mutex, thread);
if (!lock.is_locked()) {
return 1;
}