Module Name:	src
Committed By:	ad
Date:		Wed Jun 10 22:45:15 UTC 2020
Modified Files:
	src/lib/libpthread: pthread.c pthread_cond.c pthread_int.h
	    pthread_mutex.c pthread_types.h

Log Message:
- Make the pthread_condvar and pthread_mutex wait/wakeup state live on the
  stack rather than in pthread_t, so there's less chance of bad things
  happening if someone calls (for example) pthread_cond_broadcast() from a
  signal handler.

- Remove all the deferred waiter handling except for the one case that
  really matters, which is transferring waiters from condvar -> mutex on
  wakeup, and do that by splicing the condvar's waiters onto the mutex.

- Remove the mutex waiters bit, as it's another complication that's not
  strictly needed.

To generate a diff of this commit:
cvs rdiff -u -r1.174 -r1.175 src/lib/libpthread/pthread.c
cvs rdiff -u -r1.73 -r1.74 src/lib/libpthread/pthread_cond.c
cvs rdiff -u -r1.106 -r1.107 src/lib/libpthread/pthread_int.h
cvs rdiff -u -r1.79 -r1.80 src/lib/libpthread/pthread_mutex.c
cvs rdiff -u -r1.24 -r1.25 src/lib/libpthread/pthread_types.h

Please note that diffs are not public domain; they are subject to the
copyright notices on the relevant files.
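The heart of the change is that the record identifying a blocked thread now
lives in the blocking function's stack frame and is published onto a
lock-free LIFO list with compare-and-swap, instead of being kept in
pthread_t.  The following stand-alone sketch illustrates that pattern; it is
not the committed code.  It uses C11 atomics in place of NetBSD's
atomic_cas_ptr() and membars, and the names (struct waiter, waiter_push,
list_head) are invented for the example:

	#include <stdatomic.h>
	#include <stdio.h>

	struct waiter {
		struct waiter *next;	/* list linkage, lives on our stack */
		int lid;		/* whom to wake; cleared on release */
	};

	static _Atomic(struct waiter *) list_head;

	static void
	waiter_push(struct waiter *w)
	{
		struct waiter *head = atomic_load(&list_head);

		/* Retry the CAS until 'w' is published as the new head. */
		do {
			w->next = head;
		} while (!atomic_compare_exchange_weak(&list_head, &head, w));
	}

	int
	main(void)
	{
		struct waiter w = { .next = NULL, .lid = 100 }; /* on stack */

		waiter_push(&w);
		printf("head waiter lid: %d\n", atomic_load(&list_head)->lid);
		return 0;
	}

Because the record becomes reachable by other threads the moment the CAS
succeeds, the waiting thread must not return (destroying its stack frame)
until it has been released; the diffs below enforce that by looping in
_lwp_park() until the record's lid field is cleared.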
Modified files:

Index: src/lib/libpthread/pthread.c
diff -u src/lib/libpthread/pthread.c:1.174 src/lib/libpthread/pthread.c:1.175
--- src/lib/libpthread/pthread.c:1.174	Thu Jun  4 00:45:32 2020
+++ src/lib/libpthread/pthread.c	Wed Jun 10 22:45:15 2020
@@ -1,4 +1,4 @@
-/*	$NetBSD: pthread.c,v 1.174 2020/06/04 00:45:32 joerg Exp $	*/
+/*	$NetBSD: pthread.c,v 1.175 2020/06/10 22:45:15 ad Exp $	*/

 /*-
  * Copyright (c) 2001, 2002, 2003, 2006, 2007, 2008, 2020
@@ -31,7 +31,7 @@
  */

 #include <sys/cdefs.h>
-__RCSID("$NetBSD: pthread.c,v 1.174 2020/06/04 00:45:32 joerg Exp $");
+__RCSID("$NetBSD: pthread.c,v 1.175 2020/06/10 22:45:15 ad Exp $");

 #define	__EXPOSE_STACK 1

@@ -297,11 +297,7 @@ pthread__initthread(pthread_t t)

 	t->pt_self = t;
 	t->pt_magic = PT_MAGIC;
-	t->pt_willpark = 0;
-	t->pt_waiters[0] = 0;
-	t->pt_nwaiters = 0;
 	t->pt_sleepobj = NULL;
-	t->pt_signalled = 0;
 	t->pt_havespecific = 0;
 	t->pt_lwpctl = &pthread__dummy_lwpctl;

@@ -602,48 +598,6 @@ pthread_resume_np(pthread_t thread)
 	return errno;
 }

-/*
- * Wake all deferred waiters hanging off self.
- *
- * It's possible for threads to have called _lwp_exit() before we wake them,
- * because of cancellation and timeout, so ESRCH is tolerated here.  If a
- * thread exits and its LID is reused, and the a thread receives an wakeup
- * meant for the previous incarnation of the LID, no harm will be done.
- */
-void
-pthread__clear_waiters(pthread_t self)
-{
-	int rv;
-
-	pthread__smt_wake();
-
-	switch (self->pt_nwaiters) {
-	case 0:
-		break;
-	case 1:
-		if (self->pt_willpark) {
-			break;
-		}
-		rv = _lwp_unpark(self->pt_waiters[0], NULL);
-		self->pt_waiters[0] = 0;
-		self->pt_nwaiters = 0;
-		if (rv != 0 && errno != ESRCH) {
-			pthread__errorfunc(__FILE__, __LINE__, __func__,
-			    "_lwp_unpark failed: %d", errno);
-		}
-		break;
-	default:
-		rv = _lwp_unpark_all(self->pt_waiters, self->pt_nwaiters, NULL);
-		self->pt_waiters[0] = 0;
-		self->pt_nwaiters = 0;
-		if (rv != 0 && errno != ESRCH) {
-			pthread__errorfunc(__FILE__, __LINE__, __func__,
-			    "_lwp_unpark_all failed: %d", errno);
-		}
-		break;
-	}
-}
-
 void
 pthread_exit(void *retval)
 {
@@ -658,7 +612,6 @@ pthread_exit(void *retval)
 	self = pthread__self();

 	/* Disable cancellability. */
-	self->pt_willpark = 0;
 	pthread_mutex_lock(&self->pt_lock);
 	self->pt_flags |= PT_FLAG_CS_DISABLED;
 	self->pt_cancel = 0;
@@ -692,14 +645,10 @@ pthread_exit(void *retval)
 	if (self->pt_flags & PT_FLAG_DETACHED) {
 		/* pthread__reap() will drop the lock. */
 		pthread__reap(self);
-		pthread__assert(!self->pt_willpark);
-		pthread__clear_waiters(self);
 		_lwp_exit();
 	} else {
 		self->pt_state = PT_STATE_ZOMBIE;
-		pthread__assert(!self->pt_willpark);
 		pthread_mutex_unlock(&self->pt_lock);
-		pthread__clear_waiters(self);
 		/* Note: name will be freed by the joiner. */
 		_lwp_exit();
 	}
@@ -1166,9 +1115,7 @@ pthread__park(pthread_t self, pthread_mu
 {
 	int rv, error;

-	self->pt_willpark = 1;
 	pthread_mutex_unlock(lock);
-	self->pt_willpark = 0;

 	/*
 	 * Wait until we are awoken by a pending unpark operation,
@@ -1194,13 +1141,8 @@ pthread__park(pthread_t self, pthread_mu
 	 * If we deferred unparking a thread, arrange to
 	 * have _lwp_park() restart it before blocking.
 	 */
-	pthread__assert(self->pt_nwaiters <= 1);
-	pthread__assert(self->pt_nwaiters != 0 ||
-	    self->pt_waiters[0] == 0);
 	error = _lwp_park(CLOCK_REALTIME, TIMER_ABSTIME,
-	    __UNCONST(abstime), self->pt_waiters[0], NULL, NULL);
-	self->pt_waiters[0] = 0;
-	self->pt_nwaiters = 0;
+	    __UNCONST(abstime), 0, NULL, NULL);
 	if (error != 0) {
 		switch (rv = errno) {
 		case EINTR:
@@ -1230,31 +1172,34 @@ pthread__unpark(pthread_queue_t *queue, 
 	pthread_t target;

 	target = PTQ_FIRST(queue);
-	if (self->pt_nwaiters == pthread__unpark_max) {
-		pthread__clear_waiters(self);
-	}
 	target->pt_sleepobj = NULL;
-	self->pt_waiters[self->pt_nwaiters++] = target->pt_lid;
 	PTQ_REMOVE(queue, target, pt_sleep);
-	pthread__mutex_deferwake(self, interlock);
+	(void)_lwp_unpark(target->pt_lid, NULL);
 }

 void
 pthread__unpark_all(pthread_queue_t *queue, pthread_t self,
     pthread_mutex_t *interlock)
 {
-	const size_t max = pthread__unpark_max;
+	lwpid_t lids[PTHREAD__UNPARK_MAX];
+	const size_t mlid = pthread__unpark_max;
 	pthread_t target;
+	size_t nlid = 0;

 	PTQ_FOREACH(target, queue, pt_sleep) {
-		if (self->pt_nwaiters == max) {
-			pthread__clear_waiters(self);
+		if (nlid == mlid) {
+			(void)_lwp_unpark_all(lids, nlid, NULL);
+			nlid = 0;
 		}
 		target->pt_sleepobj = NULL;
-		self->pt_waiters[self->pt_nwaiters++] = target->pt_lid;
+		lids[nlid++] = target->pt_lid;
 	}
 	PTQ_INIT(queue);
-	pthread__mutex_deferwake(self, interlock);
+	if (nlid == 1) {
+		(void)_lwp_unpark(lids[0], NULL);
+	} else if (nlid > 1) {
+		(void)_lwp_unpark_all(lids, nlid, NULL);
+	}
 }

 #undef	OOPS
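With the per-thread pt_waiters[] array gone, pthread__unpark_all() above
batches LIDs into an array on its own stack and flushes it to the kernel
whenever the array fills.  A stand-alone sketch of that loop follows, with
invented names: wake_one() and wake_many() stand in for the _lwp_unpark()
and _lwp_unpark_all() system calls, and UNPARK_MAX for pthread__unpark_max:

	#include <stddef.h>
	#include <stdio.h>

	#define UNPARK_MAX 32

	static void wake_one(int lid) { printf("wake %d\n", lid); }

	static void
	wake_many(const int *lids, size_t n)
	{
		for (size_t i = 0; i < n; i++)
			printf("wake %d (batch)\n", lids[i]);
	}

	static void
	wake_all(const int *queue, size_t nqueue)
	{
		int lids[UNPARK_MAX];	/* fixed batch, on our stack */
		size_t nlid = 0;

		for (size_t i = 0; i < nqueue; i++) {
			if (nlid == UNPARK_MAX) {	/* full: flush */
				wake_many(lids, nlid);
				nlid = 0;
			}
			lids[nlid++] = queue[i];
		}
		if (nlid == 1)
			wake_one(lids[0]);	/* common case: one waiter */
		else if (nlid > 1)
			wake_many(lids, nlid);
	}

	int
	main(void)
	{
		int q[3] = { 10, 11, 12 };

		wake_all(q, 3);
		return 0;
	}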
Index: src/lib/libpthread/pthread_cond.c
diff -u src/lib/libpthread/pthread_cond.c:1.73 src/lib/libpthread/pthread_cond.c:1.74
--- src/lib/libpthread/pthread_cond.c:1.73	Sat Jun  6 22:23:59 2020
+++ src/lib/libpthread/pthread_cond.c	Wed Jun 10 22:45:15 2020
@@ -1,4 +1,4 @@
-/*	$NetBSD: pthread_cond.c,v 1.73 2020/06/06 22:23:59 ad Exp $	*/
+/*	$NetBSD: pthread_cond.c,v 1.74 2020/06/10 22:45:15 ad Exp $	*/

 /*-
  * Copyright (c) 2001, 2006, 2007, 2008, 2020 The NetBSD Foundation, Inc.
@@ -30,7 +30,7 @@
  */

 #include <sys/cdefs.h>
-__RCSID("$NetBSD: pthread_cond.c,v 1.73 2020/06/06 22:23:59 ad Exp $");
+__RCSID("$NetBSD: pthread_cond.c,v 1.74 2020/06/10 22:45:15 ad Exp $");

 #include <stdlib.h>
 #include <errno.h>
@@ -111,8 +111,9 @@ int
 pthread_cond_timedwait(pthread_cond_t *cond, pthread_mutex_t *mutex,
 		       const struct timespec *abstime)
 {
-	pthread_t self, next, waiters;
-	int retval, cancel;
+	struct pthread__waiter waiter, *next, *waiters;
+	pthread_t self;
+	int error, cancel;
 	clockid_t clkid = pthread_cond_getclock(cond);

 	if (__predict_false(__uselibcstub))
@@ -126,7 +127,7 @@ pthread_cond_timedwait(pthread_cond_t *c
 	    mutex->ptm_owner != NULL);

 	self = pthread__self();
-	pthread__assert(!self->pt_condwait);
+	pthread__assert(self->pt_lid != 0);

 	if (__predict_false(self->pt_cancel)) {
 		pthread__cancelled();
@@ -134,44 +135,37 @@ pthread_cond_timedwait(pthread_cond_t *c

 	/* Note this thread as waiting on the CV. */
 	cond->ptc_mutex = mutex;
-	self->pt_condwait = 1;
 	for (waiters = cond->ptc_waiters;; waiters = next) {
-		self->pt_condnext = waiters;
+		waiter.lid = self->pt_lid;
+		waiter.next = waiters;
 #ifndef PTHREAD__ATOMIC_IS_MEMBAR
 		membar_producer();
 #endif
-		next = atomic_cas_ptr(&cond->ptc_waiters, waiters, self);
-		if (next == waiters)
+		next = atomic_cas_ptr(&cond->ptc_waiters, waiters, &waiter);
+		if (__predict_true(next == waiters)) {
 			break;
+		}
 	}

 	/* Drop the interlock */
-	self->pt_willpark = 1;
 	pthread_mutex_unlock(mutex);
-	self->pt_willpark = 0;
+	error = 0;

-	do {
-		pthread__assert(self->pt_nwaiters <= 1);
-		pthread__assert(self->pt_nwaiters != 0 ||
-		    self->pt_waiters[0] == 0);
-		retval = _lwp_park(clkid, TIMER_ABSTIME, __UNCONST(abstime),
-		    self->pt_waiters[0], NULL, NULL);
-		self->pt_waiters[0] = 0;
-		self->pt_nwaiters = 0;
-		if (__predict_false(retval != 0)) {
-			if (errno == EINTR || errno == EALREADY ||
-			    errno == ESRCH) {
-				retval = 0;
-			} else {
-				retval = errno;
-			}
+	while (waiter.lid && !(cancel = self->pt_cancel)) {
+		int rv = _lwp_park(clkid, TIMER_ABSTIME, __UNCONST(abstime),
+		    0, NULL, NULL);
+		if (rv == 0) {
+			continue;
+		}
+		if (errno != EINTR && errno != EALREADY && errno != ESRCH) {
+			error = errno;
+			break;
 		}
-		cancel = self->pt_cancel;
-	} while (self->pt_condwait && !cancel && !retval);
+	}

 	/*
 	 * If this thread absorbed a wakeup from pthread_cond_signal() and
-	 * cannot take the wakeup, we must ensure that another thread does.
+	 * cannot take the wakeup, we should ensure that another thread does.
 	 *
 	 * And if awoken early, we may still be on the waiter list and must
 	 * remove self.
@@ -181,18 +175,20 @@ pthread_cond_timedwait(pthread_cond_t *c
 	 * - wakeup could be deferred until mutex release
 	 * - it would be mixing up two sets of waitpoints
 	 */
-	if (__predict_false(self->pt_condwait | cancel | retval)) {
+	if (__predict_false(cancel | error)) {
 		pthread_cond_broadcast(cond);

 		/*
 		 * Might have raced with another thread to do the wakeup.
-		 * Wait until released - this thread can't wait on a condvar
-		 * again until the data structures are no longer in us.
+		 * Wait until released, otherwise "waiter" is still globally
+		 * visible.
 		 */
-		while (self->pt_condwait) {
-			(void)_lwp_park(CLOCK_REALTIME, TIMER_ABSTIME, NULL,
-			    0, NULL, NULL);
+		while (__predict_false(waiter.lid)) {
+			(void)_lwp_park(CLOCK_MONOTONIC, 0, NULL, 0, NULL,
+			    NULL);
 		}
+	} else {
+		pthread__assert(!waiter.lid);
 	}

 	/*
@@ -204,7 +200,7 @@ pthread_cond_timedwait(pthread_cond_t *c
 		pthread__cancelled();
 	}

-	return retval;
+	return error;
 }

 int
@@ -219,8 +215,9 @@ pthread_cond_wait(pthread_cond_t *cond, 
 int
 pthread_cond_signal(pthread_cond_t *cond)
 {
-	pthread_t self, thread, next;
+	struct pthread__waiter *waiter, *next;
 	pthread_mutex_t *mutex;
+	pthread_t self;

 	if (__predict_false(__uselibcstub))
 		return __libc_cond_signal_stub(cond);
@@ -231,33 +228,30 @@ pthread_cond_signal(pthread_cond_t *cond

 	/* Take ownership of one waiter. */
 	self = pthread_self();
 	mutex = cond->ptc_mutex;
-	for (thread = cond->ptc_waiters;; thread = next) {
-		if (thread == NULL) {
+	for (waiter = cond->ptc_waiters;; waiter = next) {
+		if (waiter == NULL) {
 			return 0;
 		}
 		membar_datadep_consumer(); /* for alpha */
-		next = thread->pt_condnext;
-		next = atomic_cas_ptr(&cond->ptc_waiters, thread, next);
-		if (__predict_true(next == thread)) {
+		next = waiter->next;
+		next = atomic_cas_ptr(&cond->ptc_waiters, waiter, next);
+		if (__predict_true(next == waiter)) {
 			break;
 		}
 	}
-	if (self->pt_nwaiters >= pthread__unpark_max) {
-		pthread__clear_waiters(self);
-	}
-	self->pt_waiters[self->pt_nwaiters++] = thread->pt_lid;
-	membar_sync();
-	thread->pt_condwait = 0;
-	/* No longer safe to touch 'thread' */
-	pthread__mutex_deferwake(self, mutex);
+
+	/* Now transfer waiter to the mutex. */
+	waiter->next = NULL;
+	pthread__mutex_deferwake(self, mutex, waiter);
 	return 0;
 }

 int
 pthread_cond_broadcast(pthread_cond_t *cond)
 {
-	pthread_t self, thread, next;
+	struct pthread__waiter *head;
 	pthread_mutex_t *mutex;
+	pthread_t self;

 	if (__predict_false(__uselibcstub))
 		return __libc_cond_broadcast_stub(cond);
@@ -271,26 +265,14 @@ pthread_cond_broadcast(pthread_cond_t *c

 	/* Take ownership of the current set of waiters. */
 	self = pthread_self();
 	mutex = cond->ptc_mutex;
-	thread = atomic_swap_ptr(&cond->ptc_waiters, NULL);
+	head = atomic_swap_ptr(&cond->ptc_waiters, NULL);
+	if (head == NULL) {
+		return 0;
+	}
 	membar_datadep_consumer(); /* for alpha */
-	/*
-	 * Pull waiters from the queue and add to our list.  Use a memory
-	 * barrier to ensure that we safely read the value of pt_condnext
-	 * before 'thread' sees pt_condwait being cleared.
-	 */
-	while (thread != NULL) {
-		if (self->pt_nwaiters >= pthread__unpark_max) {
-			pthread__clear_waiters(self);
-		}
-		next = thread->pt_condnext;
-		self->pt_waiters[self->pt_nwaiters++] = thread->pt_lid;
-		membar_sync();
-		thread->pt_condwait = 0;
-		/* No longer safe to touch 'thread' */
-		thread = next;
-	}
-	pthread__mutex_deferwake(self, mutex);
+
+	/* Now transfer waiters to the mutex. */
+	pthread__mutex_deferwake(self, mutex, head);
 	return 0;
 }
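The subtle part of the condvar changes is the exit protocol: a cancelled or
timed-out waiter cannot unlink its on-stack record from the middle of a
CAS-only list, so it drains the list with pthread_cond_broadcast() and then
parks until some waker clears waiter.lid; only then may the record's stack
frame be reused.  Symmetrically, a waker must read waiter->next before
clearing lid, and must never touch the record afterwards.  A runnable sketch
of that handshake, with invented names, C11 atomics, and a busy-wait in
place of _lwp_park():

	#include <pthread.h>
	#include <stdatomic.h>
	#include <stdio.h>

	struct waiter {
		struct waiter *next;
		_Atomic int lid;  /* nonzero while record may be reachable */
	};

	static struct waiter *_Atomic head;

	static void *
	waker(void *arg)
	{
		struct waiter *w = atomic_exchange(&head, NULL), *next;

		(void)arg;
		while (w != NULL) {
			next = w->next;			/* read linkage... */
			atomic_store(&w->lid, 0);	/* ...then release */
			w = next;	/* 'w' may no longer be touched */
		}
		return NULL;
	}

	int
	main(void)
	{
		struct waiter w = { .next = NULL };
		pthread_t t;

		atomic_store(&w.lid, 100);
		atomic_store(&head, &w);
		pthread_create(&t, NULL, waker, NULL);

		/* Stand-in for the _lwp_park() loop: do not return, and so
		 * do not destroy the frame holding 'w', until released. */
		while (atomic_load(&w.lid) != 0)
			continue;
		pthread_join(t, NULL);
		printf("released; safe to return\n");
		return 0;
	}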
Index: src/lib/libpthread/pthread_int.h
diff -u src/lib/libpthread/pthread_int.h:1.106 src/lib/libpthread/pthread_int.h:1.107
--- src/lib/libpthread/pthread_int.h:1.106	Tue Jun  2 00:29:53 2020
+++ src/lib/libpthread/pthread_int.h	Wed Jun 10 22:45:15 2020
@@ -1,4 +1,4 @@
-/*	$NetBSD: pthread_int.h,v 1.106 2020/06/02 00:29:53 joerg Exp $	*/
+/*	$NetBSD: pthread_int.h,v 1.107 2020/06/10 22:45:15 ad Exp $	*/

 /*-
  * Copyright (c) 2001, 2002, 2003, 2006, 2007, 2008, 2020
@@ -104,15 +104,10 @@ struct __pthread_st {
 	size_t		pt_guardsize;
 	void		*pt_exitval;	/* Read by pthread_join() */
 	char		*pt_name;	/* Thread's name, set by the app. */
-	int		pt_willpark;	/* About to park */
 	struct pthread_lock_ops pt_lockops;/* Cached to avoid PIC overhead */
 	void	*(*pt_func)(void *);/* Function to call at start. */
 	void	*pt_arg;	/* Argument to pass at start. */

-	/* Threads to defer waking, usually until pthread_mutex_unlock(). */
-	lwpid_t		pt_waiters[PTHREAD__UNPARK_MAX];
-	size_t		pt_nwaiters;
-
 	/* Stack of cancellation cleanup handlers and their arguments */
 	PTQ_HEAD(, pt_clean_t)	pt_cleanup_stack;

@@ -139,11 +134,6 @@ struct __pthread_st {
 	int		pt_dummy1 __aligned(COHERENCY_UNIT);
 	struct lwpctl	*pt_lwpctl;	/* Kernel/user comms area */
 	volatile int	pt_rwlocked;	/* Handed rwlock successfully */
-	volatile int	pt_signalled;	/* Received pthread_cond_signal() */
-	volatile int	pt_mutexwait;	/* Waiting to acquire mutex */
-	volatile int	pt_condwait;	/* Waiting to acquire mutex */
-	void * volatile	pt_mutexnext;	/* Next thread in chain */
-	void * volatile	pt_condnext;	/* Next thread in chain */
 	void * volatile pt_sleepobj;	/* Object slept on */
 	PTQ_ENTRY(__pthread_st) pt_sleep;

@@ -187,6 +177,11 @@ extern int pthread_keys_max;

 extern int __uselibcstub;

+struct pthread__waiter {
+	struct pthread__waiter	*volatile next;
+	lwpid_t			volatile lid;
+};
+
 /* Flag to be used in a ucontext_t's uc_flags indicating that
  * the saved register state is "user" state only, not full
  * trap state.
@@ -202,7 +197,6 @@ void	pthread__unpark(pthread_queue_t *, 
 int	pthread__park(pthread_t, pthread_mutex_t *, pthread_queue_t *,
 		      const struct timespec *, int) PTHREAD_HIDE;
 pthread_mutex_t *pthread__hashlock(volatile const void *) PTHREAD_HIDE;
-void	pthread__clear_waiters(pthread_t) PTHREAD_HIDE;

 /* Internal locking primitives */
 void	pthread__lockprim_init(void) PTHREAD_HIDE;
@@ -304,7 +298,8 @@ void	pthread__errorfunc(const char *, in
 	__printflike(4, 5) PTHREAD_HIDE;
char	*pthread__getenv(const char *) PTHREAD_HIDE;
 __dead void	pthread__cancelled(void) PTHREAD_HIDE;
-void	pthread__mutex_deferwake(pthread_t, pthread_mutex_t *) PTHREAD_HIDE;
+void	pthread__mutex_deferwake(pthread_t, pthread_mutex_t *,
+	    struct pthread__waiter *) PTHREAD_HIDE;
 int	pthread__checkpri(int) PTHREAD_HIDE;
 int	pthread__add_specific(pthread_t, pthread_key_t, const void *) PTHREAD_HIDE;
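Both fields of struct pthread__waiter above are volatile because they are
written and read by different LWPs with no lock held.  The diffs order those
accesses with membar_producer()/membar_enter()/membar_sync(), compiled out
where PTHREAD__ATOMIC_IS_MEMBAR says the atomic operations already imply
barriers.  One plausible reading of the same protocol in portable C11
release/acquire terms; this is an interpretation offered for clarity, with
invented names, not the committed code:

	#include <stdatomic.h>
	#include <stdio.h>

	struct waiter {
		struct waiter *next;
		_Atomic int lid;
	};

	static _Atomic(struct waiter *) head;

	static void
	publish(struct waiter *w, int lid)
	{
		struct waiter *h = atomic_load_explicit(&head,
		    memory_order_relaxed);

		atomic_store_explicit(&w->lid, lid, memory_order_relaxed);
		do {
			w->next = h;
			/* Release: stores to *w become visible before 'w'
			 * is reachable via 'head' (the membar_producer()
			 * role in the diffs). */
		} while (!atomic_compare_exchange_weak_explicit(&head, &h,
		    w, memory_order_release, memory_order_relaxed));
	}

	static void
	release_all(void)
	{
		/* Acquire: observe fully initialized records (the
		 * membar_datadep_consumer() role, "for alpha"). */
		struct waiter *w = atomic_exchange_explicit(&head, NULL,
		    memory_order_acquire), *next;

		while (w != NULL) {
			next = w->next;
			/* Release: the read of w->next happens before the
			 * waiter can see lid == 0 (the membar_sync() role). */
			atomic_store_explicit(&w->lid, 0,
			    memory_order_release);
			w = next;
		}
	}

	int
	main(void)
	{
		struct waiter w = { .next = NULL };

		publish(&w, 7);
		release_all();
		printf("lid now %d\n", atomic_load(&w.lid));
		return 0;
	}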
Index: src/lib/libpthread/pthread_mutex.c
diff -u src/lib/libpthread/pthread_mutex.c:1.79 src/lib/libpthread/pthread_mutex.c:1.80
--- src/lib/libpthread/pthread_mutex.c:1.79	Wed Jun  3 22:10:24 2020
+++ src/lib/libpthread/pthread_mutex.c	Wed Jun 10 22:45:15 2020
@@ -1,4 +1,4 @@
-/*	$NetBSD: pthread_mutex.c,v 1.79 2020/06/03 22:10:24 ad Exp $	*/
+/*	$NetBSD: pthread_mutex.c,v 1.80 2020/06/10 22:45:15 ad Exp $	*/

 /*-
  * Copyright (c) 2001, 2003, 2006, 2007, 2008, 2020 The NetBSD Foundation, Inc.
@@ -47,7 +47,7 @@
  */

 #include <sys/cdefs.h>
-__RCSID("$NetBSD: pthread_mutex.c,v 1.79 2020/06/03 22:10:24 ad Exp $");
+__RCSID("$NetBSD: pthread_mutex.c,v 1.80 2020/06/10 22:45:15 ad Exp $");

 #include <sys/types.h>
 #include <sys/lwpctl.h>
@@ -65,12 +65,10 @@ __RCSID("$NetBSD: pthread_mutex.c,v 1.79
 #include "pthread_int.h"
 #include "reentrant.h"

-#define	MUTEX_WAITERS_BIT		((uintptr_t)0x01)
 #define	MUTEX_RECURSIVE_BIT		((uintptr_t)0x02)
 #define	MUTEX_PROTECT_BIT		((uintptr_t)0x08)
 #define	MUTEX_THREAD			((uintptr_t)~0x0f)

-#define	MUTEX_HAS_WAITERS(x)		((uintptr_t)(x) & MUTEX_WAITERS_BIT)
 #define	MUTEX_RECURSIVE(x)		((uintptr_t)(x) & MUTEX_RECURSIVE_BIT)
 #define	MUTEX_PROTECT(x)		((uintptr_t)(x) & MUTEX_PROTECT_BIT)
 #define	MUTEX_OWNER(x)			((uintptr_t)(x) & MUTEX_THREAD)
@@ -94,7 +92,12 @@ __RCSID("$NetBSD: pthread_mutex.c,v 1.79
 #define	NOINLINE		/* nothing */
 #endif

-static void	pthread__mutex_wakeup(pthread_t, pthread_mutex_t *);
+struct waiter {
+	struct waiter	*volatile next;
+	lwpid_t		volatile lid;
+};
+
+static void	pthread__mutex_wakeup(pthread_t, struct pthread__waiter *);
 static int	pthread__mutex_lock_slow(pthread_mutex_t *,
     const struct timespec *);
 static void	pthread__mutex_pause(void);
@@ -268,7 +271,8 @@ pthread__mutex_spin(pthread_mutex_t *ptm
 NOINLINE static int
 pthread__mutex_lock_slow(pthread_mutex_t *ptm, const struct timespec *ts)
 {
-	void *waiters, *newval, *owner, *next;
+	void *newval, *owner, *next;
+	struct waiter waiter;
 	pthread_t self;
 	int serrno;
 	int error;
@@ -277,8 +281,7 @@ pthread__mutex_lock_slow(pthread_mutex_t
 	self = pthread__self();
 	serrno = errno;

-	pthread__assert(!self->pt_willpark);
-	pthread__assert(!self->pt_mutexwait);
+	pthread__assert(self->pt_lid != 0);

 	/* Recursive or errorcheck? */
 	if (MUTEX_OWNER(owner) == (uintptr_t)self) {
@@ -323,64 +326,57 @@ pthread__mutex_lock_slow(pthread_mutex_t

 		/*
 		 * Nope, still held.  Add thread to the list of waiters.
-		 * Issue a memory barrier to ensure mutexwait/mutexnext
-		 * are visible before we enter the waiters list.
+		 * Issue a memory barrier to ensure stores to 'waiter'
+		 * are visible before we enter the list.
 		 */
-		self->pt_mutexwait = 1;
-		for (waiters = ptm->ptm_waiters;; waiters = next) {
-			self->pt_mutexnext = waiters;
+		waiter.next = ptm->ptm_waiters;
+		waiter.lid = self->pt_lid;
 #ifndef PTHREAD__ATOMIC_IS_MEMBAR
-			membar_producer();
+		membar_producer();
 #endif
-			next = atomic_cas_ptr(&ptm->ptm_waiters, waiters, self);
-			if (next == waiters)
-				break;
+		next = atomic_cas_ptr(&ptm->ptm_waiters, waiter.next, &waiter);
+		if (next != waiter.next) {
+			owner = ptm->ptm_owner;
+			continue;
 		}
-
+
 		/*
-		 * Try to set the waiters bit.  If the mutex has become free
-		 * since entering self onto the waiters list, need to wake
-		 * everybody up (including self) and retry.  It's possible
-		 * to race with the unlocking thread, so self may have
-		 * already been awoken.
+		 * If the mutex has become free since entering self onto the
+		 * waiters list, need to wake everybody up (including self)
+		 * and retry.  It's possible to race with an unlocking
+		 * thread, so self may have already been awoken.
 		 */
#ifndef PTHREAD__ATOMIC_IS_MEMBAR
-		membar_sync();
+		membar_enter();
 #endif
-		next = atomic_cas_ptr(&ptm->ptm_owner, owner,
-		    (void *)((uintptr_t)owner | MUTEX_WAITERS_BIT));
-		if (next != owner) {
-			pthread__mutex_wakeup(self, ptm);
+		if (MUTEX_OWNER(ptm->ptm_owner) == 0) {
+			pthread__mutex_wakeup(self,
+			    atomic_swap_ptr(&ptm->ptm_waiters, NULL));
 		}

 		/*
 		 * We must not proceed until told that we are no longer
-		 * waiting (via pt_mutexwait being set to zero).  Otherwise
-		 * it is unsafe to re-enter the thread onto the waiters
-		 * list.
+		 * waiting (via waiter.lid being set to zero).  Otherwise
+		 * it's unsafe to re-enter "waiter" onto the waiters list.
 		 */
-		do {
-			pthread__assert(self->pt_nwaiters <= 1);
-			pthread__assert(self->pt_nwaiters != 0 ||
-			    self->pt_waiters[0] == 0);
+		while (waiter.lid != 0) {
 			error = _lwp_park(CLOCK_REALTIME, TIMER_ABSTIME,
-			    __UNCONST(ts), self->pt_waiters[0], NULL, NULL);
-			self->pt_waiters[0] = 0;
-			self->pt_nwaiters = 0;
+			    __UNCONST(ts), 0, NULL, NULL);
 			if (error < 0 && errno == ETIMEDOUT) {
 				/* Remove self from waiters list */
-				pthread__mutex_wakeup(self, ptm);
+				pthread__mutex_wakeup(self,
+				    atomic_swap_ptr(&ptm->ptm_waiters, NULL));

 				/*
 				 * Might have raced with another thread to
 				 * do the wakeup.  In any case there will be
 				 * a wakeup for sure.  Eat it and wait for
-				 * pt_mutexwait to clear.
+				 * waiter.lid to clear.
 				 */
-				do {
-					(void)_lwp_park(CLOCK_REALTIME,
-					    TIMER_ABSTIME, NULL, 0, NULL, NULL);
-				} while (self->pt_mutexwait);
+				while (waiter.lid != 0) {
+					(void)_lwp_park(CLOCK_MONOTONIC, 0,
+					    NULL, 0, NULL, NULL);
+				}

 				/* Priority protect */
 				if (MUTEX_PROTECT(owner))
@@ -388,7 +384,7 @@ pthread__mutex_lock_slow(pthread_mutex_t
 				errno = serrno;
 				return ETIMEDOUT;
 			}
-		} while (self->pt_mutexwait);
+		}
 		owner = ptm->ptm_owner;
 	}
 }
@@ -440,7 +436,7 @@ int
 pthread_mutex_unlock(pthread_mutex_t *ptm)
 {
 	pthread_t self;
-	void *val;
+	void *val, *newval;
 	int error;

 	if (__predict_false(__uselibcstub))
@@ -454,11 +450,11 @@ pthread_mutex_unlock(pthread_mutex_t *pt
 #endif
 	error = 0;
 	self = pthread__self();
+	newval = NULL;

-	val = atomic_cas_ptr(&ptm->ptm_owner, self, NULL);
+	val = atomic_cas_ptr(&ptm->ptm_owner, self, newval);
 	if (__predict_false(val != self)) {
 		bool weown = (MUTEX_OWNER(val) == (uintptr_t)self);
-		void *newval = val;

 		if (__SIMPLELOCK_LOCKED_P(&ptm->ptm_errorcheck)) {
 			if (!weown) {
 				error = EPERM;
@@ -503,46 +499,48 @@ pthread_mutex_unlock(pthread_mutex_t *pt
 #ifndef PTHREAD__ATOMIC_IS_MEMBAR
 	membar_enter();
 #endif
-	if (MUTEX_HAS_WAITERS(val)) {
-		pthread__mutex_wakeup(self, ptm);
-	} else if (self->pt_nwaiters > 0) {
-		pthread__clear_waiters(self);
+	if (MUTEX_OWNER(newval) == 0 && ptm->ptm_waiters != NULL) {
+		pthread__mutex_wakeup(self,
+		    atomic_swap_ptr(&ptm->ptm_waiters, NULL));
 	}
 	return error;
 }

 /*
  * pthread__mutex_wakeup: unpark threads waiting for us
- *
- * unpark threads on the ptm->ptm_waiters list and self->pt_waiters.
  */
 static void
-pthread__mutex_wakeup(pthread_t self, pthread_mutex_t *ptm)
+pthread__mutex_wakeup(pthread_t self, struct pthread__waiter *cur)
 {
-	pthread_t thread, next;
-
-	/* Take ownership of the current set of waiters. */
-	thread = atomic_swap_ptr(&ptm->ptm_waiters, NULL);
-	membar_datadep_consumer(); /* for alpha */
+	lwpid_t lids[PTHREAD__UNPARK_MAX];
+	const size_t mlid = pthread__unpark_max;
+	struct pthread__waiter *next;
+	size_t nlid;

 	/*
 	 * Pull waiters from the queue and add to our list.  Use a memory
-	 * barrier to ensure that we safely read the value of pt_mutexnext
-	 * before 'thread' sees pt_mutexwait being cleared.
+	 * barrier to ensure that we safely read the value of waiter->next
+	 * before the awoken thread sees waiter->lid being cleared.
 	 */
-	while (thread != NULL) {
-		if (self->pt_nwaiters >= pthread__unpark_max) {
-			pthread__clear_waiters(self);
+	membar_datadep_consumer(); /* for alpha */
+	for (nlid = 0; cur != NULL; cur = next) {
+		if (nlid == mlid) {
+			(void)_lwp_unpark_all(lids, nlid, NULL);
+			nlid = 0;
 		}
-		next = thread->pt_mutexnext;
-		self->pt_waiters[self->pt_nwaiters++] = thread->pt_lid;
+		next = cur->next;
+		pthread__assert(cur->lid != 0);
+		lids[nlid++] = cur->lid;
 		membar_sync();
-		thread->pt_mutexwait = 0;
-		/* No longer safe to touch 'thread' */
-		thread = next;
+		cur->lid = 0;
+		/* No longer safe to touch 'cur' */
+	}
+	if (nlid == 1) {
+		(void)_lwp_unpark(lids[0], NULL);
+	} else if (nlid > 1) {
+		(void)_lwp_unpark_all(lids, nlid, NULL);
 	}
-	pthread__clear_waiters(self);
 }

 int
@@ -690,19 +688,41 @@ pthread_mutexattr_setpshared(pthread_mut
 #endif

 /*
- * pthread__mutex_deferwake: try to defer unparking threads in self->pt_waiters
- *
  * In order to avoid unnecessary contention on interlocking mutexes, we try
  * to defer waking up threads until we unlock the mutex.  The threads will
- * be woken up when the calling thread (self) releases a mutex.
+ * be woken up when the calling thread (self) releases the mutex.
 */
 void
-pthread__mutex_deferwake(pthread_t self, pthread_mutex_t *ptm)
+pthread__mutex_deferwake(pthread_t self, pthread_mutex_t *ptm,
+    struct pthread__waiter *head)
 {
+	struct pthread__waiter *tail, *n, *o;
+
+	pthread__assert(head != NULL);

 	if (__predict_false(ptm == NULL ||
 	    MUTEX_OWNER(ptm->ptm_owner) != (uintptr_t)self)) {
-		pthread__clear_waiters(self);
+		pthread__mutex_wakeup(self, head);
+		return;
+	}
+
+	/* This is easy if no existing waiters on mutex. */
+	if (atomic_cas_ptr(&ptm->ptm_waiters, NULL, head) == NULL) {
+		return;
+	}
+
+	/* Oops need to append.  Find the tail of the new queue. */
+	for (tail = head; tail->next != NULL; tail = tail->next) {
+		/* nothing */
+	}
+
+	/* Append atomically. */
+	for (o = ptm->ptm_waiters;; o = n) {
+		tail->next = o;
+		n = atomic_cas_ptr(&ptm->ptm_waiters, o, head);
+		if (__predict_true(n == o)) {
+			break;
+		}
 	}
 }
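pthread__mutex_deferwake() above takes the whole chain handed over by the
condvar and splices it onto the mutex: a single CAS when the mutex has no
waiters, otherwise a walk to the chain's tail followed by a CAS-append.  A
rough stand-alone equivalent using C11 atomics, with invented names
(splice, mutex_waiters); a sketch of the technique, not the committed code:

	#include <stdatomic.h>
	#include <stdio.h>

	struct waiter {
		struct waiter *next;
		int lid;
	};

	static _Atomic(struct waiter *) mutex_waiters;

	/* Splice a whole chain of waiters onto the mutex's waiter list. */
	static void
	splice(struct waiter *head)
	{
		struct waiter *tail, *o;

		/* Easy case: no existing waiters on the mutex. */
		o = NULL;
		if (atomic_compare_exchange_strong(&mutex_waiters, &o, head))
			return;

		/* Otherwise find the chain's tail and append atomically;
		 * on CAS failure 'o' is reloaded and we retry. */
		for (tail = head; tail->next != NULL; tail = tail->next)
			continue;
		for (o = atomic_load(&mutex_waiters);;) {
			tail->next = o;
			if (atomic_compare_exchange_weak(&mutex_waiters,
			    &o, head))
				break;
		}
	}

	int
	main(void)
	{
		struct waiter a = { .next = NULL, .lid = 1 };
		struct waiter b = { .next = NULL, .lid = 2 };

		splice(&a);	/* empty-list case */
		splice(&b);	/* append case */
		printf("head lid %d\n", atomic_load(&mutex_waiters)->lid);
		return 0;
	}

Splicing the chain intact is what lets pthread_cond_broadcast() hand over
all of its waiters in one step before the tail walk.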
Use a memory - * barrier to ensure that we safely read the value of pt_mutexnext - * before 'thread' sees pt_mutexwait being cleared. + * barrier to ensure that we safely read the value of waiter->next + * before the awoken thread sees waiter->lid being cleared. */ - while (thread != NULL) { - if (self->pt_nwaiters >= pthread__unpark_max) { - pthread__clear_waiters(self); + membar_datadep_consumer(); /* for alpha */ + for (nlid = 0; cur != NULL; cur = next) { + if (nlid == mlid) { + (void)_lwp_unpark_all(lids, nlid, NULL); + nlid = 0; } - next = thread->pt_mutexnext; - self->pt_waiters[self->pt_nwaiters++] = thread->pt_lid; + next = cur->next; + pthread__assert(cur->lid != 0); + lids[nlid++] = cur->lid; membar_sync(); - thread->pt_mutexwait = 0; - /* No longer safe to touch 'thread' */ - thread = next; + cur->lid = 0; + /* No longer safe to touch 'cur' */ + } + if (nlid == 1) { + (void)_lwp_unpark(lids[0], NULL); + } else if (nlid > 1) { + (void)_lwp_unpark_all(lids, nlid, NULL); } - pthread__clear_waiters(self); } int @@ -690,19 +688,41 @@ pthread_mutexattr_setpshared(pthread_mut #endif /* - * pthread__mutex_deferwake: try to defer unparking threads in self->pt_waiters - * * In order to avoid unnecessary contention on interlocking mutexes, we try * to defer waking up threads until we unlock the mutex. The threads will - * be woken up when the calling thread (self) releases a mutex. + * be woken up when the calling thread (self) releases the mutex. */ void -pthread__mutex_deferwake(pthread_t self, pthread_mutex_t *ptm) +pthread__mutex_deferwake(pthread_t self, pthread_mutex_t *ptm, + struct pthread__waiter *head) { + struct pthread__waiter *tail, *n, *o; + + pthread__assert(head != NULL); if (__predict_false(ptm == NULL || MUTEX_OWNER(ptm->ptm_owner) != (uintptr_t)self)) { - pthread__clear_waiters(self); + pthread__mutex_wakeup(self, head); + return; + } + + /* This is easy if no existing waiters on mutex. */ + if (atomic_cas_ptr(&ptm->ptm_waiters, NULL, head) == NULL) { + return; + } + + /* Oops need to append. Find the tail of the new queue. */ + for (tail = head; tail->next != NULL; tail = tail->next) { + /* nothing */ + } + + /* Append atomically. */ + for (o = ptm->ptm_waiters;; o = n) { + tail->next = o; + n = atomic_cas_ptr(&ptm->ptm_waiters, o, head); + if (__predict_true(n == o)) { + break; + } } } Index: src/lib/libpthread/pthread_types.h diff -u src/lib/libpthread/pthread_types.h:1.24 src/lib/libpthread/pthread_types.h:1.25 --- src/lib/libpthread/pthread_types.h:1.24 Mon Jun 1 11:44:59 2020 +++ src/lib/libpthread/pthread_types.h Wed Jun 10 22:45:15 2020 @@ -1,4 +1,4 @@ -/* $NetBSD: pthread_types.h,v 1.24 2020/06/01 11:44:59 ad Exp $ */ +/* $NetBSD: pthread_types.h,v 1.25 2020/06/10 22:45:15 ad Exp $ */ /*- * Copyright (c) 2001, 2008, 2020 The NetBSD Foundation, Inc. @@ -130,7 +130,7 @@ struct __pthread_mutex_st { uint8_t ptm_pad2[3]; #endif __pthread_volatile pthread_t ptm_owner; - pthread_t * __pthread_volatile ptm_waiters; + void * __pthread_volatile ptm_waiters; unsigned int ptm_recursed; void *ptm_spare2; /* unused - backwards compat */ }; @@ -172,7 +172,7 @@ struct __pthread_cond_st { /* Protects the queue of waiters */ __pthread_spin_t ptc_lock; - pthread_t volatile ptc_waiters; + void *volatile ptc_waiters; void *ptc_spare; pthread_mutex_t *ptc_mutex; /* Current mutex */